[MongoDB]: How do I enable Sharding (draft version)

To enable sharding:
===================
sh.addShard("rs1/ip-10-0-0-197:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011")
sh.addShard("rs2/ip-10-0-0-197:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021")

use shard_db
db.shard_col.ensureIndex( { user_id : 1 }, { unique : true })
sh.enableSharding("shard_db")
sh.shardCollection("shard_db.shard_col", {"user_id":1})
sh.startBalancer()
sh.getBalancerState()
sh.isBalancerRunning()
use shard_db
for(var i = 1; i <= 100000 ; i++){db.shard_col.insert({"user_id" : i , "name" : "bulk-inserts", "Iteration" : i });}

[root@ip-10-0-0-197 ~]# mongo --port 10000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:10000/test
Server has startup warnings:
2015-05-11T01:03:27.551+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:03:27.551+0000 I CONTROL
mongos>
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“554fffe05384fafde4642cd9”)
}
shards:
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ “_id” : “admin”, “partitioned” : false, “primary” : “config” }

mongos>
mongos>
mongos>
bye
[root@ip-10-0-0-197 ~]# mongo --port 27010
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27010/test
Server has startup warnings:
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> rs.status()
{
“set” : “rs1”,
“date” : ISODate(“2015-05-11T01:07:48.890Z”),
“myState” : 1,
“members” : [
{
“_id” : 0,
“name” : “ip-10-0-0-197:27010”,
“health” : 1,
“state” : 1,
“stateStr” : “PRIMARY”,
“uptime” : 461,
“optime” : Timestamp(1431306407, 1),
“optimeDate” : ISODate(“2015-05-11T01:06:47Z”),
“electionTime” : Timestamp(1431306407, 2),
“electionDate” : ISODate(“2015-05-11T01:06:47Z”),
“configVersion” : 1,
“self” : true
}
],
“ok” : 1
}
rs1:PRIMARY> rs.status()
{
“set” : “rs1”,
“date” : ISODate(“2015-05-11T01:08:15.050Z”),
“myState” : 1,
“members” : [
{
“_id” : 0,
“name” : “ip-10-0-0-197:27010”,
“health” : 1,
“state” : 1,
“stateStr” : “PRIMARY”,
“uptime” : 488,
“optime” : Timestamp(1431306407, 1),
“optimeDate” : ISODate(“2015-05-11T01:06:47Z”),
“electionTime” : Timestamp(1431306407, 2),
“electionDate” : ISODate(“2015-05-11T01:06:47Z”),
“configVersion” : 1,
“self” : true
}
],
“ok” : 1
}
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> rs.status()
{
“set” : “rs1”,
“date” : ISODate(“2015-05-11T01:13:07.239Z”),
“myState” : 1,
“members” : [
{
“_id” : 0,
“name” : “ip-10-0-0-197:27010”,
“health” : 1,
“state” : 1,
“stateStr” : “PRIMARY”,
“uptime” : 780,
“optime” : Timestamp(1431306607, 2),
“optimeDate” : ISODate(“2015-05-11T01:10:07Z”),
“electionTime” : Timestamp(1431306407, 2),
“electionDate” : ISODate(“2015-05-11T01:06:47Z”),
“configVersion” : 3,
“self” : true
},
{
“_id” : 1,
“name” : “ec2-52-7-8-107.compute-1.amazonaws.com:27011”,
“health” : 1,
“state” : 2,
“stateStr” : “SECONDARY”,
“uptime” : 179,
“optime” : Timestamp(1431306607, 2),
“optimeDate” : ISODate(“2015-05-11T01:10:07Z”),
“lastHeartbeat” : ISODate(“2015-05-11T01:13:05.912Z”),
“lastHeartbeatRecv” : ISODate(“2015-05-11T01:13:05.826Z”),
“pingMs” : 0,
“syncingTo” : “ip-10-0-0-197:27010”,
“configVersion” : 3
},
{
“_id” : 2,
“name” : “ec2-52-7-8-107.compute-1.amazonaws.com:27012”,
“health” : 1,
“state” : 7,
“stateStr” : “ARBITER”,
“uptime” : 179,
“lastHeartbeat” : ISODate(“2015-05-11T01:13:05.912Z”),
“lastHeartbeatRecv” : ISODate(“2015-05-11T01:13:05.911Z”),
“pingMs” : 0,
“configVersion” : 3
}
],
“ok” : 1
}
rs1:PRIMARY>
rs1:PRIMARY>
bye
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# ps -ef | grep mongo
root 14253 1 0 01:00 pts/0 00:00:00 ./mongod --configsvr --dbpath /data/config1 --logpath /tmp/config1.log --port 39000 --config /etc/mongod.conf
root 14254 1 0 01:00 pts/0 00:00:00 ./mongod --configsvr --dbpath /data/config2 --logpath /tmp/config2.log --port 39001 --config /etc/mongod.conf
root 14255 1 0 01:00 pts/0 00:00:00 ./mongod --configsvr --dbpath /data/config3 --logpath /tmp/config3.log --port 39002 --config /etc/mongod.conf
root 14256 1 0 01:00 pts/0 00:00:00 ./mongod --shardsvr --replSet rs1 --dbpath /data/1/shard1_1 --logpath /tmp/shard1_1.log --port 27010 --config /etc/mongod.conf
root 14257 1 0 01:00 pts/0 00:00:00 ./mongod --shardsvr --replSet rs1 --dbpath /data/1/shard1_2 --logpath /tmp/shard1_2.log --port 27011 --config /etc/mongod.conf
root 14258 1 0 01:00 pts/0 00:00:00 ./mongod --shardsvr --replSet rs2 --dbpath /data/2/shard2_1 --logpath /tmp/shard2_1.log --port 27020 --config /etc/mongod.conf
root 14259 1 0 01:00 pts/0 00:00:00 ./mongod --shardsvr --replSet rs2 --dbpath /data/2/shard2_2 --logpath /tmp/shard2_2.log --port 27021 --config /etc/mongod.conf
root 14260 1 0 01:00 pts/0 00:00:00 ./mongod --replSet rs1 --dbpath /data/arbiter1 --logpath /tmp/arbiter1.log --port 27012 --config /etc/mongod.conf
root 14261 1 0 01:00 pts/0 00:00:00 ./mongod --replSet rs2 --dbpath /data/arbiter2 --logpath /tmp/arbiter2.log --port 27022 --config /etc/mongod.conf
root 14442 1 0 01:03 pts/0 00:00:00 ./mongos --configdb ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 --logpath /tmp/router.log --port 10000
root 15129 14399 0 01:13 pts/1 00:00:00 grep mongo
[root@ip-10-0-0-197 ~]# cd /tmp/
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# cat shard1_1.log
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] MongoDB starting : pid=14256 port=27010 dbpath=/data/1/shard1_1 64-bit host=ip-10-0-0-197
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] db version v3.0.2
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] git version: 6201872043ecbbc0a4cc169b5482dcf385fc464f
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] build info: Linux build6.nj1.10gen.cc 2.6.32-431.3.1.el6.x86_64 #1 SMP Fri Jan 3 21:39:27 UTC 2014 x86_64 BOOST_LIB_VERSION=1_49
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] allocator: tcmalloc
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] options: { config: “/etc/mongod.conf”, net: { port: 27010 }, replication: { replSet: “rs1” }, sharding: { clusterRole: “shardsvr” }, storage: { dbPath: “/data/1/shard1_1”, journal: { enabled: false }, mmapv1: { smallFiles: true } }, systemLog: { destination: “file”, path: “/tmp/shard1_1.log”, quiet: true } }
2015-05-11T01:00:07.974+0000 I INDEX [initandlisten] allocating new ns file /data/1/shard1_1/local.ns, filling with zeroes…
2015-05-11T01:00:10.384+0000 I STORAGE [FileAllocator] allocating new datafile /data/1/shard1_1/local.0, filling with zeroes…
2015-05-11T01:00:10.385+0000 I STORAGE [FileAllocator] creating directory /data/1/shard1_1/_tmp
2015-05-11T01:00:10.398+0000 I STORAGE [FileAllocator] done allocating datafile /data/1/shard1_1/local.0, size: 16MB, took 0.006 secs
2015-05-11T01:00:10.427+0000 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
2015-05-11T01:00:10.428+0000 I NETWORK [initandlisten] waiting for connections on port 27010
2015-05-11T01:06:47.582+0000 I COMMAND [conn1] replSet info initiate : no configuration specified. Using a default configuration for the set
2015-05-11T01:06:47.582+0000 I COMMAND [conn1] replSet created this configuration for initiation : { _id: “rs1”, version: 1, members: [ { _id: 0, host: “ip-10-0-0-197:27010” } ] }
2015-05-11T01:06:47.582+0000 I REPL [conn1] replSetInitiate admin command received from client
2015-05-11T01:06:47.584+0000 I REPL [conn1] replSet replSetInitiate config object with 1 members parses ok
2015-05-11T01:06:47.584+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: “rs1”, version: 1, members: [ { _id: 0, host: “ip-10-0-0-197:27010”, arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 } } }
2015-05-11T01:06:47.584+0000 I REPL [ReplicationExecutor] This node is ip-10-0-0-197:27010 in the config
2015-05-11T01:06:47.584+0000 I REPL [ReplicationExecutor] transition to STARTUP2
2015-05-11T01:06:47.585+0000 I REPL [conn1] ******
2015-05-11T01:06:47.585+0000 I REPL [conn1] creating replication oplog of size: 990MB…
2015-05-11T01:06:47.585+0000 I STORAGE [FileAllocator] allocating new datafile /data/1/shard1_1/local.1, filling with zeroes…
2015-05-11T01:06:47.588+0000 I STORAGE [FileAllocator] done allocating datafile /data/1/shard1_1/local.1, size: 511MB, took 0.003 secs
2015-05-11T01:06:47.588+0000 I STORAGE [FileAllocator] allocating new datafile /data/1/shard1_1/local.2, filling with zeroes…
2015-05-11T01:06:47.591+0000 I STORAGE [FileAllocator] done allocating datafile /data/1/shard1_1/local.2, size: 511MB, took 0.002 secs
2015-05-11T01:06:47.630+0000 I REPL [conn1] ******
2015-05-11T01:06:47.630+0000 I REPL [conn1] Starting replication applier threads
2015-05-11T01:06:47.636+0000 I REPL [ReplicationExecutor] transition to RECOVERING
2015-05-11T01:06:47.636+0000 I REPL [ReplicationExecutor] transition to SECONDARY
2015-05-11T01:06:47.636+0000 I REPL [ReplicationExecutor] transition to PRIMARY
2015-05-11T01:06:48.634+0000 I REPL [rsSync] transition to primary complete; database writes are now permitted
2015-05-11T01:10:07.760+0000 I REPL [conn3] replSetReconfig admin command received from client
2015-05-11T01:10:07.761+0000 I REPL [conn3] replSetReconfig config object with 2 members parses ok
2015-05-11T01:10:07.763+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: “rs1”, version: 2, members: [ { _id: 0, host: “ip-10-0-0-197:27010”, arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: “ec2-52-7-8-107.compute-1.amazonaws.com:27011”, arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 } } }
2015-05-11T01:10:07.763+0000 I REPL [ReplicationExecutor] This node is ip-10-0-0-197:27010 in the config
2015-05-11T01:10:07.763+0000 I REPL [ReplicationExecutor] Member ec2-52-7-8-107.compute-1.amazonaws.com:27011 is now in state STARTUP
2015-05-11T01:10:07.861+0000 I REPL [conn7] replSetReconfig admin command received from client
2015-05-11T01:10:07.863+0000 I REPL [conn7] replSetReconfig config object with 3 members parses ok
2015-05-11T01:10:07.864+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: “rs1”, version: 3, members: [ { _id: 0, host: “ip-10-0-0-197:27010”, arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: “ec2-52-7-8-107.compute-1.amazonaws.com:27011”, arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: “ec2-52-7-8-107.compute-1.amazonaws.com:27012”, arbiterOnly: true, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 } } }
2015-05-11T01:10:07.864+0000 I REPL [ReplicationExecutor] This node is ip-10-0-0-197:27010 in the config
2015-05-11T01:10:07.864+0000 I REPL [ReplicationExecutor] Member ec2-52-7-8-107.compute-1.amazonaws.com:27011 is now in state SECONDARY
2015-05-11T01:10:07.868+0000 I REPL [ReplicationExecutor] Member ec2-52-7-8-107.compute-1.amazonaws.com:27012 is now in state STARTUP
2015-05-11T01:10:09.868+0000 I REPL [ReplicationExecutor] Member ec2-52-7-8-107.compute-1.amazonaws.com:27012 is now in state ARBITER
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# cat router.log
2015-05-11T01:03:27.551+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:03:27.551+0000 I CONTROL
2015-05-11T01:03:27.552+0000 I SHARDING [mongosMain] MongoS version 3.0.2 starting: pid=14442 port=10000 64-bit host=ip-10-0-0-197 (–help for usage)
2015-05-11T01:03:27.552+0000 I CONTROL [mongosMain] db version v3.0.2
2015-05-11T01:03:27.552+0000 I CONTROL [mongosMain] git version: 6201872043ecbbc0a4cc169b5482dcf385fc464f
2015-05-11T01:03:27.552+0000 I CONTROL [mongosMain] build info: Linux build6.nj1.10gen.cc 2.6.32-431.3.1.el6.x86_64 #1 SMP Fri Jan 3 21:39:27 UTC 2014 x86_64 BOOST_LIB_VERSION=1_49
2015-05-11T01:03:27.552+0000 I CONTROL [mongosMain] allocator: tcmalloc
2015-05-11T01:03:27.552+0000 I CONTROL [mongosMain] options: { net: { port: 10000 }, sharding: { configDB: “ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002” }, systemLog: { destination: “file”, path: “/tmp/router.log” } }
2015-05-11T01:03:27.556+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39000]
2015-05-11T01:03:27.556+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39001]
2015-05-11T01:03:27.557+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39002]
2015-05-11T01:03:27.560+0000 I NETWORK [mongosMain] scoped connection to ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 not being returned to the pool
2015-05-11T01:03:27.563+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39000]
2015-05-11T01:03:27.563+0000 I SHARDING [LockPinger] creating distributed lock ping thread for ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 and process ip-10-0-0-197:10000:1431306207:1804289383 (sleeping for 30000ms)
2015-05-11T01:03:27.563+0000 I NETWORK [LockPinger] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39000]
2015-05-11T01:03:27.564+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39001]
2015-05-11T01:03:27.564+0000 I NETWORK [LockPinger] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39001]
2015-05-11T01:03:27.564+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39002]
2015-05-11T01:03:27.565+0000 I NETWORK [LockPinger] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39002]
2015-05-11T01:03:28.453+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:03:27 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:03:28.469+0000 I SHARDING [mongosMain] distributed lock ‘configUpgrade/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 554fffe05384fafde4642cd7
2015-05-11T01:03:28.469+0000 I SHARDING [mongosMain] starting upgrade of config server from v0 to v6
2015-05-11T01:03:28.469+0000 I SHARDING [mongosMain] starting next upgrade step from v0 to v6
2015-05-11T01:03:28.469+0000 I SHARDING [mongosMain] about to log new metadata event: { _id: “ip-10-0-0-197-2015-05-11T01:03:28-554fffe05384fafde4642cd8”, server: “ip-10-0-0-197”, clientAddr: “N/A”, time: new Date(1431306208469), what: “starting upgrade of config database”, ns: “config.version”, details: { from: 0, to: 6 } }
2015-05-11T01:03:28.522+0000 I SHARDING [mongosMain] writing initial config version at v6
2015-05-11T01:03:28.529+0000 I SHARDING [mongosMain] about to log new metadata event: { _id: “ip-10-0-0-197-2015-05-11T01:03:28-554fffe05384fafde4642cda”, server: “ip-10-0-0-197”, clientAddr: “N/A”, time: new Date(1431306208529), what: “finished upgrade of config database”, ns: “config.version”, details: { from: 0, to: 6 } }
2015-05-11T01:03:28.537+0000 I SHARDING [mongosMain] upgrade of config server to v6 successful
2015-05-11T01:03:28.538+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39000]
2015-05-11T01:03:28.538+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39001]
2015-05-11T01:03:28.539+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39002]
2015-05-11T01:03:28.556+0000 I SHARDING [mongosMain] distributed lock ‘configUpgrade/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:03:28.670+0000 I SHARDING [Balancer] about to contact config servers and shards
2015-05-11T01:03:28.670+0000 I SHARDING [Balancer] config servers and shards contacted successfully
2015-05-11T01:03:28.670+0000 I SHARDING [Balancer] balancer id: ip-10-0-0-197:10000 started at May 11 01:03:28
2015-05-11T01:03:28.687+0000 I NETWORK [mongosMain] waiting for connections on port 10000
2015-05-11T01:03:28.754+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 554fffe05384fafde4642cdc
2015-05-11T01:03:28.818+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:03:38.843+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 554fffea5384fafde4642cde
2015-05-11T01:03:38.861+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:03:48.889+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 554ffff45384fafde4642ce0
2015-05-11T01:03:48.908+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:03:58.469+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:03:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:03:58.934+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 554ffffe5384fafde4642ce2
2015-05-11T01:03:58.953+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:08.979+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000085384fafde4642ce4
2015-05-11T01:04:08.998+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:19.025+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000125384fafde4642ce6
2015-05-11T01:04:19.043+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:28.485+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:04:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:04:29.072+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550001d5384fafde4642ce8
2015-05-11T01:04:29.091+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:39.117+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000275384fafde4642cea
2015-05-11T01:04:39.136+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:49.163+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000315384fafde4642cec
2015-05-11T01:04:49.181+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:58.502+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:04:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:04:59.208+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550003b5384fafde4642cee
2015-05-11T01:04:59.227+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:09.254+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000455384fafde4642cf0
2015-05-11T01:05:09.273+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:19.300+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550004f5384fafde4642cf2
2015-05-11T01:05:19.319+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:28.517+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:05:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:05:29.346+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000595384fafde4642cf4
2015-05-11T01:05:29.365+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:39.390+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000635384fafde4642cf6
2015-05-11T01:05:39.409+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:49.436+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550006d5384fafde4642cf8
2015-05-11T01:05:49.455+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:58.531+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:05:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:05:59.482+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000775384fafde4642cfa
2015-05-11T01:05:59.500+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:09.527+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000815384fafde4642cfc
2015-05-11T01:06:09.546+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:19.572+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550008b5384fafde4642cfe
2015-05-11T01:06:19.593+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:28.548+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:06:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:06:29.620+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000955384fafde4642d00
2015-05-11T01:06:29.639+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:39.672+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550009f5384fafde4642d02
2015-05-11T01:06:39.695+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:49.722+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000a95384fafde4642d04
2015-05-11T01:06:49.743+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:58.561+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:06:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:06:59.554+0000 I NETWORK [mongosMain] connection accepted from 127.0.0.1:49097 #1 (1 connection now open)
2015-05-11T01:06:59.555+0000 I SHARDING [conn1] couldn’t find database [admin] in config db
2015-05-11T01:06:59.556+0000 I SHARDING [conn1] put [admin] on: config:ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002
2015-05-11T01:06:59.784+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000b35384fafde4642d06
2015-05-11T01:06:59.804+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:07.714+0000 I NETWORK [conn1] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39000]
2015-05-11T01:07:07.715+0000 I NETWORK [conn1] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39001]
2015-05-11T01:07:07.716+0000 I NETWORK [conn1] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39002]
2015-05-11T01:07:09.831+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000bd5384fafde4642d08
2015-05-11T01:07:09.849+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:19.874+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000c75384fafde4642d0a
2015-05-11T01:07:19.893+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:28.575+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:07:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:07:29.919+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000d15384fafde4642d0c
2015-05-11T01:07:29.940+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:38.080+0000 I NETWORK [conn1] end connection 127.0.0.1:49097 (0 connections now open)
2015-05-11T01:07:39.970+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000db5384fafde4642d0e
2015-05-11T01:07:39.989+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:50.014+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000e55384fafde4642d10
2015-05-11T01:07:50.032+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:58.592+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:07:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:08:00.060+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000f05384fafde4642d12
2015-05-11T01:08:00.080+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:10.107+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000fa5384fafde4642d14
2015-05-11T01:08:10.126+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:20.154+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001045384fafde4642d16
2015-05-11T01:08:20.173+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:28.607+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:08:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:08:30.200+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550010e5384fafde4642d18
2015-05-11T01:08:30.219+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:40.244+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001185384fafde4642d1a
2015-05-11T01:08:40.264+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:50.290+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001225384fafde4642d1c
2015-05-11T01:08:50.310+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:58.622+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:08:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:09:00.338+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550012c5384fafde4642d1e
2015-05-11T01:09:00.358+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:10.385+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001365384fafde4642d20
2015-05-11T01:09:10.403+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:20.429+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001405384fafde4642d22
2015-05-11T01:09:20.447+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:28.638+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:09:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:09:30.473+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550014a5384fafde4642d24
2015-05-11T01:09:30.494+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:40.518+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001545384fafde4642d26
2015-05-11T01:09:40.537+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:50.563+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550015e5384fafde4642d28
2015-05-11T01:09:50.581+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:58.654+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:09:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:10:00.608+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001685384fafde4642d2a
2015-05-11T01:10:00.626+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:10.658+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001725384fafde4642d2c
2015-05-11T01:10:10.681+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:20.705+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550017c5384fafde4642d2e
2015-05-11T01:10:20.724+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:28.664+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:10:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:10:30.751+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001865384fafde4642d30
2015-05-11T01:10:30.770+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:40.796+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001905384fafde4642d32
2015-05-11T01:10:40.815+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:50.842+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550019a5384fafde4642d34
2015-05-11T01:10:50.862+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:58.679+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:10:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:11:00.889+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001a45384fafde4642d36
2015-05-11T01:11:00.909+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:10.935+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001ae5384fafde4642d38
2015-05-11T01:11:10.955+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:20.979+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001b85384fafde4642d3a
2015-05-11T01:11:20.998+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:28.697+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:11:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:11:31.025+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001c25384fafde4642d3c
2015-05-11T01:11:31.044+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:41.071+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001cd5384fafde4642d3e
2015-05-11T01:11:41.090+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:51.115+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001d75384fafde4642d40
2015-05-11T01:11:51.134+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:58.712+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:11:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:12:01.161+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001e15384fafde4642d42
2015-05-11T01:12:01.182+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:11.209+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001eb5384fafde4642d44
2015-05-11T01:12:11.229+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:21.255+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001f55384fafde4642d46
2015-05-11T01:12:21.274+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:28.728+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:12:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:12:31.301+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001ff5384fafde4642d48
2015-05-11T01:12:31.321+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:41.346+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002095384fafde4642d4a
2015-05-11T01:12:41.367+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:51.392+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002135384fafde4642d4c
2015-05-11T01:12:51.411+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:58.738+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:12:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:13:01.479+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550021d5384fafde4642d4e
2015-05-11T01:13:01.523+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:11.550+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002275384fafde4642d50
2015-05-11T01:13:11.568+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:21.593+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002315384fafde4642d52
2015-05-11T01:13:21.614+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:27.961+0000 I NETWORK [mongosMain] connection accepted from 10.0.0.197:56870 #2 (1 connection now open)
2015-05-11T01:13:27.962+0000 I NETWORK [conn2] starting new replica set monitor for replica set rs1 with seeds ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011
2015-05-11T01:13:27.963+0000 I NETWORK [ReplicaSetMonitorWatcher] starting
2015-05-11T01:13:27.964+0000 I NETWORK [conn2] changing hosts to rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010 from rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011
2015-05-11T01:13:27.965+0000 I COMMAND [conn2] addshard request { addShard: “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011” } failed: in seed list rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011, host ec2-52-7-8-107.compute-1.amazonaws.com:27010 does not belong to replica set rs1
2015-05-11T01:13:27.967+0000 I NETWORK [conn2] end connection 10.0.0.197:56870 (0 connections now open)
2015-05-11T01:13:28.005+0000 I NETWORK [mongosMain] connection accepted from 10.0.0.197:56874 #3 (1 connection now open)
2015-05-11T01:13:28.006+0000 I NETWORK [conn3] starting new replica set monitor for replica set rs2 with seeds ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021
2015-05-11T01:13:28.007+0000 I NETWORK [conn3] changing hosts to rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27021,ip-10-0-0-197:27020 from rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021
2015-05-11T01:13:28.010+0000 I COMMAND [conn3] addshard request { addShard: “rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021” } failed: in seed list rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021, host ec2-52-7-8-107.compute-1.amazonaws.com:27020 does not belong to replica set rs2
2015-05-11T01:13:28.012+0000 I NETWORK [conn3] end connection 10.0.0.197:56874 (0 connections now open)
2015-05-11T01:13:28.753+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:13:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:13:31.642+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550023b5384fafde4642d54
2015-05-11T01:13:31.662+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:41.687+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002455384fafde4642d56
2015-05-11T01:13:41.710+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:51.735+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550024f5384fafde4642d58
2015-05-11T01:13:51.754+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:58.768+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:13:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:14:01.822+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002595384fafde4642d5a
2015-05-11T01:14:01.865+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:11.891+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002635384fafde4642d5c
2015-05-11T01:14:11.910+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:21.936+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550026d5384fafde4642d5e
2015-05-11T01:14:21.954+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:28.783+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:14:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:14:31.980+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002775384fafde4642d60
2015-05-11T01:14:31.998+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:42.026+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002815384fafde4642d62
2015-05-11T01:14:42.045+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:52.071+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550028c5384fafde4642d64
2015-05-11T01:14:52.088+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:58.797+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:14:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:15:02.152+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002965384fafde4642d66
2015-05-11T01:15:02.197+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:15:12.225+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002a05384fafde4642d68
2015-05-11T01:15:12.244+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:15:22.272+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002aa5384fafde4642d6a
2015-05-11T01:15:22.292+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:15:28.808+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:15:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:15:32.318+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002b45384fafde4642d6c
2015-05-11T01:15:32.338+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:15:42.363+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002be5384fafde4642d6e
2015-05-11T01:15:42.382+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# 4
bash: 4: command not found
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# cd $MONGODB_HOME
[root@ip-10-0-0-197 mongodb-linux-x86_64-3.0.2]#
[root@ip-10-0-0-197 mongodb-linux-x86_64-3.0.2]#
[root@ip-10-0-0-197 mongodb-linux-x86_64-3.0.2]# cd bin/
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ll -lhtr
total 113M
-rwxr-xr-x 1 1046 1046 4.1M Apr 8 20:39 bsondump
-rwxr-xr-x 1 1046 1046 5.6M Apr 8 20:39 mongostat
-rwxr-xr-x 1 1046 1046 5.6M Apr 8 20:39 mongofiles
-rwxr-xr-x 1 1046 1046 5.7M Apr 8 20:39 mongoexport
-rwxr-xr-x 1 1046 1046 5.8M Apr 8 20:39 mongoimport
-rwxr-xr-x 1 1046 1046 6.0M Apr 8 20:39 mongorestore
-rwxr-xr-x 1 1046 1046 5.8M Apr 8 20:39 mongodump
-rwxr-xr-x 1 1046 1046 5.4M Apr 8 20:39 mongotop
-rwxr-xr-x 1 1046 1046 5.3M Apr 8 20:39 mongooplog
-rwxr-xr-x 1 1046 1046 22M Apr 8 20:46 mongoperf
-rwxr-xr-x 1 1046 1046 22M Apr 8 20:46 mongod
-rwxr-xr-x 1 1046 1046 11M Apr 8 20:46 mongos
-rwxr-xr-x 1 1046 1046 12M Apr 8 20:46 mongo
-rw-r–r– 1 root root 4.3K May 10 15:13 shard_creation_script.sh
-rw-r–r– 1 root root 4.3K May 10 15:36 new_shard_creation_script.sh
-rw-r–r– 1 root root 3.9K May 10 15:56 NSS.sh
-rw-r–r– 1 root root 3.9K May 10 16:06 shard-script.sh
-rw-r–r– 1 root root 4.0K May 11 00:59 new_script.sh
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# cat new_script.sh
##### Kill any existing Mongo processes ################
# Matches mongod shard/config/replica members and the mongos router by their
# distinctive command-line flags; excludes the egrep itself from the PID list.
# kill -9 is acceptable here only because this is a disposable test cluster.
for i in `ps -ef | egrep 'shardsvr|configsvr|replSet|configdb' | grep -v egrep | awk '{print $2}'`; do kill -9 $i; done

##### Create Mongo data & log directories #####################

mkdir -p /data /data/1 /data/2 /data/3
rm -rf /data/*    # wipe data left over from any previous run
cd /data/
mkdir -p config1 config2 config3 arbiter1 arbiter2 arbiter3 router /data/1/shard1_1 /data/1/shard1_2 /data/2/shard2_1 /data/2/shard2_2 /data/3/shard3_1 /data/3/shard3_2

cd /home/ec2-user/mongodb-linux-x86_64-3.0.2/bin

##### Start the Mongo Config, Shard, Arbiter & Router services ################

## Config servers #####
./mongod --configsvr --dbpath /data/config1 --logpath /tmp/config1.log --port 39000 --config /etc/mongod.conf &
./mongod --configsvr --dbpath /data/config2 --logpath /tmp/config2.log --port 39001 --config /etc/mongod.conf &
./mongod --configsvr --dbpath /data/config3 --logpath /tmp/config3.log --port 39002 --config /etc/mongod.conf &

## Replica set 1 ######
./mongod --shardsvr --replSet rs1 --dbpath /data/1/shard1_1 --logpath /tmp/shard1_1.log --port 27010 --config /etc/mongod.conf &
./mongod --shardsvr --replSet rs1 --dbpath /data/1/shard1_2 --logpath /tmp/shard1_2.log --port 27011 --config /etc/mongod.conf &

## Replica set 2 ######
./mongod --shardsvr --replSet rs2 --dbpath /data/2/shard2_1 --logpath /tmp/shard2_1.log --port 27020 --config /etc/mongod.conf &
./mongod --shardsvr --replSet rs2 --dbpath /data/2/shard2_2 --logpath /tmp/shard2_2.log --port 27021 --config /etc/mongod.conf &

## Replica set 3 (disabled) ######
#./mongod --shardsvr --replSet rs3 --dbpath /data/3/shard3_1 --logpath /tmp/shard3_1.log --port 27030 --config /etc/mongod.conf &
#./mongod --shardsvr --replSet rs3 --dbpath /data/3/shard3_2 --logpath /tmp/shard3_2.log --port 27031 --config /etc/mongod.conf &

## Arbiters ####
./mongod --replSet rs1 --dbpath /data/arbiter1 --logpath /tmp/arbiter1.log --port 27012 --config /etc/mongod.conf &
./mongod --replSet rs2 --dbpath /data/arbiter2 --logpath /tmp/arbiter2.log --port 27022 --config /etc/mongod.conf &
#./mongod --replSet rs3 --dbpath /data/arbiter3 --logpath /tmp/arbiter3.log --port 27032 --config /etc/mongod.conf &

sleep 200    # give the config servers time to come up before starting mongos
./mongos --configdb ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 --logpath /tmp/router.log --port 10000 &
sleep 200

## Initiate each replica set on its first member ####
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval "rs.initiate()"
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval "rs.initiate()"
#./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27030/admin --eval "rs.initiate()"

sleep 200
echo -e "\n\n Replica sets are being added. \n\n"

./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval "rs.add(\"ec2-52-7-8-107.compute-1.amazonaws.com:27011\")"
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval "rs.add(\"ec2-52-7-8-107.compute-1.amazonaws.com:27021\")"
#./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27030/admin --eval "rs.add(\"ec2-52-7-8-107.compute-1.amazonaws.com:27031\")"

./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval "rs.addArb(\"ec2-52-7-8-107.compute-1.amazonaws.com:27012\")"
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval "rs.addArb(\"ec2-52-7-8-107.compute-1.amazonaws.com:27022\")"
#./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27030/admin --eval "rs.addArb(\"ec2-52-7-8-107.compute-1.amazonaws.com:27032\")"

sleep 200
## Add each replica set as a shard via the mongos router.
## NOTE: the first seed host must match the hostname the replica set reports
## for itself (ip-10-0-0-197, see rs.initiate() on the instance) — using the
## public EC2 name here fails with:
##   "host ec2-...:27010 does not belong to replica set rs1"
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin --eval "sh.addShard(\"rs1/ip-10-0-0-197:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011\")"
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin --eval "sh.addShard(\"rs2/ip-10-0-0-197:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021\")"
#./mongo ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin --eval "sh.addShard(\"rs3/ip-10-0-0-197:27030,ec2-52-7-8-107.compute-1.amazonaws.com:27031\")"

[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27010
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27010/test
Server has startup warnings:
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27020
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27020/test
Server has startup warnings:
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> show dbs
local 1.031GB
shard_db 0.031GB
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> use shard_db
switched to db shard_db
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> show collections
shard_col
system.indexes
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
99991
rs2:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#

sh.addShard("rs1/ip-10-0-0-197:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011")
sh.addShard("rs2/ip-10-0-0-197:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021")

use shard_db
db.shard_col.ensureIndex( { user_id : 1 }, { unique : true })
sh.enableSharding("shard_db")
sh.shardCollection("shard_db.shard_col", {"user_id":1})
sh.startBalancer()
sh.getBalancerState()
sh.isBalancerRunning()
use shard_db
for(var i = 1; i <= 100000 ; i++){db.shard_col.insert({"user_id" : i , "name" : "bulk-inserts", " Iteration: " : i });}
db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } })

[root@ip-10-0-0-197 bin]# mongo --port 27010
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27010/test
Server has startup warnings:
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> use shard_db
switched to db shard_db
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a95226”), “user_id” : 1, “name” : “bulk-inserts”, ” Iteration: ” : 1 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95227”), “user_id” : 2, “name” : “bulk-inserts”, ” Iteration: ” : 2 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95228”), “user_id” : 3, “name” : “bulk-inserts”, ” Iteration: ” : 3 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95229”), “user_id” : 4, “name” : “bulk-inserts”, ” Iteration: ” : 4 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522a”), “user_id” : 5, “name” : “bulk-inserts”, ” Iteration: ” : 5 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522b”), “user_id” : 6, “name” : “bulk-inserts”, ” Iteration: ” : 6 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522c”), “user_id” : 7, “name” : “bulk-inserts”, ” Iteration: ” : 7 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522d”), “user_id” : 8, “name” : “bulk-inserts”, ” Iteration: ” : 8 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522e”), “user_id” : 9, “name” : “bulk-inserts”, ” Iteration: ” : 9 }
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27020
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27020/test
Server has startup warnings:
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> use shard_db
switched to db shard_db
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522f”), “user_id” : 10, “name” : “bulk-inserts”, ” Iteration: ” : 10 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95230”), “user_id” : 11, “name” : “bulk-inserts”, ” Iteration: ” : 11 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95231”), “user_id” : 12, “name” : “bulk-inserts”, ” Iteration: ” : 12 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95232”), “user_id” : 13, “name” : “bulk-inserts”, ” Iteration: ” : 13 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95233”), “user_id” : 14, “name” : “bulk-inserts”, ” Iteration: ” : 14 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95234”), “user_id” : 15, “name” : “bulk-inserts”, ” Iteration: ” : 15 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95235”), “user_id” : 16, “name” : “bulk-inserts”, ” Iteration: ” : 16 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95236”), “user_id” : 17, “name” : “bulk-inserts”, ” Iteration: ” : 17 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95237”), “user_id” : 18, “name” : “bulk-inserts”, ” Iteration: ” : 18 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95238”), “user_id” : 19, “name” : “bulk-inserts”, ” Iteration: ” : 19 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95239”), “user_id” : 20, “name” : “bulk-inserts”, ” Iteration: ” : 20 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523a”), “user_id” : 21, “name” : “bulk-inserts”, ” Iteration: ” : 21 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523b”), “user_id” : 22, “name” : “bulk-inserts”, ” Iteration: ” : 22 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523c”), “user_id” : 23, “name” : “bulk-inserts”, ” Iteration: ” : 23 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523d”), “user_id” : 24, “name” : “bulk-inserts”, ” Iteration: ” : 24 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523e”), “user_id” : 25, “name” : “bulk-inserts”, ” Iteration: ” : 25 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523f”), “user_id” : 26, “name” : “bulk-inserts”, ” Iteration: ” : 26 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95240”), “user_id” : 27, “name” : “bulk-inserts”, ” Iteration: ” : 27 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95241”), “user_id” : 28, “name” : “bulk-inserts”, ” Iteration: ” : 28 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95242”), “user_id” : 29, “name” : “bulk-inserts”, ” Iteration: ” : 29 }
Type “it” for more
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
119991
rs2:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27010
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27010/test
Server has startup warnings:
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> use shard_db
switched to db shard_db
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27020
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27020/test
Server has startup warnings:
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
rs2:PRIMARY> use shard_db
switched to db shard_db
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
158901
rs2:PRIMARY> db.shard_col.count()
161356
rs2:PRIMARY> db.shard_col.count()
163025
rs2:PRIMARY> db.shard_col.count()
164730
rs2:PRIMARY> db.shard_col.count()
166165
rs2:PRIMARY> db.shard_col.count()
167618
rs2:PRIMARY> db.shard_col.count()
169023
rs2:PRIMARY> db.shard_col.count()
170179
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
174060
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.currentOp()
{
“inprog” : [
{
“desc” : “conn10”,
“threadId” : “0x2cb23c0”,
“connectionId” : 10,
“opid” : 633532,
“active” : true,
“secs_running” : 3,
“microsecs_running” : NumberLong(3288581),
“op” : “getmore”,
“ns” : “local.oplog.rs”,
“query” : {

},
“client” : “10.0.0.197:49000”,
“numYields” : 0,
“locks” : {

},
“waitingForLock” : false,
“lockStats” : {
“Global” : {
“acquireCount” : {
“r” : NumberLong(4)
}
},
“MMAPV1Journal” : {
“acquireCount” : {
“r” : NumberLong(4)
}
},
“Database” : {
“acquireCount” : {
“r” : NumberLong(4)
}
},
“oplog” : {
“acquireCount” : {
“R” : NumberLong(4)
}
}
}
}
]
}
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
179762
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
179762
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
179762
rs2:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27010
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27010/test
Server has startup warnings:
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> use shard_db
switched to db shard_db
rs1:PRIMARY> db.shard_col.count()
20239
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a95226”), “user_id” : 1, “name” : “bulk-inserts”, ” Iteration: ” : 1 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95227”), “user_id” : 2, “name” : “bulk-inserts”, ” Iteration: ” : 2 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95228”), “user_id” : 3, “name” : “bulk-inserts”, ” Iteration: ” : 3 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95229”), “user_id” : 4, “name” : “bulk-inserts”, ” Iteration: ” : 4 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522a”), “user_id” : 5, “name” : “bulk-inserts”, ” Iteration: ” : 5 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522b”), “user_id” : 6, “name” : “bulk-inserts”, ” Iteration: ” : 6 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522c”), “user_id” : 7, “name” : “bulk-inserts”, ” Iteration: ” : 7 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522d”), “user_id” : 8, “name” : “bulk-inserts”, ” Iteration: ” : 8 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522e”), “user_id” : 9, “name” : “bulk-inserts”, ” Iteration: ” : 9 }
{ “_id” : ObjectId(“55500521031f902dc9636298”), “user_id” : 179771, “name” : “bulk-inserts”, ” Iteration: ” : 179771 }
{ “_id” : ObjectId(“55500522031f902dc9636299”), “user_id” : 179772, “name” : “bulk-inserts”, ” Iteration: ” : 179772 }
{ “_id” : ObjectId(“55500522031f902dc963629a”), “user_id” : 179773, “name” : “bulk-inserts”, ” Iteration: ” : 179773 }
{ “_id” : ObjectId(“55500522031f902dc963629b”), “user_id” : 179774, “name” : “bulk-inserts”, ” Iteration: ” : 179774 }
{ “_id” : ObjectId(“55500522031f902dc963629c”), “user_id” : 179775, “name” : “bulk-inserts”, ” Iteration: ” : 179775 }
{ “_id” : ObjectId(“55500522031f902dc963629d”), “user_id” : 179776, “name” : “bulk-inserts”, ” Iteration: ” : 179776 }
{ “_id” : ObjectId(“55500522031f902dc963629e”), “user_id” : 179777, “name” : “bulk-inserts”, ” Iteration: ” : 179777 }
{ “_id” : ObjectId(“55500522031f902dc963629f”), “user_id” : 179778, “name” : “bulk-inserts”, ” Iteration: ” : 179778 }
{ “_id” : ObjectId(“55500522031f902dc96362a0”), “user_id” : 179779, “name” : “bulk-inserts”, ” Iteration: ” : 179779 }
{ “_id” : ObjectId(“55500522031f902dc96362a1”), “user_id” : 179780, “name” : “bulk-inserts”, ” Iteration: ” : 179780 }
{ “_id” : ObjectId(“55500522031f902dc96362a2”), “user_id” : 179781, “name” : “bulk-inserts”, ” Iteration: ” : 179781 }
Type “it” for more
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27011
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27011/test
Server has startup warnings:
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten]
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY> use shard_db
switched to db shard_db
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY> db.shard_col.find()
Error: error: { “$err” : “not master and slaveOk=false”, “code” : 13435 }
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY> rs.slaveOk()
rs1:SECONDARY>
rs1:SECONDARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a95226”), “user_id” : 1, “name” : “bulk-inserts”, ” Iteration: ” : 1 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95227”), “user_id” : 2, “name” : “bulk-inserts”, ” Iteration: ” : 2 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95228”), “user_id” : 3, “name” : “bulk-inserts”, ” Iteration: ” : 3 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95229”), “user_id” : 4, “name” : “bulk-inserts”, ” Iteration: ” : 4 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522a”), “user_id” : 5, “name” : “bulk-inserts”, ” Iteration: ” : 5 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522b”), “user_id” : 6, “name” : “bulk-inserts”, ” Iteration: ” : 6 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522c”), “user_id” : 7, “name” : “bulk-inserts”, ” Iteration: ” : 7 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522d”), “user_id” : 8, “name” : “bulk-inserts”, ” Iteration: ” : 8 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522e”), “user_id” : 9, “name” : “bulk-inserts”, ” Iteration: ” : 9 }
{ “_id” : ObjectId(“55500521031f902dc9636298”), “user_id” : 179771, “name” : “bulk-inserts”, ” Iteration: ” : 179771 }
{ “_id” : ObjectId(“55500522031f902dc9636299”), “user_id” : 179772, “name” : “bulk-inserts”, ” Iteration: ” : 179772 }
{ “_id” : ObjectId(“55500522031f902dc963629a”), “user_id” : 179773, “name” : “bulk-inserts”, ” Iteration: ” : 179773 }
{ “_id” : ObjectId(“55500522031f902dc963629b”), “user_id” : 179774, “name” : “bulk-inserts”, ” Iteration: ” : 179774 }
{ “_id” : ObjectId(“55500522031f902dc963629c”), “user_id” : 179775, “name” : “bulk-inserts”, ” Iteration: ” : 179775 }
{ “_id” : ObjectId(“55500522031f902dc963629d”), “user_id” : 179776, “name” : “bulk-inserts”, ” Iteration: ” : 179776 }
{ “_id” : ObjectId(“55500522031f902dc963629e”), “user_id” : 179777, “name” : “bulk-inserts”, ” Iteration: ” : 179777 }
{ “_id” : ObjectId(“55500522031f902dc963629f”), “user_id” : 179778, “name” : “bulk-inserts”, ” Iteration: ” : 179778 }
{ “_id” : ObjectId(“55500522031f902dc96362a0”), “user_id” : 179779, “name” : “bulk-inserts”, ” Iteration: ” : 179779 }
{ “_id” : ObjectId(“55500522031f902dc96362a1”), “user_id” : 179780, “name” : “bulk-inserts”, ” Iteration: ” : 179780 }
{ “_id” : ObjectId(“55500522031f902dc96362a2”), “user_id” : 179781, “name” : “bulk-inserts”, ” Iteration: ” : 179781 }
Type “it” for more
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27020
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27020/test
Server has startup warnings:
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> use shard_db
switched to db shard_db
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522f”), “user_id” : 10, “name” : “bulk-inserts”, ” Iteration: ” : 10 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95230”), “user_id” : 11, “name” : “bulk-inserts”, ” Iteration: ” : 11 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95231”), “user_id” : 12, “name” : “bulk-inserts”, ” Iteration: ” : 12 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95232”), “user_id” : 13, “name” : “bulk-inserts”, ” Iteration: ” : 13 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95233”), “user_id” : 14, “name” : “bulk-inserts”, ” Iteration: ” : 14 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95234”), “user_id” : 15, “name” : “bulk-inserts”, ” Iteration: ” : 15 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95235”), “user_id” : 16, “name” : “bulk-inserts”, ” Iteration: ” : 16 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95236”), “user_id” : 17, “name” : “bulk-inserts”, ” Iteration: ” : 17 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95237”), “user_id” : 18, “name” : “bulk-inserts”, ” Iteration: ” : 18 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95238”), “user_id” : 19, “name” : “bulk-inserts”, ” Iteration: ” : 19 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95239”), “user_id” : 20, “name” : “bulk-inserts”, ” Iteration: ” : 20 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523a”), “user_id” : 21, “name” : “bulk-inserts”, ” Iteration: ” : 21 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523b”), “user_id” : 22, “name” : “bulk-inserts”, ” Iteration: ” : 22 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523c”), “user_id” : 23, “name” : “bulk-inserts”, ” Iteration: ” : 23 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523d”), “user_id” : 24, “name” : “bulk-inserts”, ” Iteration: ” : 24 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523e”), “user_id” : 25, “name” : “bulk-inserts”, ” Iteration: ” : 25 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523f”), “user_id” : 26, “name” : “bulk-inserts”, ” Iteration: ” : 26 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95240”), “user_id” : 27, “name” : “bulk-inserts”, ” Iteration: ” : 27 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95241”), “user_id” : 28, “name” : “bulk-inserts”, ” Iteration: ” : 28 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95242”), “user_id” : 29, “name” : “bulk-inserts”, ” Iteration: ” : 29 }
Type “it” for more
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27021
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27021/test
Server has startup warnings:
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten]
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY> use shard_db
switched to db shard_db
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY> db.shard_col.find()
Error: error: { “$err” : “not master and slaveOk=false”, “code” : 13435 }
rs2:SECONDARY>
rs2:SECONDARY> rs.slaveOk()
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522f”), “user_id” : 10, “name” : “bulk-inserts”, ” Iteration: ” : 10 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95230”), “user_id” : 11, “name” : “bulk-inserts”, ” Iteration: ” : 11 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95231”), “user_id” : 12, “name” : “bulk-inserts”, ” Iteration: ” : 12 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95232”), “user_id” : 13, “name” : “bulk-inserts”, ” Iteration: ” : 13 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95233”), “user_id” : 14, “name” : “bulk-inserts”, ” Iteration: ” : 14 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95234”), “user_id” : 15, “name” : “bulk-inserts”, ” Iteration: ” : 15 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95235”), “user_id” : 16, “name” : “bulk-inserts”, ” Iteration: ” : 16 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95236”), “user_id” : 17, “name” : “bulk-inserts”, ” Iteration: ” : 17 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95237”), “user_id” : 18, “name” : “bulk-inserts”, ” Iteration: ” : 18 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95238”), “user_id” : 19, “name” : “bulk-inserts”, ” Iteration: ” : 19 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95239”), “user_id” : 20, “name” : “bulk-inserts”, ” Iteration: ” : 20 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523a”), “user_id” : 21, “name” : “bulk-inserts”, ” Iteration: ” : 21 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523b”), “user_id” : 22, “name” : “bulk-inserts”, ” Iteration: ” : 22 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523c”), “user_id” : 23, “name” : “bulk-inserts”, ” Iteration: ” : 23 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523d”), “user_id” : 24, “name” : “bulk-inserts”, ” Iteration: ” : 24 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523e”), “user_id” : 25, “name” : “bulk-inserts”, ” Iteration: ” : 25 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523f”), “user_id” : 26, “name” : “bulk-inserts”, ” Iteration: ” : 26 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95240”), “user_id” : 27, “name” : “bulk-inserts”, ” Iteration: ” : 27 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95241”), “user_id” : 28, “name” : “bulk-inserts”, ” Iteration: ” : 28 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95242”), “user_id” : 29, “name” : “bulk-inserts”, ” Iteration: ” : 29 }
Type “it” for more
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY> db.shard_col.count()
179762
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY>
bye
[root@ip-10-0-0-197 bin]# mongo --port 39000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:39000/test
Server has startup warnings:
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten]
configsvr>
configsvr>
configsvr>
configsvr>
configsvr> sow dbs
2015-05-11T01:28:38.712+0000 E QUERY SyntaxError: Unexpected identifier
configsvr>
configsvr>
configsvr> show dbs
config 0.031GB
local 0.031GB
configsvr>
configsvr>
configsvr>
configsvr>
configsvr> use config
switched to db config
configsvr>
configsvr>
configsvr>
configsvr> show collections
actionlog
changelog
chunks
collections
databases
lockpings
locks
mongos
settings
shards
system.indexes
tags
version
configsvr>
configsvr>
configsvr> db.chunks.find()
{ “_id” : “shard_db.shard_col-user_id_MinKey”, “lastmod” : Timestamp(2, 1), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”), “ns” : “shard_db.shard_col”, “min” : { “user_id” : { “$minKey” : 1 } }, “max” : { “user_id” : 2 }, “shard” : “rs1” }
{ “_id” : “shard_db.shard_col-user_id_2.0”, “lastmod” : Timestamp(1, 2), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”), “ns” : “shard_db.shard_col”, “min” : { “user_id” : 2 }, “max” : { “user_id” : 10 }, “shard” : “rs1” }
{ “_id” : “shard_db.shard_col-user_id_10.0”, “lastmod” : Timestamp(3, 1), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”), “ns” : “shard_db.shard_col”, “min” : { “user_id” : 10 }, “max” : { “user_id” : 74908 }, “shard” : “rs2” }
{ “_id” : “shard_db.shard_col-user_id_74908.0”, “lastmod” : Timestamp(2, 3), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”), “ns” : “shard_db.shard_col”, “min” : { “user_id” : 74908 }, “max” : { “user_id” : 179771 }, “shard” : “rs2” }
{ “_id” : “shard_db.shard_col-user_id_179771.0”, “lastmod” : Timestamp(3, 0), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”), “ns” : “shard_db.shard_col”, “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “shard” : “rs1” }
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr> show dbs
config 0.031GB
local 0.031GB
configsvr>
configsvr>
configsvr>
configsvr> show collections
actionlog
changelog
chunks
collections
databases
lockpings
locks
mongos
settings
shards
system.indexes
tags
version
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27020
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27020/test
Server has startup warnings:
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> shwo dbs
2015-05-11T01:41:54.122+0000 E QUERY SyntaxError: Unexpected identifier
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> show dbs
local 1.031GB
shard_db 0.125GB
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> use local
switched to db local
rs2:PRIMARY>
rs2:PRIMARY> show collections
me
oplog.rs
startup_log
system.indexes
system.replset
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.oplog.rs.count()
179769
rs2:PRIMARY>
rs2:PRIMARY> db.oplog.rs.find()
{ “ts” : Timestamp(1431306407, 1), “h” : NumberLong(0), “v” : 2, “op” : “n”, “ns” : “”, “o” : { “msg” : “initiating set” } }
{ “ts” : Timestamp(1431306607, 1), “h” : NumberLong(0), “v” : 2, “op” : “n”, “ns” : “”, “o” : { “msg” : “Reconfig set”, “version” : 2 } }
{ “ts” : Timestamp(1431306607, 2), “h” : NumberLong(0), “v” : 2, “op” : “n”, “ns” : “”, “o” : { “msg” : “Reconfig set”, “version” : 3 } }
{ “ts” : Timestamp(1431307149, 1), “h” : NumberLong(“-6174368718995324911”), “v” : 2, “op” : “c”, “ns” : “shard_db.$cmd”, “o” : { “create” : “shard_col” } }
{ “ts” : Timestamp(1431307149, 2), “h” : NumberLong(“4777573065741603845”), “v” : 2, “op” : “i”, “ns” : “shard_db.system.indexes”, “fromMigrate” : true, “o” : { “v” : 1, “key” : { “_id” : 1 }, “name” : “_id_”, “ns” : “shard_db.shard_col” } }
{ “ts” : Timestamp(1431307149, 3), “h” : NumberLong(“8156262230854707122”), “v” : 2, “op” : “i”, “ns” : “shard_db.system.indexes”, “fromMigrate” : true, “o” : { “v” : 1, “unique” : true, “key” : { “user_id” : 1 }, “name” : “user_id_1”, “ns” : “shard_db.shard_col” } }
{ “ts” : Timestamp(1431307149, 4), “h” : NumberLong(“6225450864238645690”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “fromMigrate” : true, “o” : { “_id” : ObjectId(“5550038cac4b3d5a22a9522f”), “user_id” : 10, “name” : “bulk-inserts”, ” Iteration: ” : 10 } }
{ “ts” : Timestamp(1431307150, 1), “h” : NumberLong(“4399906657609807135”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95230”), “user_id” : 11, “name” : “bulk-inserts”, ” Iteration: ” : 11 } }
{ “ts” : Timestamp(1431307150, 2), “h” : NumberLong(“-2803345288712299313”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95231”), “user_id” : 12, “name” : “bulk-inserts”, ” Iteration: ” : 12 } }
{ “ts” : Timestamp(1431307150, 3), “h” : NumberLong(“4221914379266546928”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95232”), “user_id” : 13, “name” : “bulk-inserts”, ” Iteration: ” : 13 } }
{ “ts” : Timestamp(1431307150, 4), “h” : NumberLong(“6636220462457355284”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95233”), “user_id” : 14, “name” : “bulk-inserts”, ” Iteration: ” : 14 } }
{ “ts” : Timestamp(1431307150, 5), “h” : NumberLong(“-3153401794801507759”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95234”), “user_id” : 15, “name” : “bulk-inserts”, ” Iteration: ” : 15 } }
{ “ts” : Timestamp(1431307150, 6), “h” : NumberLong(“-590673585599494391”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95235”), “user_id” : 16, “name” : “bulk-inserts”, ” Iteration: ” : 16 } }
{ “ts” : Timestamp(1431307150, 7), “h” : NumberLong(“6561129531032826818”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95236”), “user_id” : 17, “name” : “bulk-inserts”, ” Iteration: ” : 17 } }
{ “ts” : Timestamp(1431307150, 8), “h” : NumberLong(“-4360209413840436946”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95237”), “user_id” : 18, “name” : “bulk-inserts”, ” Iteration: ” : 18 } }
{ “ts” : Timestamp(1431307150, 9), “h” : NumberLong(“5073393576299742147”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95238”), “user_id” : 19, “name” : “bulk-inserts”, ” Iteration: ” : 19 } }
{ “ts” : Timestamp(1431307150, 10), “h” : NumberLong(“2865752663299291651”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95239”), “user_id” : 20, “name” : “bulk-inserts”, ” Iteration: ” : 20 } }
{ “ts” : Timestamp(1431307150, 11), “h” : NumberLong(“-6716636981963233068”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a9523a”), “user_id” : 21, “name” : “bulk-inserts”, ” Iteration: ” : 21 } }
{ “ts” : Timestamp(1431307150, 12), “h” : NumberLong(“-3815483699654562552”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a9523b”), “user_id” : 22, “name” : “bulk-inserts”, ” Iteration: ” : 22 } }
{ “ts” : Timestamp(1431307150, 13), “h” : NumberLong(“692450203411610997”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a9523c”), “user_id” : 23, “name” : “bulk-inserts”, ” Iteration: ” : 23 } }
Type “it” for more
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# head new_script.sh
##### Killing the existing Mongo processes ################
for i in `ps -ef | egrep 'shardsvr|configsvr|replSet|configdb' | grep -v egrep | awk -F" " '{print $2}'`; do kill -9 $i; done

##### Creating Mongo data & log files #####################

mkdir -p /data /data/1 /data/2 /data/3
rm -rf /data/*
cd /data/
mkdir -p config1 config2 config3 arbiter1 arbiter2 arbiter3 router /data/1/shard1_1 /data/1/shard1_2 /data/2/shard2_1 /data/2/shard2_2 /data/3/shard3_1 /data/3/shard3_2

[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
Using username “ec2-user”.
Authenticating with public key “imported-openssh-key”
Last login: Mon May 11 01:01:00 2015 from 49.205.87.121

__| __|_ )
_| ( / Amazon Linux AMI
___|\___|___|

https://aws.amazon.com/amazon-linux-ami/2015.03-release-notes/
11 package(s) needed for security, out of 36 available
Run “sudo yum update” to apply all updates.
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$ sudo su
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# for i in `ps -ef | egrep 'shardsvr|configsvr|replSet|configdb' | grep -v egrep | awk -F" " '{print $2}'`; do kill -9 $i; done
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# rm -rf /data/
[root@ip-10-0-0-197 ec2-user]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 1.3G 6.5G 16% /
devtmpfs 490M 56K 490M 1% /dev
tmpfs 499M 0 499M 0% /dev/shm
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# cat ~/.bashrc
# .bashrc

# User specific aliases and functions

alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'

# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi

export MONGODB_HOME=/home/ec2-user/mongodb-linux-x86_64-3.0.2
export PATH=$MONGODB_HOME/bin:$PATH
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# source ~/.bashrc
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# env | grep MONGODB_HOME
MONGODB_HOME=/home/ec2-user/mongodb-linux-x86_64-3.0.2
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# cd
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# cd /etc/
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]# vi mongod.conf
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]# cd
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# cat /etc/mongod.conf

smallfiles = true
nojournal = true
quiet = true
port = 27017

logpath = /tmp/mongodb.txt
auth = true

[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongod --dbpath /data & ^C
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mkdir /data
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongod --dbpath /data &
[1] 16982
[root@ip-10-0-0-197 ~]# 2015-05-11T01:50:08.148+0000 I JOURNAL [initandlisten] journal dir=/data/journal
2015-05-11T01:50:08.148+0000 I JOURNAL [initandlisten] recover : no journal files present, no recovery needed
2015-05-11T01:50:08.291+0000 I JOURNAL [durability] Durability thread started
2015-05-11T01:50:08.291+0000 I JOURNAL [journal writer] Journal writer thread started
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] MongoDB starting : pid=16982 port=27017 dbpath=/data 64-bit host=ip-10-0-0-197
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten]
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten]
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten]
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] db version v3.0.2
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] git version: 6201872043ecbbc0a4cc169b5482dcf385fc464f
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] build info: Linux build6.nj1.10gen.cc 2.6.32-431.3.1.el6.x86_64 #1 SMP Fri Jan 3 21:39:27 UTC 2014 x86_64 BOOST_LIB_VERSION=1_49
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] allocator: tcmalloc
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] options: { storage: { dbPath: “/data” } }
2015-05-11T01:50:08.294+0000 I INDEX [initandlisten] allocating new ns file /data/local.ns, filling with zeroes…
2015-05-11T01:50:08.576+0000 I STORAGE [FileAllocator] allocating new datafile /data/local.0, filling with zeroes…
2015-05-11T01:50:08.576+0000 I STORAGE [FileAllocator] creating directory /data/_tmp
2015-05-11T01:50:08.580+0000 I STORAGE [FileAllocator] done allocating datafile /data/local.0, size: 64MB, took 0.001 secs
2015-05-11T01:50:08.586+0000 I NETWORK [initandlisten] waiting for connections on port 27017
^C
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# ps -ef | grep mongo
root 16323 11486 0 01:33 pts/0 00:00:00 mongo –port 10000
root 16982 16940 0 01:50 pts/2 00:00:00 mongod –dbpath /data
root 16994 16940 0 01:50 pts/2 00:00:00 grep mongo
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# kill -9 16323 16982
[root@ip-10-0-0-197 ~]# ps -ef | grep mongo
root 16996 16940 0 01:50 pts/2 00:00:00 grep mongo
[1]+ Killed mongod –dbpath /data
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongod --dbpath /data --f /etc/mongod.conf &
[1] 16997
[root@ip-10-0-0-197 ~]# Error parsing command line: unknown option f
try 'mongod --help' for more information

[1]+ Exit 2 mongod --dbpath /data --f /etc/mongod.conf
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongod --dbpath /data -f /etc/mongod.conf &
[1] 16998
[root@ip-10-0-0-197 ~]#
[1]+ Exit 100 mongod --dbpath /data -f /etc/mongod.conf
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# c
bash: c: command not found
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# cat /tmp/mongodb.txt
2015-05-11T01:51:13.749+0000 W – [initandlisten] Detected unclean shutdown – /data/mongod.lock is not empty.
2015-05-11T01:51:13.758+0000 I STORAGE [initandlisten] **************
2015-05-11T01:51:13.759+0000 I STORAGE [initandlisten] Error: journal files are present in journal directory, yet starting without journaling enabled.
2015-05-11T01:51:13.759+0000 I STORAGE [initandlisten] It is recommended that you start with journaling enabled so that recovery may occur.
2015-05-11T01:51:13.759+0000 I STORAGE [initandlisten] **************
2015-05-11T01:51:13.759+0000 I STORAGE [initandlisten] exception in initAndListen: 13597 can't start without --journal enabled when journal/ files are present, terminating
2015-05-11T01:51:13.759+0000 I CONTROL [initandlisten] dbexit: rc: 100
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# rm -rf /data/*
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongod --dbpath /data -f /etc/mongod.conf &
[1] 17003
[root@ip-10-0-0-197 ~]# 2015-05-11T01:52:19.870+0000 I CONTROL log file “/tmp/mongodb.txt” exists; moved to “/tmp/mongodb.txt.2015-05-11T01-52-19”.

[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# ps -ef | grep mogo
root 17013 16940 0 01:52 pts/2 00:00:00 grep mogo
[root@ip-10-0-0-197 ~]# ps -ef | grep mongo
root 17003 16940 0 01:52 pts/2 00:00:00 mongod –dbpath /data -f /etc/mongod.conf
root 17015 16940 0 01:52 pts/2 00:00:00 grep mongo
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongo
MongoDB shell version: 3.0.2
connecting to: test
>
>
>
>
>
>
>
>
> use admin
switched to db admin
>
>
>
>
>
>
> use admin
switched to db admin
> db.createUser(
... {
... user: "usr",
... pwd: "pwd",
... roles: [ "readWriteAdminAnyDatabase",
... "dbAdminAnyDatabase",
... "userAdminAnyDatabase" ]
... }
... )
2015-05-11T02:00:03.294+0000 E QUERY Error: couldn't add user: No role named readWriteAdminAnyDatabase@admin
at Error (<anonymous>)
at DB.createUser (src/mongo/shell/db.js:1066:11)
at (shell):1:4 at src/mongo/shell/db.js:1066
>
>
> use admin
switched to db admin
> db.createUser(
... {
... user: "usr",
... pwd: "pwd",
... roles: [ "readWriteAnyDatabase",
... "dbAdminAnyDatabase",
... "userAdminAnyDatabase" ]
... }
... )
Successfully added user: {
"user" : "usr",
"roles" : [
"readWriteAnyDatabase",
"dbAdminAnyDatabase",
"userAdminAnyDatabase"
]
}
>
>
>
>
>
>
>
bye
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongo
MongoDB shell version: 3.0.2
connecting to: test
>
>
> show dbs
2015-05-11T02:00:36.460+0000 E QUERY Error: listDatabases failed:{
“ok” : 0,
“errmsg” : “not authorized on admin to execute command { listDatabases: 1.0 }”,
“code” : 13
}
at Error (<anonymous>)
at Mongo.getDBs (src/mongo/shell/mongo.js:47:15)
at shellHelper.show (src/mongo/shell/utils.js:630:33)
at shellHelper (src/mongo/shell/utils.js:524:36)
at (shellhelp2):1:1 at src/mongo/shell/mongo.js:47
>
>
>
>
> db.auth("usr","pwd")
Error: 18 Authentication failed.
0
>
>
>
>
>
bye
[root@ip-10-0-0-197 ~]# mongo ip-10-0-0-197:27017/admin -u usr -p
MongoDB shell version: 3.0.2
Enter password:
connecting to: ip-10-0-0-197:27017/admin
>
>
>
> show dbs
admin 0.031GB
local 0.031GB
>
>
>
bye
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongo ip-10-0-0-197:27017/admin
MongoDB shell version: 3.0.2
connecting to: ip-10-0-0-197:27017/admin
>
>
>
> show dbs
2015-05-11T02:02:39.452+0000 E QUERY Error: listDatabases failed:{
“ok” : 0,
“errmsg” : “not authorized on admin to execute command { listDatabases: 1.0 }”,
“code” : 13
}
at Error (<anonymous>)
at Mongo.getDBs (src/mongo/shell/mongo.js:47:15)
at shellHelper.show (src/mongo/shell/utils.js:630:33)
at shellHelper (src/mongo/shell/utils.js:524:36)
at (shellhelp2):1:1 at src/mongo/shell/mongo.js:47
>
>
> db.auth("usr","pwd")
1
>
>
>
> show dbs
admin 0.031GB
local 0.031GB
>
>
>
bye
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# vi /tmp/mykeyfile
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# cat /tmp/mykeyfile
jsdhfkjashdfhasjdfkasdjfhakjsdhfkhasdkljfhakjsdhfjkaskdjfhksadhfkasdh
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongo ip-10-0-0-197:27017/admin
MongoDB shell version: 3.0.2
connecting to: ip-10-0-0-197:27017/admin
>
bye
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongo ip-10-0-0-197:27017/admin -u usr -p pwd
MongoDB shell version: 3.0.2
connecting to: ip-10-0-0-197:27017/admin
>
>
> show dbs
admin 0.031GB
local 0.031GB
>
>
> use amdin
switched to db amdin
>
>
> show collections
>
>
> use admin
switched to db admin
>
>
> show collections
system.indexes
system.users
system.version
>
>
> db.system.users.find()
{ “_id” : “admin.usr”, “user” : “usr”, “db” : “admin”, “credentials” : { “SCRAM-SHA-1” : { “iterationCount” : 10000, “salt” : “mdXNHVjkE+GIuUYCB/cAxQ==”, “storedKey” : “AgMRBewp0Bz+WLrhD2m6Cb1JpyM=”, “serverKey” : “TktSort4/h9n67cgRNGuphRlvnc=” } }, “roles” : [ { “role” : “readWriteAnyDatabase”, “db” : “admin” }, { “role” : “dbAdminAnyDatabase”, “db” : “admin” }, { “role” : “userAdminAnyDatabase”, “db” : “admin” } ] }
>
>
———————–
Using username “ec2-user”.
Authenticating with public key “imported-openssh-key”
Last login: Sun May 10 17:45:00 2015 from 49.205.126.24

__| __|_ )
_| ( / Amazon Linux AMI
___|\___|___|

https://aws.amazon.com/amazon-linux-ami/2015.03-release-notes/
11 package(s) needed for security, out of 36 available
Run “sudo yum update” to apply all updates.
grep: write error
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$ sudo su
grep: write error
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 7.8G 0 100% /
devtmpfs 490M 56K 490M 1% /dev
tmpfs 499M 0 499M 0% /dev/shm
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# cd /data
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]# cd /home/ec2-user/mongodb-linux-x86_64-3.0.2/bin/
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ./mongo --port 10000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:10000/test
Server has startup warnings:
2015-05-10T16:44:26.406+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-10T16:44:26.406+0000 I CONTROL
mongos>
mongos> sh.help()
sh.addShard( host ) server:port OR setname/server:port
sh.enableSharding(dbname) enables sharding on the database dbname
sh.shardCollection(fullName,key,unique) shards the collection
sh.splitFind(fullName,find) splits the chunk that find is in at the median
sh.splitAt(fullName,middle) splits the chunk that middle is in at middle
sh.moveChunk(fullName,find,to) move the chunk where ‘find’ is to ‘to’ (name of shard)
sh.setBalancerState( <bool on or not> ) turns the balancer on or off true=on, false=off
sh.getBalancerState() return true if enabled
sh.isBalancerRunning() return true if the balancer has work in progress on any mongos
sh.disableBalancing(coll) disable balancing on one collection
sh.enableBalancing(coll) re-enable balancing on one collection
sh.addShardTag(shard,tag) adds the tag to the shard
sh.removeShardTag(shard,tag) removes the tag from the shard
sh.addTagRange(fullName,min,max,tag) tags the specified range of the given collection
sh.removeTagRange(fullName,min,max,tag) removes the tagged range of the given collection
sh.status() prints a general overview of the cluster
mongos>
mongos> sh.remove
sh.removeShardTag( sh.removeTagRange(
mongos> sh.remove
sh.removeShardTag( sh.removeTagRange(
mongos>
mongos>
bye
[root@ip-10-0-0-197 bin]# cd /data/
[root@ip-10-0-0-197 data]# ll -lhtr
total 40K
drwxr-xr-x 2 root root 4.0K May 10 16:39 router
drwxr-xr-x 4 root root 4.0K May 10 16:39 3
drwxr-xr-x 4 root root 4.0K May 10 16:39 2
drwxr-xr-x 4 root root 4.0K May 10 16:39 1
drwxr-xr-x 3 root root 4.0K May 10 16:39 arbiter1
drwxr-xr-x 3 root root 4.0K May 10 16:39 arbiter3
drwxr-xr-x 3 root root 4.0K May 10 16:39 arbiter2
drwxr-xr-x 3 root root 4.0K May 10 16:44 config2
drwxr-xr-x 3 root root 4.0K May 10 16:44 config1
drwxr-xr-x 3 root root 4.0K May 10 16:44 config3
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]# rm -rf 3/
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]# df -h /
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 7.8G 0 100% /
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]# cd /tmp/
[root@ip-10-0-0-197 tmp]# ll -lhtr
total 3.2M
drwxr-xr-x 2 root root 4.0K May 8 11:54 hsperfdata_root
-rw-r–r– 1 root root 2.7K May 10 16:11 router.log.2015-05-10T16-35-03
-rw-r–r– 1 root root 3.5K May 10 16:20 arbiter1.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 3.5K May 10 16:20 arbiter2.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 3.5K May 10 16:20 arbiter3.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 7.0K May 10 16:20 shard1_1.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 7.0K May 10 16:20 shard2_1.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 7.0K May 10 16:20 shard3_1.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 6.4K May 10 16:20 shard1_2.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 6.4K May 10 16:20 shard2_2.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 6.4K May 10 16:20 shard3_2.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 2.7K May 10 16:35 router.log.2015-05-10T16-36-25
-rw-r–r– 1 root root 3.5K May 10 16:38 router.log.2015-05-10T16-37-31
-rw-r–r– 1 root root 7.3K May 10 16:39 config3.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 7.3K May 10 16:39 config2.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 7.3K May 10 16:39 config1.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 11K May 10 16:39 router.log.2015-05-10T16-44-26
srwx—— 1 root root 0 May 10 16:39 mongodb-27012.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27010.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39001.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27022.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39000.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27031.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27020.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27030.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39002.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27011.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27032.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27021.sock
srwx—— 1 root root 0 May 10 16:44 mongodb-10000.sock
-rw-r–r– 1 root root 3.5K May 10 16:52 arbiter2.log
-rw-r–r– 1 root root 6.4K May 10 16:52 shard2_2.log
-rw-r–r– 1 root root 7.8K May 10 17:36 shard1_2.log
-rw-r–r– 1 root root 12K May 10 17:45 shard2_1.log
-rw-r–r– 1 root root 416K May 10 17:45 shard1_1.log
-rw-r–r– 1 root root 592K May 10 17:50 arbiter1.log
-rw-r–r– 1 root root 1.6M May 10 19:06 router.log
-rw-r–r– 1 root root 172K May 10 19:06 config1.log
-rw-r–r– 1 root root 172K May 10 19:06 config3.log
-rw-r–r– 1 root root 172K May 10 19:06 config2.log
-rw-r–r– 1 root root 7.3K May 11 00:55 shard3_1.log
-rw-r–r– 1 root root 14K May 11 00:56 shard3_2.log
-rw-r–r– 1 root root 18K May 11 00:56 arbiter3.log
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# > *log*
bash: *log*: ambiguous redirect
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# > *.log*
bash: *.log*: ambiguous redirect
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# > *.log
bash: *.log: ambiguous redirect
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# rm -f *log.2015*
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# ll -lhtr
total 3.2M
drwxr-xr-x 2 root root 4.0K May 8 11:54 hsperfdata_root
srwx—— 1 root root 0 May 10 16:39 mongodb-27012.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27010.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39001.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27022.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39000.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27031.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27020.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27030.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39002.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27011.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27032.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27021.sock
srwx—— 1 root root 0 May 10 16:44 mongodb-10000.sock
-rw-r–r– 1 root root 3.5K May 10 16:52 arbiter2.log
-rw-r–r– 1 root root 6.4K May 10 16:52 shard2_2.log
-rw-r–r– 1 root root 7.8K May 10 17:36 shard1_2.log
-rw-r–r– 1 root root 12K May 10 17:45 shard2_1.log
-rw-r–r– 1 root root 416K May 10 17:45 shard1_1.log
-rw-r–r– 1 root root 592K May 10 17:50 arbiter1.log
-rw-r–r– 1 root root 1.6M May 10 19:06 router.log
-rw-r–r– 1 root root 172K May 10 19:06 config1.log
-rw-r–r– 1 root root 172K May 10 19:06 config3.log
-rw-r–r– 1 root root 172K May 10 19:06 config2.log
-rw-r–r– 1 root root 7.3K May 11 00:55 shard3_1.log
-rw-r–r– 1 root root 14K May 11 00:56 shard3_2.log
-rw-r–r– 1 root root 100K May 11 00:57 arbiter3.log
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 5.8G 1.9G 76% /
devtmpfs 490M 56K 490M 1% /dev
tmpfs 499M 0 499M 0% /dev/shm
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# cd -
/data
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]# cd /home/ec2-user/mongodb-linux-x86_64-3.0.2/bin/
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ./mongo 27010
MongoDB shell version: 3.0.2
connecting to: 27010
2015-05-11T00:57:59.189+0000 W NETWORK Failed to connect to 127.0.0.1:27017, reason: errno:111 Connection refused
2015-05-11T00:57:59.191+0000 E QUERY Error: couldn’t connect to server 127.0.0.1:27017 (127.0.0.1), connection attempt failed
at connect (src/mongo/shell/mongo.js:179:14)
at (connect):1:6 at src/mongo/shell/mongo.js:179
exception: connect failed
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ./mongo 27020
MongoDB shell version: 3.0.2
connecting to: 27020
2015-05-11T00:58:06.533+0000 W NETWORK Failed to connect to 127.0.0.1:27017, reason: errno:111 Connection refused
2015-05-11T00:58:06.535+0000 E QUERY Error: couldn’t connect to server 127.0.0.1:27017 (127.0.0.1), connection attempt failed
at connect (src/mongo/shell/mongo.js:179:14)
at (connect):1:6 at src/mongo/shell/mongo.js:179
exception: connect failed
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ./mongo 27030
MongoDB shell version: 3.0.2
connecting to: 27030
2015-05-11T00:58:10.231+0000 W NETWORK Failed to connect to 127.0.0.1:27017, reason: errno:111 Connection refused
2015-05-11T00:58:10.232+0000 E QUERY Error: couldn’t connect to server 127.0.0.1:27017 (127.0.0.1), connection attempt failed
at connect (src/mongo/shell/mongo.js:179:14)
at (connect):1:6 at src/mongo/shell/mongo.js:179
exception: connect failed
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ll -lhtr
total 113M
-rwxr-xr-x 1 1046 1046 4.1M Apr 8 20:39 bsondump
-rwxr-xr-x 1 1046 1046 5.6M Apr 8 20:39 mongostat
-rwxr-xr-x 1 1046 1046 5.6M Apr 8 20:39 mongofiles
-rwxr-xr-x 1 1046 1046 5.7M Apr 8 20:39 mongoexport
-rwxr-xr-x 1 1046 1046 5.8M Apr 8 20:39 mongoimport
-rwxr-xr-x 1 1046 1046 6.0M Apr 8 20:39 mongorestore
-rwxr-xr-x 1 1046 1046 5.8M Apr 8 20:39 mongodump
-rwxr-xr-x 1 1046 1046 5.4M Apr 8 20:39 mongotop
-rwxr-xr-x 1 1046 1046 5.3M Apr 8 20:39 mongooplog
-rwxr-xr-x 1 1046 1046 22M Apr 8 20:46 mongoperf
-rwxr-xr-x 1 1046 1046 22M Apr 8 20:46 mongod
-rwxr-xr-x 1 1046 1046 11M Apr 8 20:46 mongos
-rwxr-xr-x 1 1046 1046 12M Apr 8 20:46 mongo
-rw-r–r– 1 root root 4.3K May 10 15:13 shard_creation_script.sh
-rw-r–r– 1 root root 4.3K May 10 15:36 new_shard_creation_script.sh
-rw-r–r– 1 root root 3.9K May 10 15:56 NSS.sh
-rw-r–r– 1 root root 3.9K May 10 16:06 shard-script.sh
-rw-r–r– 1 root root 4.0K May 10 16:39 new_script.sh
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# vi new_script.sh
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# rm -rf /tmp/*
[root@ip-10-0-0-197 bin]# rm -rf /dat/*
[root@ip-10-0-0-197 bin]# rm -rf /data/*
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# sh -x new_script.sh
++ awk ‘-F ‘ ‘{print $2}’
++ grep -v egrep
++ egrep ‘shardsvr|configsvr|replSet|configdb’
++ ps -ef
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18909
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18910
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18911
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18912
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18914
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18915
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18918
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18919
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18920
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 19058
+ mkdir -p /data /data/1 /data/2 /data/3
+ rm -rf /data/1 /data/2 /data/3
+ cd /data/
+ mkdir -p config1 config2 config3 arbiter1 arbiter2 arbiter3 router /data/1/shard1_1 /data/1/shard1_2 /data/2/shard2_1 /data/2/shard2_2 /data/3/shard3_1 /data/3/shard3_2
+ cd /home/ec2-user/mongodb-linux-x86_64-3.0.2/bin
+ sleep 200
+ ./mongod --shardsvr --replSet rs2 --dbpath /data/2/shard2_1 --logpath /tmp/shard2_1.log --port 27020 --config /etc/mongod.conf
+ ./mongod --shardsvr --replSet rs2 --dbpath /data/2/shard2_2 --logpath /tmp/shard2_2.log --port 27021 --config /etc/mongod.conf
+ ./mongod --replSet rs1 --dbpath /data/arbiter1 --logpath /tmp/arbiter1.log --port 27012 --config /etc/mongod.conf
+ ./mongod --replSet rs2 --dbpath /data/arbiter2 --logpath /tmp/arbiter2.log --port 27022 --config /etc/mongod.conf
+ ./mongod --shardsvr --replSet rs1 --dbpath /data/1/shard1_2 --logpath /tmp/shard1_2.log --port 27011 --config /etc/mongod.conf
+ ./mongod --shardsvr --replSet rs1 --dbpath /data/1/shard1_1 --logpath /tmp/shard1_1.log --port 27010 --config /etc/mongod.conf
+ ./mongod --configsvr --dbpath /data/config3 --logpath /tmp/config3.log --port 39002 --config /etc/mongod.conf
+ ./mongod --configsvr --dbpath /data/config2 --logpath /tmp/config2.log --port 39001 --config /etc/mongod.conf
+ ./mongod --configsvr --dbpath /data/config1 --logpath /tmp/config1.log --port 39000 --config /etc/mongod.conf

+ sleep 200
+ ./mongos --configdb ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 --logpath /tmp/router.log --port 10000
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval 'rs.initiate()'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin
[object Object]
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval 'rs.initiate()'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin
[object Object]
+ sleep 200
+ echo -e '\n\n Replica sets are being added. \n\n'
Replica sets are being added.
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval 'rs.add("ec2-52-7-8-107.compute-1.amazonaws.com:27011")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin
[object Object]
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval 'rs.add("ec2-52-7-8-107.compute-1.amazonaws.com:27021")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin
[object Object]
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval 'rs.addArb("ec2-52-7-8-107.compute-1.amazonaws.com:27012")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin
[object Object]
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval 'rs.addArb("ec2-52-7-8-107.compute-1.amazonaws.com:27022")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin
[object Object]
+ sleep 200
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin --eval 'sh.addShard("rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin
[object Object]
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin --eval 'sh.addShard("rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin
[object Object]
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 10000
bash: mongo: command not found
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# source ~/.bashrc
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 10000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:10000/test
Server has startup warnings:
2015-05-11T01:03:27.551+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:03:27.551+0000 I CONTROL
mongos>
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“554fffe05384fafde4642cd9”)
}
shards:
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ “_id” : “admin”, “partitioned” : false, “primary” : “config” }

mongos>
mongos> sh.addShard("rs1/ip-10-0-0-197:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011")
{ "shardAdded" : "rs1", "ok" : 1 }
mongos> sh.addShard("rs2/ip-10-0-0-197:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021")
{ "shardAdded" : "rs2", "ok" : 1 }
mongos>
mongos>
mongos>
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“554fffe05384fafde4642cd9”)
}
shards:
{ “_id” : “rs1”, “host” : “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010” }
{ “_id” : “rs2”, “host” : “rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27021,ip-10-0-0-197:27020” }
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ “_id” : “admin”, “partitioned” : false, “primary” : “config” }

mongos>
mongos>
mongos>
mongos> use shard_db
switched to db shard_db
mongos> db.shard_col.ensureIndex( { user_id : 1 }, { unique : true })u
2015-05-11T01:18:22.114+0000 E QUERY SyntaxError: Unexpected identifier
mongos>
mongos>
mongos> db.shard_col.ensureIndex( { user_id : 1 }, { unique : true })
{
“raw” : {
“rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010” : {
“createdCollectionAutomatically” : true,
“numIndexesBefore” : 1,
“numIndexesAfter” : 2,
“ok” : 1,
“$gleStats” : {
“lastOpTime” : Timestamp(1431307104, 2),
“electionId” : ObjectId(“555000a7c25ee1ddcace86ce”)
}
}
},
“ok” : 1
}
mongos>
mongos>
mongos> sh.enableSharding("shard_db")
{ "ok" : 1 }
mongos>
mongos> sh.shardCollection("shard_db.shard_col", {"user_id":1})
{ "collectionsharded" : "shard_db.shard_col", "ok" : 1 }
mongos>
mongos>
mongos> sh.startBalancer()
mongos> sh.getBalancerState()
true
mongos> sh.isBalancerRunning()
false
mongos>
mongos>
mongos> for(var i = 1; i <= 100000 ; i++){db.shard_col.insert({"user_id" : i , "name" : "bulk-inserts", " Iteration: " : i });}
WriteResult({ “nInserted” : 1 })
mongos>
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“554fffe05384fafde4642cd9”)
}
shards:
{ “_id” : “rs1”, “host” : “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010” }
{ “_id” : “rs2”, “host” : “rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27021,ip-10-0-0-197:27020” }
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
1 : Success
databases:
{ “_id” : “admin”, “partitioned” : false, “primary” : “config” }
{ “_id” : “shard_db”, “partitioned” : true, “primary” : “rs1” }
shard_db.shard_col
shard key: { “user_id” : 1 }
chunks:
rs1 2
rs2 1
{ “user_id” : { “$minKey” : 1 } } –>> { “user_id” : 2 } on : rs1 Timestamp(2, 1)
{ “user_id” : 2 } –>> { “user_id” : 10 } on : rs1 Timestamp(1, 2)
{ “user_id” : 10 } –>> { “user_id” : { “$maxKey” : 1 } } on : rs2 Timestamp(2, 0)

mongos>
mongos>
mongos>
mongos>
mongos>
mongos> show dbs
admin (empty)
config 0.016GB
shard_db 0.094GB
mongos>
mongos>
mongos> use shard_db
switched to db shard_db
mongos>
mongos>
mongos>
mongos>
mongos> db.shard_col.find({ “user_id” : 10 }).explain()
{
“queryPlanner” : {
“mongosPlannerVersion” : 1,
“winningPlan” : {
“stage” : “SINGLE_SHARD”,
“shards” : [
{
“shardName” : “rs2”,
“connectionString” : “rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27021,ip-10-0-0-197:27020”,
“serverInfo” : {
“host” : “ip-10-0-0-197”,
“port” : 27020,
“version” : “3.0.2”,
“gitVersion” : “6201872043ecbbc0a4cc169b5482dcf385fc464f”
},
“plannerVersion” : 1,
“namespace” : “shard_db.shard_col”,
“indexFilterSet” : false,
“parsedQuery” : {
“user_id” : {
“$eq” : 10
}
},
“winningPlan” : {
“stage” : “FETCH”,
“inputStage” : {
“stage” : “SHARDING_FILTER”,
“inputStage” : {
“stage” : “IXSCAN”,
“keyPattern” : {
“user_id” : 1
},
“indexName” : “user_id_1”,
“isMultiKey” : false,
“direction” : “forward”,
“indexBounds” : {
“user_id” : [
“[10.0, 10.0]”
]
}
}
}
},
“rejectedPlans” : [ ]
}
]
}
},
“ok” : 1
}
mongos>
mongos>
mongos>
mongos>
mongos> db.shard_col.find({ “user_id” : 9 }).explain()
{
“queryPlanner” : {
“mongosPlannerVersion” : 1,
“winningPlan” : {
“stage” : “SINGLE_SHARD”,
“shards” : [
{
“shardName” : “rs1”,
“connectionString” : “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010”,
“serverInfo” : {
“host” : “ip-10-0-0-197”,
“port” : 27010,
“version” : “3.0.2”,
“gitVersion” : “6201872043ecbbc0a4cc169b5482dcf385fc464f”
},
“plannerVersion” : 1,
“namespace” : “shard_db.shard_col”,
“indexFilterSet” : false,
“parsedQuery” : {
“user_id” : {
“$eq” : 9
}
},
“winningPlan” : {
“stage” : “FETCH”,
“inputStage” : {
“stage” : “SHARDING_FILTER”,
“inputStage” : {
“stage” : “IXSCAN”,
“keyPattern” : {
“user_id” : 1
},
“indexName” : “user_id_1”,
“isMultiKey” : false,
“direction” : “forward”,
“indexBounds” : {
“user_id” : [
“[9.0, 9.0]”
]
}
}
}
},
“rejectedPlans” : [ ]
}
]
}
},
“ok” : 1
}
mongos>
mongos>
mongos>
mongos>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 5.8G 1.9G 76% /
devtmpfs 490M 56K 490M 1% /dev
tmpfs 499M 0 499M 0% /dev/shm
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo –port 10000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:10000/test
Server has startup warnings:
2015-05-11T01:03:27.551+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:03:27.551+0000 I CONTROL
mongos>
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> use shard_db
switched to db shard_db
mongos>
mongos>
mongos>
mongos> for(var i = 100001; i <= 120000 ; i++){db.shard_col.insert({“user_id” : i , “name” : “bulk-inserts”, ” Iteration: ” : i });}
WriteResult({ “nInserted” : 1 })
mongos>
mongos>
mongos>
mongos> for(var i = 120001; i <= 200000 ; i++){db.shard_col.insert({“user_id” : i , “name” : “bulk-inserts”, ” Iteration: ” : i });}

WriteResult({ “nInserted” : 1 })
mongos>
mongos>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 6.0G 1.8G 78% /
devtmpfs 490M 56K 490M 1% /dev
tmpfs 499M 0 499M 0% /dev/shm
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo –port 10000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:10000/test
Server has startup warnings:
2015-05-11T01:03:27.551+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:03:27.551+0000 I CONTROL
mongos>
mongos>
mongos>
mongos>
mongos> use shard_db
switched to db shard_db
mongos>
mongos>
mongos>
mongos> db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } })
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522f”), “user_id” : 10, “name” : “bulk-inserts”, ” Iteration: ” : 10 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522a”), “user_id” : 5, “name” : “bulk-inserts”, ” Iteration: ” : 5 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95230”), “user_id” : 11, “name” : “bulk-inserts”, ” Iteration: ” : 11 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522b”), “user_id” : 6, “name” : “bulk-inserts”, ” Iteration: ” : 6 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95231”), “user_id” : 12, “name” : “bulk-inserts”, ” Iteration: ” : 12 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522c”), “user_id” : 7, “name” : “bulk-inserts”, ” Iteration: ” : 7 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95232”), “user_id” : 13, “name” : “bulk-inserts”, ” Iteration: ” : 13 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522d”), “user_id” : 8, “name” : “bulk-inserts”, ” Iteration: ” : 8 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95233”), “user_id” : 14, “name” : “bulk-inserts”, ” Iteration: ” : 14 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522e”), “user_id” : 9, “name” : “bulk-inserts”, ” Iteration: ” : 9 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95234”), “user_id” : 15, “name” : “bulk-inserts”, ” Iteration: ” : 15 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95235”), “user_id” : 16, “name” : “bulk-inserts”, ” Iteration: ” : 16 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95236”), “user_id” : 17, “name” : “bulk-inserts”, ” Iteration: ” : 17 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95237”), “user_id” : 18, “name” : “bulk-inserts”, ” Iteration: ” : 18 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95238”), “user_id” : 19, “name” : “bulk-inserts”, ” Iteration: ” : 19 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95239”), “user_id” : 20, “name” : “bulk-inserts”, ” Iteration: ” : 20 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523a”), “user_id” : 21, “name” : “bulk-inserts”, ” Iteration: ” : 21 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523b”), “user_id” : 22, “name” : “bulk-inserts”, ” Iteration: ” : 22 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523c”), “user_id” : 23, “name” : “bulk-inserts”, ” Iteration: ” : 23 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523d”), “user_id” : 24, “name” : “bulk-inserts”, ” Iteration: ” : 24 }
mongos>
mongos>
mongos>
mongos> db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } }).explin()
2015-05-11T01:33:53.955+0000 E QUERY TypeError: Object DBQuery: shard_db.shard_col -> { “user_id” : { “$gte” : 5, “$lt” : 25 } } has no method ‘explin’
at (shell):1:58
mongos>
mongos>
mongos> db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } }).explain()
{
“queryPlanner” : {
“mongosPlannerVersion” : 1,
“winningPlan” : {
“stage” : “SHARD_MERGE”,
“shards” : [
{
“shardName” : “rs1”,
“connectionString” : “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010”,
“serverInfo” : {
“host” : “ip-10-0-0-197”,
“port” : 27010,
“version” : “3.0.2”,
“gitVersion” : “6201872043ecbbc0a4cc169b5482dcf385fc464f”
},
“plannerVersion” : 1,
“namespace” : “shard_db.shard_col”,
“indexFilterSet” : false,
“parsedQuery” : {
“$and” : [
{
“user_id” : {
“$lt” : 25
}
},
{
“user_id” : {
“$gte” : 5
}
}
]
},
“winningPlan” : {
“stage” : “FETCH”,
“inputStage” : {
“stage” : “SHARDING_FILTER”,
“inputStage” : {
“stage” : “IXSCAN”,
“keyPattern” : {
“user_id” : 1
},
“indexName” : “user_id_1”,
“isMultiKey” : false,
“direction” : “forward”,
“indexBounds” : {
“user_id” : [
“[5.0, 25.0)”
]
}
}
}
},
“rejectedPlans” : [ ]
},
{
“shardName” : “rs2”,
“connectionString” : “rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27021,ip-10-0-0-197:27020”,
“serverInfo” : {
“host” : “ip-10-0-0-197”,
“port” : 27020,
“version” : “3.0.2”,
“gitVersion” : “6201872043ecbbc0a4cc169b5482dcf385fc464f”
},
“plannerVersion” : 1,
“namespace” : “shard_db.shard_col”,
“indexFilterSet” : false,
“parsedQuery” : {
“$and” : [
{
“user_id” : {
“$lt” : 25
}
},
{
“user_id” : {
“$gte” : 5
}
}
]
},
“winningPlan” : {
“stage” : “FETCH”,
“inputStage” : {
“stage” : “SHARDING_FILTER”,
“inputStage” : {
“stage” : “IXSCAN”,
“keyPattern” : {
“user_id” : 1
},
“indexName” : “user_id_1”,
“isMultiKey” : false,
“direction” : “forward”,
“indexBounds” : {
“user_id” : [
“[5.0, 25.0)”
]
}
}
}
},
“rejectedPlans” : [ ]
}
]
}
},
“ok” : 1
}
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } }).count()
20
mongos> use non_shard_db
switched to db non_shard_db
mongos> for(var i = 1; i <= 1000 ; i++){db.shard_col.insert({“user_id” : i , “name” : “bulk-inserts”, ” Iteration: ” : i });}
WriteResult({ “nInserted” : 1 })
mongos>
mongos>
mongos>
mongos>
mongos> db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } }).explain()
{
“queryPlanner” : {
“mongosPlannerVersion” : 1,
“winningPlan” : {
“stage” : “SINGLE_SHARD”,
“shards” : [
{
“shardName” : “rs1”,
“connectionString” : “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010”,
“serverInfo” : {
“host” : “ip-10-0-0-197”,
“port” : 27010,
“version” : “3.0.2”,
“gitVersion” : “6201872043ecbbc0a4cc169b5482dcf385fc464f”
},
“plannerVersion” : 1,
“namespace” : “non_shard_db.shard_col”,
“indexFilterSet” : false,
“parsedQuery” : {
“$and” : [
{
“user_id” : {
“$lt” : 25
}
},
{
“user_id” : {
“$gte” : 5
}
}
]
},
“winningPlan” : {
“stage” : “COLLSCAN”,
“filter” : {
“$and” : [
{
“user_id” : {
“$lt” : 25
}
},
{
“user_id” : {
“$gte” : 5
}
}
]
},
“direction” : “forward”
},
“rejectedPlans” : [ ]
}
]
}
},
“ok” : 1
}
mongos>
mongos>
mongos>
mongos> db
non_shard_db
mongos>
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> show dbs
admin (empty)
config 0.016GB
non_shard_db 0.031GB
shard_db 0.156GB
mongos>
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> use configg
switched to db configg
mongos>
mongos>
mongos>
mongos>
mongos> use config
switched to db config
mongos>
mongos>
mongos>
mongos>
mongos> show ds
2015-05-11T01:38:53.067+0000 E QUERY Error: don’t know how to show [ds]
at Error (<anonymous>)
at shellHelper.show (src/mongo/shell/utils.js:733:11)
at shellHelper (src/mongo/shell/utils.js:524:36)
at (shellhelp2):1:1 at src/mongo/shell/utils.js:733
mongos>
mongos>
mongos> show dbs
admin (empty)
config 0.016GB
non_shard_db 0.031GB
shard_db 0.156GB
mongos>
mongos>
mongos>
mongos>
mongos> use config
switched to db config
mongos>
mongos>
mongos> shwo collections
2015-05-11T01:39:17.799+0000 E QUERY SyntaxError: Unexpected identifier
mongos>
mongos>
mongos> show collections
actionlog
changelog
chunks
collections
databases
lockpings
locks
mongos
settings
shards
system.indexes
tags
version
mongos>
mongos> db.settings.find()
{ “_id” : “chunksize”, “value” : 64 }
{ “_id” : “balancer”, “stopped” : false }
mongos>
mongos>
mongos> db.changelog.find()
{ “_id” : “ip-10-0-0-197-2015-05-11T01:03:28-554fffe05384fafde4642cd8”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:03:28.469Z”), “what” : “starting upgrade of config database”, “ns” : “config.version”, “details” : { “from” : 0, “to” : 6 } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:03:28-554fffe05384fafde4642cda”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:03:28.529Z”), “what” : “finished upgrade of config database”, “ns” : “config.version”, “details” : { “from” : 0, “to” : 6 } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:17:44-555003385384fafde4642d88”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:17:44.353Z”), “what” : “addShard”, “ns” : “”, “details” : { “name” : “rs1”, “host” : “rs1/ip-10-0-0-197:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:17:52-555003405384fafde4642d89”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:17:52.807Z”), “what” : “addShard”, “ns” : “”, “details” : { “name” : “rs2”, “host” : “rs2/ip-10-0-0-197:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:18:43-555003735384fafde4642d96”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:18:43.905Z”), “what” : “shardCollection.start”, “ns” : “shard_db.shard_col”, “details” : { “shardKey” : { “user_id” : 1 }, “collection” : “shard_db.shard_col”, “primary” : “rs1:rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010”, “initShards” : [ ], “numChunks” : 1 } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:18:43-555003735384fafde4642d98”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:18:43.927Z”), “what” : “shardCollection”, “ns” : “shard_db.shard_col”, “details” : { “version” : “1|0||555003735384fafde4642d97” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:08-5550038cc25ee1ddcace86d0”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:08.788Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : { “$minKey” : 1 } }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 1, “of” : 3, “chunk” : { “min” : { “user_id” : { “$minKey” : 1 } }, “max” : { “user_id” : 2 }, “lastmod” : Timestamp(1, 1), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:08-5550038cc25ee1ddcace86d1”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:08.792Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : { “$minKey” : 1 } }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 2, “of” : 3, “chunk” : { “min” : { “user_id” : 2 }, “max” : { “user_id” : 10 }, “lastmod” : Timestamp(1, 2), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:08-5550038cc25ee1ddcace86d2”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:08.797Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : { “$minKey” : 1 } }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 3, “of” : 3, “chunk” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “lastmod” : Timestamp(1, 3), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:08-5550038cc25ee1ddcace86d4”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:08.842Z”), “what” : “moveChunk.start”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “from” : “rs1”, “to” : “rs2” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:10-5550038e5c36cdce695aeb3d”, “server” : “ip-10-0-0-197”, “clientAddr” : “:27017”, “time” : ISODate(“2015-05-11T01:19:10.900Z”), “what” : “moveChunk.to”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “step 1 of 5” : 296, “step 2 of 5” : 286, “step 3 of 5” : 0, “step 4 of 5” : 0, “step 5 of 5” : 1468, “note” : “success” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:10-5550038ec25ee1ddcace86d5”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:10.932Z”), “what” : “moveChunk.commit”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “from” : “rs1”, “to” : “rs2”, “cloned” : NumberLong(1), “clonedBytes” : NumberLong(84), “catchup” : NumberLong(0), “steady” : NumberLong(0) } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:10-5550038ec25ee1ddcace86d6”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:10.957Z”), “what” : “moveChunk.from”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “step 1 of 6” : 0, “step 2 of 6” : 19, “step 3 of 6” : 4, “step 4 of 6” : 2049, “step 5 of 6” : 34, “step 6 of 6” : 0, “to” : “rs2”, “from” : “rs1”, “note” : “success” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:53-555005215c36cdce695aeb3f”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:53.841Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 1, “of” : 3, “chunk” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : 74908 }, “lastmod” : Timestamp(2, 2), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:53-555005215c36cdce695aeb40”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:53.843Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 2, “of” : 3, “chunk” : { “min” : { “user_id” : 74908 }, “max” : { “user_id” : 179771 }, “lastmod” : Timestamp(2, 3), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:53-555005215c36cdce695aeb41”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:53.849Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 3, “of” : 3, “chunk” : { “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “lastmod” : Timestamp(2, 4), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:53-555005215c36cdce695aeb43”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:53.893Z”), “what” : “moveChunk.start”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “from” : “rs2”, “to” : “rs1” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:54-55500522c25ee1ddcace86d7”, “server” : “ip-10-0-0-197”, “clientAddr” : “:27017”, “time” : ISODate(“2015-05-11T01:25:54.927Z”), “what” : “moveChunk.to”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “step 1 of 5” : 0, “step 2 of 5” : 0, “step 3 of 5” : 0, “step 4 of 5” : 0, “step 5 of 5” : 1030, “note” : “success” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:54-555005225c36cdce695aeb44”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:54.954Z”), “what” : “moveChunk.commit”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “from” : “rs2”, “to” : “rs1”, “cloned” : NumberLong(1), “clonedBytes” : NumberLong(84), “catchup” : NumberLong(0), “steady” : NumberLong(0) } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:54-555005225c36cdce695aeb45”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:54.972Z”), “what” : “moveChunk.from”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “step 1 of 6” : 0, “step 2 of 6” : 18, “step 3 of 6” : 0, “step 4 of 6” : 1025, “step 5 of 6” : 34, “step 6 of 6” : 0, “to” : “rs1”, “from” : “rs2”, “note” : “success” } }
mongos>
mongos>
mongos>
mongos>
mongos> db.actionlog.find()
{ “_id” : ObjectId(“554fffe05384fafde4642cdd”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:03:28.754Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 84, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“554fffea5384fafde4642cdf”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:03:38.843Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 24, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“554ffff45384fafde4642ce1”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:03:48.889Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 27, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“554ffffe5384fafde4642ce3”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:03:58.934Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000085384fafde4642ce5”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:08.979Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 25, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000135384fafde4642ce7”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:19.025Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550001d5384fafde4642ce9”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:29.072Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 28, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000275384fafde4642ceb”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:39.118Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000315384fafde4642ced”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:49.163Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550003b5384fafde4642cef”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:59.209Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000455384fafde4642cf1”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:09.255Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550004f5384fafde4642cf3”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:19.300Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000595384fafde4642cf5”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:29.346Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000635384fafde4642cf7”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:39.390Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 24, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550006d5384fafde4642cf9”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:49.436Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000775384fafde4642cfb”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:59.482Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 25, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000815384fafde4642cfd”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:06:09.528Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550008b5384fafde4642cff”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:06:19.572Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 25, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000955384fafde4642d01”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:06:29.620Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550009f5384fafde4642d03”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:06:39.673Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 32, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
Type “it” for more
mongos>
read
readWrite
dbAdmin
userAdmin
clusterAdmin
readAnyDatabase
readWriteAnyDatabase
dbAdminAnyDatabase
userAdminAnyDatabase
(Note: "write" alone is not a built-in role — write access is granted via readWrite.)

use admin
db.createUser({ user: "<username>", pwd: "<password>", roles: [ "<role>" ] })

To enable sharding :-
==================
sh.addShard(“rs1/ip-10-0-0-197:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011”)
sh.addShard(“rs2/ip-10-0-0-197:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021”)

use shard_db
db.shard_col.ensureIndex( { user_id : 1 }, { unique : true })
sh.enableSharding(“shard_db”)
sh.shardCollection(“shard_db.shard_col”, {“user_id”:1})
sh.startBalancer()
sh.getBalancerState()
sh.isBalancerRunning()
use shard_db
for(var i = 1; i <= 100000 ; i++){db.shard_col.insert({“user_id” : i , “name” : “bulk-inserts”, ” Iteration: ” : i });}

[root@ip-10-0-0-197 ~]# mongo –port 10000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:10000/test
Server has startup warnings:
2015-05-11T01:03:27.551+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:03:27.551+0000 I CONTROL
mongos>
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“554fffe05384fafde4642cd9”)
}
shards:
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ “_id” : “admin”, “partitioned” : false, “primary” : “config” }

mongos>
mongos>
mongos>
bye
[root@ip-10-0-0-197 ~]# mongo –port 27010
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27010/test
Server has startup warnings:
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> rs.status()
{
“set” : “rs1”,
“date” : ISODate(“2015-05-11T01:07:48.890Z”),
“myState” : 1,
“members” : [
{
“_id” : 0,
“name” : “ip-10-0-0-197:27010”,
“health” : 1,
“state” : 1,
“stateStr” : “PRIMARY”,
“uptime” : 461,
“optime” : Timestamp(1431306407, 1),
“optimeDate” : ISODate(“2015-05-11T01:06:47Z”),
“electionTime” : Timestamp(1431306407, 2),
“electionDate” : ISODate(“2015-05-11T01:06:47Z”),
“configVersion” : 1,
“self” : true
}
],
“ok” : 1
}
rs1:PRIMARY> rs.status()
{
“set” : “rs1”,
“date” : ISODate(“2015-05-11T01:08:15.050Z”),
“myState” : 1,
“members” : [
{
“_id” : 0,
“name” : “ip-10-0-0-197:27010”,
“health” : 1,
“state” : 1,
“stateStr” : “PRIMARY”,
“uptime” : 488,
“optime” : Timestamp(1431306407, 1),
“optimeDate” : ISODate(“2015-05-11T01:06:47Z”),
“electionTime” : Timestamp(1431306407, 2),
“electionDate” : ISODate(“2015-05-11T01:06:47Z”),
“configVersion” : 1,
“self” : true
}
],
“ok” : 1
}
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> rs.status()
{
“set” : “rs1”,
“date” : ISODate(“2015-05-11T01:13:07.239Z”),
“myState” : 1,
“members” : [
{
“_id” : 0,
“name” : “ip-10-0-0-197:27010”,
“health” : 1,
“state” : 1,
“stateStr” : “PRIMARY”,
“uptime” : 780,
“optime” : Timestamp(1431306607, 2),
“optimeDate” : ISODate(“2015-05-11T01:10:07Z”),
“electionTime” : Timestamp(1431306407, 2),
“electionDate” : ISODate(“2015-05-11T01:06:47Z”),
“configVersion” : 3,
“self” : true
},
{
“_id” : 1,
“name” : “ec2-52-7-8-107.compute-1.amazonaws.com:27011”,
“health” : 1,
“state” : 2,
“stateStr” : “SECONDARY”,
“uptime” : 179,
“optime” : Timestamp(1431306607, 2),
“optimeDate” : ISODate(“2015-05-11T01:10:07Z”),
“lastHeartbeat” : ISODate(“2015-05-11T01:13:05.912Z”),
“lastHeartbeatRecv” : ISODate(“2015-05-11T01:13:05.826Z”),
“pingMs” : 0,
“syncingTo” : “ip-10-0-0-197:27010”,
“configVersion” : 3
},
{
“_id” : 2,
“name” : “ec2-52-7-8-107.compute-1.amazonaws.com:27012”,
“health” : 1,
“state” : 7,
“stateStr” : “ARBITER”,
“uptime” : 179,
“lastHeartbeat” : ISODate(“2015-05-11T01:13:05.912Z”),
“lastHeartbeatRecv” : ISODate(“2015-05-11T01:13:05.911Z”),
“pingMs” : 0,
“configVersion” : 3
}
],
“ok” : 1
}
rs1:PRIMARY>
rs1:PRIMARY>
bye
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# ps -ef | grep mongo
root 14253 1 0 01:00 pts/0 00:00:00 ./mongod –configsvr –dbpath /data/config1 –logpath /tmp/config1.log –port 39000 –config /etc/mongod.conf
root 14254 1 0 01:00 pts/0 00:00:00 ./mongod –configsvr –dbpath /data/config2 –logpath /tmp/config2.log –port 39001 –config /etc/mongod.conf
root 14255 1 0 01:00 pts/0 00:00:00 ./mongod –configsvr –dbpath /data/config3 –logpath /tmp/config3.log –port 39002 –config /etc/mongod.conf
root 14256 1 0 01:00 pts/0 00:00:00 ./mongod –shardsvr –replSet rs1 –dbpath /data/1/shard1_1 –logpath /tmp/shard1_1.log –port 27010 –config /etc/mongod.conf
root 14257 1 0 01:00 pts/0 00:00:00 ./mongod –shardsvr –replSet rs1 –dbpath /data/1/shard1_2 –logpath /tmp/shard1_2.log –port 27011 –config /etc/mongod.conf
root 14258 1 0 01:00 pts/0 00:00:00 ./mongod –shardsvr –replSet rs2 –dbpath /data/2/shard2_1 –logpath /tmp/shard2_1.log –port 27020 –config /etc/mongod.conf
root 14259 1 0 01:00 pts/0 00:00:00 ./mongod –shardsvr –replSet rs2 –dbpath /data/2/shard2_2 –logpath /tmp/shard2_2.log –port 27021 –config /etc/mongod.conf
root 14260 1 0 01:00 pts/0 00:00:00 ./mongod –replSet rs1 –dbpath /data/arbiter1 –logpath /tmp/arbiter1.log –port 27012 –config /etc/mongod.conf
root 14261 1 0 01:00 pts/0 00:00:00 ./mongod –replSet rs2 –dbpath /data/arbiter2 –logpath /tmp/arbiter2.log –port 27022 –config /etc/mongod.conf
root 14442 1 0 01:03 pts/0 00:00:00 ./mongos –configdb ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 –logpath /tmp/router.log –port 10000
root 15129 14399 0 01:13 pts/1 00:00:00 grep mongo
[root@ip-10-0-0-197 ~]# cd /tmp/
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# cat shard1_1.log
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] MongoDB starting : pid=14256 port=27010 dbpath=/data/1/shard1_1 64-bit host=ip-10-0-0-197
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] db version v3.0.2
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] git version: 6201872043ecbbc0a4cc169b5482dcf385fc464f
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] build info: Linux build6.nj1.10gen.cc 2.6.32-431.3.1.el6.x86_64 #1 SMP Fri Jan 3 21:39:27 UTC 2014 x86_64 BOOST_LIB_VERSION=1_49
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] allocator: tcmalloc
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] options: { config: "/etc/mongod.conf", net: { port: 27010 }, replication: { replSet: "rs1" }, sharding: { clusterRole: "shardsvr" }, storage: { dbPath: "/data/1/shard1_1", journal: { enabled: false }, mmapv1: { smallFiles: true } }, systemLog: { destination: "file", path: "/tmp/shard1_1.log", quiet: true } }
2015-05-11T01:00:07.974+0000 I INDEX [initandlisten] allocating new ns file /data/1/shard1_1/local.ns, filling with zeroes…
2015-05-11T01:00:10.384+0000 I STORAGE [FileAllocator] allocating new datafile /data/1/shard1_1/local.0, filling with zeroes…
2015-05-11T01:00:10.385+0000 I STORAGE [FileAllocator] creating directory /data/1/shard1_1/_tmp
2015-05-11T01:00:10.398+0000 I STORAGE [FileAllocator] done allocating datafile /data/1/shard1_1/local.0, size: 16MB, took 0.006 secs
2015-05-11T01:00:10.427+0000 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
2015-05-11T01:00:10.428+0000 I NETWORK [initandlisten] waiting for connections on port 27010
2015-05-11T01:06:47.582+0000 I COMMAND [conn1] replSet info initiate : no configuration specified. Using a default configuration for the set
2015-05-11T01:06:47.582+0000 I COMMAND [conn1] replSet created this configuration for initiation : { _id: “rs1”, version: 1, members: [ { _id: 0, host: “ip-10-0-0-197:27010” } ] }
2015-05-11T01:06:47.582+0000 I REPL [conn1] replSetInitiate admin command received from client
2015-05-11T01:06:47.584+0000 I REPL [conn1] replSet replSetInitiate config object with 1 members parses ok
2015-05-11T01:06:47.584+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: “rs1”, version: 1, members: [ { _id: 0, host: “ip-10-0-0-197:27010”, arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 } } }
2015-05-11T01:06:47.584+0000 I REPL [ReplicationExecutor] This node is ip-10-0-0-197:27010 in the config
2015-05-11T01:06:47.584+0000 I REPL [ReplicationExecutor] transition to STARTUP2
2015-05-11T01:06:47.585+0000 I REPL [conn1] ******
2015-05-11T01:06:47.585+0000 I REPL [conn1] creating replication oplog of size: 990MB…
2015-05-11T01:06:47.585+0000 I STORAGE [FileAllocator] allocating new datafile /data/1/shard1_1/local.1, filling with zeroes…
2015-05-11T01:06:47.588+0000 I STORAGE [FileAllocator] done allocating datafile /data/1/shard1_1/local.1, size: 511MB, took 0.003 secs
2015-05-11T01:06:47.588+0000 I STORAGE [FileAllocator] allocating new datafile /data/1/shard1_1/local.2, filling with zeroes…
2015-05-11T01:06:47.591+0000 I STORAGE [FileAllocator] done allocating datafile /data/1/shard1_1/local.2, size: 511MB, took 0.002 secs
2015-05-11T01:06:47.630+0000 I REPL [conn1] ******
2015-05-11T01:06:47.630+0000 I REPL [conn1] Starting replication applier threads
2015-05-11T01:06:47.636+0000 I REPL [ReplicationExecutor] transition to RECOVERING
2015-05-11T01:06:47.636+0000 I REPL [ReplicationExecutor] transition to SECONDARY
2015-05-11T01:06:47.636+0000 I REPL [ReplicationExecutor] transition to PRIMARY
2015-05-11T01:06:48.634+0000 I REPL [rsSync] transition to primary complete; database writes are now permitted
2015-05-11T01:10:07.760+0000 I REPL [conn3] replSetReconfig admin command received from client
2015-05-11T01:10:07.761+0000 I REPL [conn3] replSetReconfig config object with 2 members parses ok
2015-05-11T01:10:07.763+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: “rs1”, version: 2, members: [ { _id: 0, host: “ip-10-0-0-197:27010”, arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: “ec2-52-7-8-107.compute-1.amazonaws.com:27011”, arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 } } }
2015-05-11T01:10:07.763+0000 I REPL [ReplicationExecutor] This node is ip-10-0-0-197:27010 in the config
2015-05-11T01:10:07.763+0000 I REPL [ReplicationExecutor] Member ec2-52-7-8-107.compute-1.amazonaws.com:27011 is now in state STARTUP
2015-05-11T01:10:07.861+0000 I REPL [conn7] replSetReconfig admin command received from client
2015-05-11T01:10:07.863+0000 I REPL [conn7] replSetReconfig config object with 3 members parses ok
2015-05-11T01:10:07.864+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: “rs1”, version: 3, members: [ { _id: 0, host: “ip-10-0-0-197:27010”, arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: “ec2-52-7-8-107.compute-1.amazonaws.com:27011”, arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: “ec2-52-7-8-107.compute-1.amazonaws.com:27012”, arbiterOnly: true, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 } } }
2015-05-11T01:10:07.864+0000 I REPL [ReplicationExecutor] This node is ip-10-0-0-197:27010 in the config
2015-05-11T01:10:07.864+0000 I REPL [ReplicationExecutor] Member ec2-52-7-8-107.compute-1.amazonaws.com:27011 is now in state SECONDARY
2015-05-11T01:10:07.868+0000 I REPL [ReplicationExecutor] Member ec2-52-7-8-107.compute-1.amazonaws.com:27012 is now in state STARTUP
2015-05-11T01:10:09.868+0000 I REPL [ReplicationExecutor] Member ec2-52-7-8-107.compute-1.amazonaws.com:27012 is now in state ARBITER
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# cat router.log
2015-05-11T01:03:27.551+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:03:27.551+0000 I CONTROL
2015-05-11T01:03:27.552+0000 I SHARDING [mongosMain] MongoS version 3.0.2 starting: pid=14442 port=10000 64-bit host=ip-10-0-0-197 (–help for usage)
2015-05-11T01:03:27.552+0000 I CONTROL [mongosMain] db version v3.0.2
2015-05-11T01:03:27.552+0000 I CONTROL [mongosMain] git version: 6201872043ecbbc0a4cc169b5482dcf385fc464f
2015-05-11T01:03:27.552+0000 I CONTROL [mongosMain] build info: Linux build6.nj1.10gen.cc 2.6.32-431.3.1.el6.x86_64 #1 SMP Fri Jan 3 21:39:27 UTC 2014 x86_64 BOOST_LIB_VERSION=1_49
2015-05-11T01:03:27.552+0000 I CONTROL [mongosMain] allocator: tcmalloc
2015-05-11T01:03:27.552+0000 I CONTROL [mongosMain] options: { net: { port: 10000 }, sharding: { configDB: “ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002” }, systemLog: { destination: “file”, path: “/tmp/router.log” } }
2015-05-11T01:03:27.556+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39000]
2015-05-11T01:03:27.556+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39001]
2015-05-11T01:03:27.557+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39002]
2015-05-11T01:03:27.560+0000 I NETWORK [mongosMain] scoped connection to ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 not being returned to the pool
2015-05-11T01:03:27.563+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39000]
2015-05-11T01:03:27.563+0000 I SHARDING [LockPinger] creating distributed lock ping thread for ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 and process ip-10-0-0-197:10000:1431306207:1804289383 (sleeping for 30000ms)
2015-05-11T01:03:27.563+0000 I NETWORK [LockPinger] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39000]
2015-05-11T01:03:27.564+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39001]
2015-05-11T01:03:27.564+0000 I NETWORK [LockPinger] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39001]
2015-05-11T01:03:27.564+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39002]
2015-05-11T01:03:27.565+0000 I NETWORK [LockPinger] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39002]
2015-05-11T01:03:28.453+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:03:27 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:03:28.469+0000 I SHARDING [mongosMain] distributed lock ‘configUpgrade/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 554fffe05384fafde4642cd7
2015-05-11T01:03:28.469+0000 I SHARDING [mongosMain] starting upgrade of config server from v0 to v6
2015-05-11T01:03:28.469+0000 I SHARDING [mongosMain] starting next upgrade step from v0 to v6
2015-05-11T01:03:28.469+0000 I SHARDING [mongosMain] about to log new metadata event: { _id: “ip-10-0-0-197-2015-05-11T01:03:28-554fffe05384fafde4642cd8”, server: “ip-10-0-0-197”, clientAddr: “N/A”, time: new Date(1431306208469), what: “starting upgrade of config database”, ns: “config.version”, details: { from: 0, to: 6 } }
2015-05-11T01:03:28.522+0000 I SHARDING [mongosMain] writing initial config version at v6
2015-05-11T01:03:28.529+0000 I SHARDING [mongosMain] about to log new metadata event: { _id: “ip-10-0-0-197-2015-05-11T01:03:28-554fffe05384fafde4642cda”, server: “ip-10-0-0-197”, clientAddr: “N/A”, time: new Date(1431306208529), what: “finished upgrade of config database”, ns: “config.version”, details: { from: 0, to: 6 } }
2015-05-11T01:03:28.537+0000 I SHARDING [mongosMain] upgrade of config server to v6 successful
2015-05-11T01:03:28.538+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39000]
2015-05-11T01:03:28.538+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39001]
2015-05-11T01:03:28.539+0000 I NETWORK [mongosMain] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39002]
2015-05-11T01:03:28.556+0000 I SHARDING [mongosMain] distributed lock ‘configUpgrade/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:03:28.670+0000 I SHARDING [Balancer] about to contact config servers and shards
2015-05-11T01:03:28.670+0000 I SHARDING [Balancer] config servers and shards contacted successfully
2015-05-11T01:03:28.670+0000 I SHARDING [Balancer] balancer id: ip-10-0-0-197:10000 started at May 11 01:03:28
2015-05-11T01:03:28.687+0000 I NETWORK [mongosMain] waiting for connections on port 10000
2015-05-11T01:03:28.754+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 554fffe05384fafde4642cdc
2015-05-11T01:03:28.818+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:03:38.843+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 554fffea5384fafde4642cde
2015-05-11T01:03:38.861+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:03:48.889+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 554ffff45384fafde4642ce0
2015-05-11T01:03:48.908+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:03:58.469+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:03:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:03:58.934+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 554ffffe5384fafde4642ce2
2015-05-11T01:03:58.953+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:08.979+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000085384fafde4642ce4
2015-05-11T01:04:08.998+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:19.025+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000125384fafde4642ce6
2015-05-11T01:04:19.043+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:28.485+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:04:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:04:29.072+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550001d5384fafde4642ce8
2015-05-11T01:04:29.091+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:39.117+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000275384fafde4642cea
2015-05-11T01:04:39.136+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:49.163+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000315384fafde4642cec
2015-05-11T01:04:49.181+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:04:58.502+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:04:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:04:59.208+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550003b5384fafde4642cee
2015-05-11T01:04:59.227+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:09.254+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000455384fafde4642cf0
2015-05-11T01:05:09.273+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:19.300+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550004f5384fafde4642cf2
2015-05-11T01:05:19.319+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:28.517+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:05:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:05:29.346+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000595384fafde4642cf4
2015-05-11T01:05:29.365+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:39.390+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000635384fafde4642cf6
2015-05-11T01:05:39.409+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:49.436+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550006d5384fafde4642cf8
2015-05-11T01:05:49.455+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:05:58.531+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:05:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:05:59.482+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000775384fafde4642cfa
2015-05-11T01:05:59.500+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:09.527+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000815384fafde4642cfc
2015-05-11T01:06:09.546+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:19.572+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550008b5384fafde4642cfe
2015-05-11T01:06:19.593+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:28.548+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:06:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:06:29.620+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000955384fafde4642d00
2015-05-11T01:06:29.639+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:39.672+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550009f5384fafde4642d02
2015-05-11T01:06:39.695+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:49.722+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000a95384fafde4642d04
2015-05-11T01:06:49.743+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:06:58.561+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:06:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:06:59.554+0000 I NETWORK [mongosMain] connection accepted from 127.0.0.1:49097 #1 (1 connection now open)
2015-05-11T01:06:59.555+0000 I SHARDING [conn1] couldn’t find database [admin] in config db
2015-05-11T01:06:59.556+0000 I SHARDING [conn1] put [admin] on: config:ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002
2015-05-11T01:06:59.784+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000b35384fafde4642d06
2015-05-11T01:06:59.804+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:07.714+0000 I NETWORK [conn1] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39000]
2015-05-11T01:07:07.715+0000 I NETWORK [conn1] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39001]
2015-05-11T01:07:07.716+0000 I NETWORK [conn1] SyncClusterConnection connecting to [ec2-52-7-8-107.compute-1.amazonaws.com:39002]
2015-05-11T01:07:09.831+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000bd5384fafde4642d08
2015-05-11T01:07:09.849+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:19.874+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000c75384fafde4642d0a
2015-05-11T01:07:19.893+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:28.575+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:07:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:07:29.919+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000d15384fafde4642d0c
2015-05-11T01:07:29.940+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:38.080+0000 I NETWORK [conn1] end connection 127.0.0.1:49097 (0 connections now open)
2015-05-11T01:07:39.970+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000db5384fafde4642d0e
2015-05-11T01:07:39.989+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:50.014+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000e55384fafde4642d10
2015-05-11T01:07:50.032+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:07:58.592+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:07:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:08:00.060+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000f05384fafde4642d12
2015-05-11T01:08:00.080+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:10.107+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555000fa5384fafde4642d14
2015-05-11T01:08:10.126+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:20.154+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001045384fafde4642d16
2015-05-11T01:08:20.173+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:28.607+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:08:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:08:30.200+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550010e5384fafde4642d18
2015-05-11T01:08:30.219+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:40.244+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001185384fafde4642d1a
2015-05-11T01:08:40.264+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:50.290+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001225384fafde4642d1c
2015-05-11T01:08:50.310+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:08:58.622+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:08:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:09:00.338+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550012c5384fafde4642d1e
2015-05-11T01:09:00.358+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:10.385+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001365384fafde4642d20
2015-05-11T01:09:10.403+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:20.429+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001405384fafde4642d22
2015-05-11T01:09:20.447+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:28.638+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:09:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:09:30.473+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550014a5384fafde4642d24
2015-05-11T01:09:30.494+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:40.518+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001545384fafde4642d26
2015-05-11T01:09:40.537+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:50.563+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550015e5384fafde4642d28
2015-05-11T01:09:50.581+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:09:58.654+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:09:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:10:00.608+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001685384fafde4642d2a
2015-05-11T01:10:00.626+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:10.658+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001725384fafde4642d2c
2015-05-11T01:10:10.681+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:20.705+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550017c5384fafde4642d2e
2015-05-11T01:10:20.724+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:28.664+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:10:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:10:30.751+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001865384fafde4642d30
2015-05-11T01:10:30.770+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:40.796+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001905384fafde4642d32
2015-05-11T01:10:40.815+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:50.842+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550019a5384fafde4642d34
2015-05-11T01:10:50.862+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:10:58.679+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:10:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:11:00.889+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001a45384fafde4642d36
2015-05-11T01:11:00.909+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:10.935+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001ae5384fafde4642d38
2015-05-11T01:11:10.955+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:20.979+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001b85384fafde4642d3a
2015-05-11T01:11:20.998+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:28.697+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:11:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:11:31.025+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001c25384fafde4642d3c
2015-05-11T01:11:31.044+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:41.071+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001cd5384fafde4642d3e
2015-05-11T01:11:41.090+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:51.115+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001d75384fafde4642d40
2015-05-11T01:11:51.134+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:11:58.712+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:11:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:12:01.161+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001e15384fafde4642d42
2015-05-11T01:12:01.182+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:11.209+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001eb5384fafde4642d44
2015-05-11T01:12:11.229+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:21.255+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001f55384fafde4642d46
2015-05-11T01:12:21.274+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:28.728+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:12:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:12:31.301+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555001ff5384fafde4642d48
2015-05-11T01:12:31.321+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:41.346+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002095384fafde4642d4a
2015-05-11T01:12:41.367+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:51.392+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002135384fafde4642d4c
2015-05-11T01:12:51.411+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:12:58.738+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:12:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:13:01.479+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550021d5384fafde4642d4e
2015-05-11T01:13:01.523+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:11.550+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002275384fafde4642d50
2015-05-11T01:13:11.568+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:21.593+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002315384fafde4642d52
2015-05-11T01:13:21.614+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:27.961+0000 I NETWORK [mongosMain] connection accepted from 10.0.0.197:56870 #2 (1 connection now open)
2015-05-11T01:13:27.962+0000 I NETWORK [conn2] starting new replica set monitor for replica set rs1 with seeds ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011
2015-05-11T01:13:27.963+0000 I NETWORK [ReplicaSetMonitorWatcher] starting
2015-05-11T01:13:27.964+0000 I NETWORK [conn2] changing hosts to rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010 from rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011
2015-05-11T01:13:27.965+0000 I COMMAND [conn2] addshard request { addShard: “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011” } failed: in seed list rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011, host ec2-52-7-8-107.compute-1.amazonaws.com:27010 does not belong to replica set rs1
2015-05-11T01:13:27.967+0000 I NETWORK [conn2] end connection 10.0.0.197:56870 (0 connections now open)
2015-05-11T01:13:28.005+0000 I NETWORK [mongosMain] connection accepted from 10.0.0.197:56874 #3 (1 connection now open)
2015-05-11T01:13:28.006+0000 I NETWORK [conn3] starting new replica set monitor for replica set rs2 with seeds ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021
2015-05-11T01:13:28.007+0000 I NETWORK [conn3] changing hosts to rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27021,ip-10-0-0-197:27020 from rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021
2015-05-11T01:13:28.010+0000 I COMMAND [conn3] addshard request { addShard: “rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021” } failed: in seed list rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021, host ec2-52-7-8-107.compute-1.amazonaws.com:27020 does not belong to replica set rs2
2015-05-11T01:13:28.012+0000 I NETWORK [conn3] end connection 10.0.0.197:56874 (0 connections now open)
2015-05-11T01:13:28.753+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:13:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:13:31.642+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550023b5384fafde4642d54
2015-05-11T01:13:31.662+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:41.687+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002455384fafde4642d56
2015-05-11T01:13:41.710+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:51.735+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550024f5384fafde4642d58
2015-05-11T01:13:51.754+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:13:58.768+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:13:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:14:01.822+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002595384fafde4642d5a
2015-05-11T01:14:01.865+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:11.891+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002635384fafde4642d5c
2015-05-11T01:14:11.910+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:21.936+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550026d5384fafde4642d5e
2015-05-11T01:14:21.954+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:28.783+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:14:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:14:31.980+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002775384fafde4642d60
2015-05-11T01:14:31.998+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:42.026+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002815384fafde4642d62
2015-05-11T01:14:42.045+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:52.071+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 5550028c5384fafde4642d64
2015-05-11T01:14:52.088+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:14:58.797+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:14:58 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:15:02.152+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002965384fafde4642d66
2015-05-11T01:15:02.197+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:15:12.225+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002a05384fafde4642d68
2015-05-11T01:15:12.244+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:15:22.272+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002aa5384fafde4642d6a
2015-05-11T01:15:22.292+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:15:28.808+0000 I SHARDING [LockPinger] cluster ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 pinged successfully at Mon May 11 01:15:28 2015 by distributed lock pinger ‘ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002/ip-10-0-0-197:10000:1431306207:1804289383’, sleeping for 30000ms
2015-05-11T01:15:32.318+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002b45384fafde4642d6c
2015-05-11T01:15:32.338+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
2015-05-11T01:15:42.363+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ acquired, ts : 555002be5384fafde4642d6e
2015-05-11T01:15:42.382+0000 I SHARDING [Balancer] distributed lock ‘balancer/ip-10-0-0-197:10000:1431306207:1804289383’ unlocked.
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# 4
bash: 4: command not found
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# cd $MONGODB_HOME
[root@ip-10-0-0-197 mongodb-linux-x86_64-3.0.2]#
[root@ip-10-0-0-197 mongodb-linux-x86_64-3.0.2]#
[root@ip-10-0-0-197 mongodb-linux-x86_64-3.0.2]# cd bin/
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ll -lhtr
total 113M
-rwxr-xr-x 1 1046 1046 4.1M Apr 8 20:39 bsondump
-rwxr-xr-x 1 1046 1046 5.6M Apr 8 20:39 mongostat
-rwxr-xr-x 1 1046 1046 5.6M Apr 8 20:39 mongofiles
-rwxr-xr-x 1 1046 1046 5.7M Apr 8 20:39 mongoexport
-rwxr-xr-x 1 1046 1046 5.8M Apr 8 20:39 mongoimport
-rwxr-xr-x 1 1046 1046 6.0M Apr 8 20:39 mongorestore
-rwxr-xr-x 1 1046 1046 5.8M Apr 8 20:39 mongodump
-rwxr-xr-x 1 1046 1046 5.4M Apr 8 20:39 mongotop
-rwxr-xr-x 1 1046 1046 5.3M Apr 8 20:39 mongooplog
-rwxr-xr-x 1 1046 1046 22M Apr 8 20:46 mongoperf
-rwxr-xr-x 1 1046 1046 22M Apr 8 20:46 mongod
-rwxr-xr-x 1 1046 1046 11M Apr 8 20:46 mongos
-rwxr-xr-x 1 1046 1046 12M Apr 8 20:46 mongo
-rw-r–r– 1 root root 4.3K May 10 15:13 shard_creation_script.sh
-rw-r–r– 1 root root 4.3K May 10 15:36 new_shard_creation_script.sh
-rw-r–r– 1 root root 3.9K May 10 15:56 NSS.sh
-rw-r–r– 1 root root 3.9K May 10 16:06 shard-script.sh
-rw-r–r– 1 root root 4.0K May 11 00:59 new_script.sh
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# cat new_script.sh
##### Killing the existing Mongo processes ################
# Force-kill every mongod/mongos belonging to this cluster (shard, config
# and router processes) so we start from a clean slate.
# NOTE: matching on command-line flags is best-effort; any unrelated process
# whose arguments contain these words would also be killed.
for pid in $(ps -ef | egrep 'shardsvr|configsvr|replSet|configdb' | grep -v egrep | awk '{print $2}'); do kill -9 "$pid"; done

##### Creating Mongo data & log files #####################

mkdir -p /data /data/1 /data/2 /data/3
rm -rf /data/*   # wipe any data left over from a previous run
cd /data/
mkdir -p config1 config2 config3 arbiter1 arbiter2 arbiter3 router /data/1/shard1_1 /data/1/shard1_2 /data/2/shard2_1 /data/2/shard2_2 /data/3/shard3_1 /data/3/shard3_2

cd /home/ec2-user/mongodb-linux-x86_64-3.0.2/bin

##### Starting the Mongo Config,Shard,Arbiter & Router services ################

## Config Servers #####
./mongod --configsvr --dbpath /data/config1 --logpath /tmp/config1.log --port 39000 --config /etc/mongod.conf &
./mongod --configsvr --dbpath /data/config2 --logpath /tmp/config2.log --port 39001 --config /etc/mongod.conf &
./mongod --configsvr --dbpath /data/config3 --logpath /tmp/config3.log --port 39002 --config /etc/mongod.conf &

## Replica Set 1 ######
./mongod --shardsvr --replSet rs1 --dbpath /data/1/shard1_1 --logpath /tmp/shard1_1.log --port 27010 --config /etc/mongod.conf &
./mongod --shardsvr --replSet rs1 --dbpath /data/1/shard1_2 --logpath /tmp/shard1_2.log --port 27011 --config /etc/mongod.conf &

## Replica Set 2 ######
./mongod --shardsvr --replSet rs2 --dbpath /data/2/shard2_1 --logpath /tmp/shard2_1.log --port 27020 --config /etc/mongod.conf &
./mongod --shardsvr --replSet rs2 --dbpath /data/2/shard2_2 --logpath /tmp/shard2_2.log --port 27021 --config /etc/mongod.conf &

## Replica Set 3 ###### (disabled)
#./mongod --shardsvr --replSet rs3 --dbpath /data/3/shard3_1 --logpath /tmp/shard3_1.log --port 27030 --config /etc/mongod.conf &
#./mongod --shardsvr --replSet rs3 --dbpath /data/3/shard3_2 --logpath /tmp/shard3_2.log --port 27031 --config /etc/mongod.conf &

## Arbiters ####
./mongod --replSet rs1 --dbpath /data/arbiter1 --logpath /tmp/arbiter1.log --port 27012 --config /etc/mongod.conf &
./mongod --replSet rs2 --dbpath /data/arbiter2 --logpath /tmp/arbiter2.log --port 27022 --config /etc/mongod.conf &
#./mongod --replSet rs3 --dbpath /data/arbiter3 --logpath /tmp/arbiter3.log --port 27032 --config /etc/mongod.conf &

# Give the config servers time to come up before starting the router,
# and the router time to come up before issuing admin commands.
sleep 200
./mongos --configdb ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 --logpath /tmp/router.log --port 10000 &
sleep 200
# Initiate each replica set on its first member.
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval "rs.initiate()"
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval "rs.initiate()"
#./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27030/admin --eval "rs.initiate()"

sleep 200
echo -e "\n\n Replica sets are being added. \n\n"

# Add the second data-bearing member to each replica set.
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval "rs.add(\"ec2-52-7-8-107.compute-1.amazonaws.com:27011\")"
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval "rs.add(\"ec2-52-7-8-107.compute-1.amazonaws.com:27021\")"
#./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27030/admin --eval "rs.add(\"ec2-52-7-8-107.compute-1.amazonaws.com:27031\")"

# Add an arbiter to each set so elections have an odd number of voters.
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval "rs.addArb(\"ec2-52-7-8-107.compute-1.amazonaws.com:27012\")"
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval "rs.addArb(\"ec2-52-7-8-107.compute-1.amazonaws.com:27022\")"
#./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27030/admin --eval "rs.addArb(\"ec2-52-7-8-107.compute-1.amazonaws.com:27032\")"

sleep 200
# Register each replica set as a shard through the mongos router.
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin --eval "sh.addShard(\"rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011\")"
./mongo ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin --eval "sh.addShard(\"rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021\")"
#./mongo ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin --eval "sh.addShard(\"rs3/ec2-52-7-8-107.compute-1.amazonaws.com:27030,ec2-52-7-8-107.compute-1.amazonaws.com:27031\")"

[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo –port 27010
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27010/test
Server has startup warnings:
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo –port 27020
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27020/test
Server has startup warnings:
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> show dbs
local 1.031GB
shard_db 0.031GB
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> use shard_db
switched to db shard_db
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> show collections
shard_col
system.indexes
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
99991
rs2:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#

sh.addShard("rs1/ip-10-0-0-197:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011")
sh.addShard("rs2/ip-10-0-0-197:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021")

use shard_db
db.shard_col.ensureIndex( { user_id : 1 }, { unique : true })
sh.enableSharding("shard_db")
sh.shardCollection("shard_db.shard_col", {"user_id":1})
sh.startBalancer()
sh.getBalancerState()
sh.isBalancerRunning()
use shard_db
for(var i = 1; i <= 100000 ; i++){db.shard_col.insert({"user_id" : i , "name" : "bulk-inserts", " Iteration: " : i });}
db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } })

[root@ip-10-0-0-197 bin]# mongo –port 27010
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27010/test
Server has startup warnings:
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> use shard_db
switched to db shard_db
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a95226”), “user_id” : 1, “name” : “bulk-inserts”, ” Iteration: ” : 1 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95227”), “user_id” : 2, “name” : “bulk-inserts”, ” Iteration: ” : 2 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95228”), “user_id” : 3, “name” : “bulk-inserts”, ” Iteration: ” : 3 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95229”), “user_id” : 4, “name” : “bulk-inserts”, ” Iteration: ” : 4 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522a”), “user_id” : 5, “name” : “bulk-inserts”, ” Iteration: ” : 5 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522b”), “user_id” : 6, “name” : “bulk-inserts”, ” Iteration: ” : 6 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522c”), “user_id” : 7, “name” : “bulk-inserts”, ” Iteration: ” : 7 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522d”), “user_id” : 8, “name” : “bulk-inserts”, ” Iteration: ” : 8 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522e”), “user_id” : 9, “name” : “bulk-inserts”, ” Iteration: ” : 9 }
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo –port 27020
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27020/test
Server has startup warnings:
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> use shard_db
switched to db shard_db
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522f”), “user_id” : 10, “name” : “bulk-inserts”, ” Iteration: ” : 10 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95230”), “user_id” : 11, “name” : “bulk-inserts”, ” Iteration: ” : 11 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95231”), “user_id” : 12, “name” : “bulk-inserts”, ” Iteration: ” : 12 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95232”), “user_id” : 13, “name” : “bulk-inserts”, ” Iteration: ” : 13 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95233”), “user_id” : 14, “name” : “bulk-inserts”, ” Iteration: ” : 14 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95234”), “user_id” : 15, “name” : “bulk-inserts”, ” Iteration: ” : 15 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95235”), “user_id” : 16, “name” : “bulk-inserts”, ” Iteration: ” : 16 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95236”), “user_id” : 17, “name” : “bulk-inserts”, ” Iteration: ” : 17 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95237”), “user_id” : 18, “name” : “bulk-inserts”, ” Iteration: ” : 18 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95238”), “user_id” : 19, “name” : “bulk-inserts”, ” Iteration: ” : 19 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95239”), “user_id” : 20, “name” : “bulk-inserts”, ” Iteration: ” : 20 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523a”), “user_id” : 21, “name” : “bulk-inserts”, ” Iteration: ” : 21 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523b”), “user_id” : 22, “name” : “bulk-inserts”, ” Iteration: ” : 22 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523c”), “user_id” : 23, “name” : “bulk-inserts”, ” Iteration: ” : 23 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523d”), “user_id” : 24, “name” : “bulk-inserts”, ” Iteration: ” : 24 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523e”), “user_id” : 25, “name” : “bulk-inserts”, ” Iteration: ” : 25 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523f”), “user_id” : 26, “name” : “bulk-inserts”, ” Iteration: ” : 26 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95240”), “user_id” : 27, “name” : “bulk-inserts”, ” Iteration: ” : 27 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95241”), “user_id” : 28, “name” : “bulk-inserts”, ” Iteration: ” : 28 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95242”), “user_id” : 29, “name” : “bulk-inserts”, ” Iteration: ” : 29 }
Type “it” for more
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
119991
rs2:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo –port 27010
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27010/test
Server has startup warnings:
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> use shard_db
switched to db shard_db
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY> db.shard_col.count()
9
rs1:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo –port 27020
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27020/test
Server has startup warnings:
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
rs2:PRIMARY> use shard_db
switched to db shard_db
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
158901
rs2:PRIMARY> db.shard_col.count()
161356
rs2:PRIMARY> db.shard_col.count()
163025
rs2:PRIMARY> db.shard_col.count()
164730
rs2:PRIMARY> db.shard_col.count()
166165
rs2:PRIMARY> db.shard_col.count()
167618
rs2:PRIMARY> db.shard_col.count()
169023
rs2:PRIMARY> db.shard_col.count()
170179
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
174060
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.currentOp()
{
“inprog” : [
{
“desc” : “conn10”,
“threadId” : “0x2cb23c0”,
“connectionId” : 10,
“opid” : 633532,
“active” : true,
“secs_running” : 3,
“microsecs_running” : NumberLong(3288581),
“op” : “getmore”,
“ns” : “local.oplog.rs”,
“query” : {

},
“client” : “10.0.0.197:49000”,
“numYields” : 0,
“locks” : {

},
“waitingForLock” : false,
“lockStats” : {
“Global” : {
“acquireCount” : {
“r” : NumberLong(4)
}
},
“MMAPV1Journal” : {
“acquireCount” : {
“r” : NumberLong(4)
}
},
“Database” : {
“acquireCount” : {
“r” : NumberLong(4)
}
},
“oplog” : {
“acquireCount” : {
“R” : NumberLong(4)
}
}
}
}
]
}
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
179762
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
179762
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.count()
179762
rs2:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo –port 27010
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27010/test
Server has startup warnings:
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.974+0000 I CONTROL [initandlisten]
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> use shard_db
switched to db shard_db
rs1:PRIMARY> db.shard_col.count()
20239
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a95226”), “user_id” : 1, “name” : “bulk-inserts”, ” Iteration: ” : 1 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95227”), “user_id” : 2, “name” : “bulk-inserts”, ” Iteration: ” : 2 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95228”), “user_id” : 3, “name” : “bulk-inserts”, ” Iteration: ” : 3 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95229”), “user_id” : 4, “name” : “bulk-inserts”, ” Iteration: ” : 4 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522a”), “user_id” : 5, “name” : “bulk-inserts”, ” Iteration: ” : 5 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522b”), “user_id” : 6, “name” : “bulk-inserts”, ” Iteration: ” : 6 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522c”), “user_id” : 7, “name” : “bulk-inserts”, ” Iteration: ” : 7 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522d”), “user_id” : 8, “name” : “bulk-inserts”, ” Iteration: ” : 8 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522e”), “user_id” : 9, “name” : “bulk-inserts”, ” Iteration: ” : 9 }
{ “_id” : ObjectId(“55500521031f902dc9636298”), “user_id” : 179771, “name” : “bulk-inserts”, ” Iteration: ” : 179771 }
{ “_id” : ObjectId(“55500522031f902dc9636299”), “user_id” : 179772, “name” : “bulk-inserts”, ” Iteration: ” : 179772 }
{ “_id” : ObjectId(“55500522031f902dc963629a”), “user_id” : 179773, “name” : “bulk-inserts”, ” Iteration: ” : 179773 }
{ “_id” : ObjectId(“55500522031f902dc963629b”), “user_id” : 179774, “name” : “bulk-inserts”, ” Iteration: ” : 179774 }
{ “_id” : ObjectId(“55500522031f902dc963629c”), “user_id” : 179775, “name” : “bulk-inserts”, ” Iteration: ” : 179775 }
{ “_id” : ObjectId(“55500522031f902dc963629d”), “user_id” : 179776, “name” : “bulk-inserts”, ” Iteration: ” : 179776 }
{ “_id” : ObjectId(“55500522031f902dc963629e”), “user_id” : 179777, “name” : “bulk-inserts”, ” Iteration: ” : 179777 }
{ “_id” : ObjectId(“55500522031f902dc963629f”), “user_id” : 179778, “name” : “bulk-inserts”, ” Iteration: ” : 179778 }
{ “_id” : ObjectId(“55500522031f902dc96362a0”), “user_id” : 179779, “name” : “bulk-inserts”, ” Iteration: ” : 179779 }
{ “_id” : ObjectId(“55500522031f902dc96362a1”), “user_id” : 179780, “name” : “bulk-inserts”, ” Iteration: ” : 179780 }
{ “_id” : ObjectId(“55500522031f902dc96362a2”), “user_id” : 179781, “name” : “bulk-inserts”, ” Iteration: ” : 179781 }
Type “it” for more
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
rs1:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27011
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27011/test
Server has startup warnings:
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.950+0000 I CONTROL [initandlisten]
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY> use shard_db
switched to db shard_db
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY> db.shard_col.find()
Error: error: { "$err" : "not master and slaveOk=false", "code" : 13435 }
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY> rs.slaveOk()
rs1:SECONDARY>
rs1:SECONDARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a95226”), “user_id” : 1, “name” : “bulk-inserts”, ” Iteration: ” : 1 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95227”), “user_id” : 2, “name” : “bulk-inserts”, ” Iteration: ” : 2 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95228”), “user_id” : 3, “name” : “bulk-inserts”, ” Iteration: ” : 3 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a95229”), “user_id” : 4, “name” : “bulk-inserts”, ” Iteration: ” : 4 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522a”), “user_id” : 5, “name” : “bulk-inserts”, ” Iteration: ” : 5 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522b”), “user_id” : 6, “name” : “bulk-inserts”, ” Iteration: ” : 6 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522c”), “user_id” : 7, “name” : “bulk-inserts”, ” Iteration: ” : 7 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522d”), “user_id” : 8, “name” : “bulk-inserts”, ” Iteration: ” : 8 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522e”), “user_id” : 9, “name” : “bulk-inserts”, ” Iteration: ” : 9 }
{ “_id” : ObjectId(“55500521031f902dc9636298”), “user_id” : 179771, “name” : “bulk-inserts”, ” Iteration: ” : 179771 }
{ “_id” : ObjectId(“55500522031f902dc9636299”), “user_id” : 179772, “name” : “bulk-inserts”, ” Iteration: ” : 179772 }
{ “_id” : ObjectId(“55500522031f902dc963629a”), “user_id” : 179773, “name” : “bulk-inserts”, ” Iteration: ” : 179773 }
{ “_id” : ObjectId(“55500522031f902dc963629b”), “user_id” : 179774, “name” : “bulk-inserts”, ” Iteration: ” : 179774 }
{ “_id” : ObjectId(“55500522031f902dc963629c”), “user_id” : 179775, “name” : “bulk-inserts”, ” Iteration: ” : 179775 }
{ “_id” : ObjectId(“55500522031f902dc963629d”), “user_id” : 179776, “name” : “bulk-inserts”, ” Iteration: ” : 179776 }
{ “_id” : ObjectId(“55500522031f902dc963629e”), “user_id” : 179777, “name” : “bulk-inserts”, ” Iteration: ” : 179777 }
{ “_id” : ObjectId(“55500522031f902dc963629f”), “user_id” : 179778, “name” : “bulk-inserts”, ” Iteration: ” : 179778 }
{ “_id” : ObjectId(“55500522031f902dc96362a0”), “user_id” : 179779, “name” : “bulk-inserts”, ” Iteration: ” : 179779 }
{ “_id” : ObjectId(“55500522031f902dc96362a1”), “user_id” : 179780, “name” : “bulk-inserts”, ” Iteration: ” : 179780 }
{ “_id” : ObjectId(“55500522031f902dc96362a2”), “user_id” : 179781, “name” : “bulk-inserts”, ” Iteration: ” : 179781 }
Type “it” for more
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY>
rs1:SECONDARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27020
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27020/test
Server has startup warnings:
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> use shard_db
switched to db shard_db
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522f”), “user_id” : 10, “name” : “bulk-inserts”, ” Iteration: ” : 10 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95230”), “user_id” : 11, “name” : “bulk-inserts”, ” Iteration: ” : 11 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95231”), “user_id” : 12, “name” : “bulk-inserts”, ” Iteration: ” : 12 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95232”), “user_id” : 13, “name” : “bulk-inserts”, ” Iteration: ” : 13 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95233”), “user_id” : 14, “name” : “bulk-inserts”, ” Iteration: ” : 14 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95234”), “user_id” : 15, “name” : “bulk-inserts”, ” Iteration: ” : 15 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95235”), “user_id” : 16, “name” : “bulk-inserts”, ” Iteration: ” : 16 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95236”), “user_id” : 17, “name” : “bulk-inserts”, ” Iteration: ” : 17 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95237”), “user_id” : 18, “name” : “bulk-inserts”, ” Iteration: ” : 18 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95238”), “user_id” : 19, “name” : “bulk-inserts”, ” Iteration: ” : 19 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95239”), “user_id” : 20, “name” : “bulk-inserts”, ” Iteration: ” : 20 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523a”), “user_id” : 21, “name” : “bulk-inserts”, ” Iteration: ” : 21 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523b”), “user_id” : 22, “name” : “bulk-inserts”, ” Iteration: ” : 22 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523c”), “user_id” : 23, “name” : “bulk-inserts”, ” Iteration: ” : 23 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523d”), “user_id” : 24, “name” : “bulk-inserts”, ” Iteration: ” : 24 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523e”), “user_id” : 25, “name” : “bulk-inserts”, ” Iteration: ” : 25 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523f”), “user_id” : 26, “name” : “bulk-inserts”, ” Iteration: ” : 26 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95240”), “user_id” : 27, “name” : “bulk-inserts”, ” Iteration: ” : 27 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95241”), “user_id” : 28, “name” : “bulk-inserts”, ” Iteration: ” : 28 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95242”), “user_id” : 29, “name” : “bulk-inserts”, ” Iteration: ” : 29 }
Type “it” for more
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27021
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27021/test
Server has startup warnings:
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.958+0000 I CONTROL [initandlisten]
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY> use shard_db
switched to db shard_db
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY> db.shard_col.find()
Error: error: { "$err" : "not master and slaveOk=false", "code" : 13435 }
rs2:SECONDARY>
rs2:SECONDARY> rs.slaveOk()
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY> db.shard_col.find()
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522f”), “user_id” : 10, “name” : “bulk-inserts”, ” Iteration: ” : 10 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95230”), “user_id” : 11, “name” : “bulk-inserts”, ” Iteration: ” : 11 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95231”), “user_id” : 12, “name” : “bulk-inserts”, ” Iteration: ” : 12 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95232”), “user_id” : 13, “name” : “bulk-inserts”, ” Iteration: ” : 13 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95233”), “user_id” : 14, “name” : “bulk-inserts”, ” Iteration: ” : 14 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95234”), “user_id” : 15, “name” : “bulk-inserts”, ” Iteration: ” : 15 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95235”), “user_id” : 16, “name” : “bulk-inserts”, ” Iteration: ” : 16 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95236”), “user_id” : 17, “name” : “bulk-inserts”, ” Iteration: ” : 17 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95237”), “user_id” : 18, “name” : “bulk-inserts”, ” Iteration: ” : 18 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95238”), “user_id” : 19, “name” : “bulk-inserts”, ” Iteration: ” : 19 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95239”), “user_id” : 20, “name” : “bulk-inserts”, ” Iteration: ” : 20 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523a”), “user_id” : 21, “name” : “bulk-inserts”, ” Iteration: ” : 21 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523b”), “user_id” : 22, “name” : “bulk-inserts”, ” Iteration: ” : 22 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523c”), “user_id” : 23, “name” : “bulk-inserts”, ” Iteration: ” : 23 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523d”), “user_id” : 24, “name” : “bulk-inserts”, ” Iteration: ” : 24 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523e”), “user_id” : 25, “name” : “bulk-inserts”, ” Iteration: ” : 25 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523f”), “user_id” : 26, “name” : “bulk-inserts”, ” Iteration: ” : 26 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95240”), “user_id” : 27, “name” : “bulk-inserts”, ” Iteration: ” : 27 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95241”), “user_id” : 28, “name” : “bulk-inserts”, ” Iteration: ” : 28 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95242”), “user_id” : 29, “name” : “bulk-inserts”, ” Iteration: ” : 29 }
Type “it” for more
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY> db.shard_col.count()
179762
rs2:SECONDARY>
rs2:SECONDARY>
rs2:SECONDARY>
bye
[root@ip-10-0-0-197 bin]# mongo --port 39000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:39000/test
Server has startup warnings:
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.981+0000 I CONTROL [initandlisten]
configsvr>
configsvr>
configsvr>
configsvr>
configsvr> sow dbs
2015-05-11T01:28:38.712+0000 E QUERY SyntaxError: Unexpected identifier
configsvr>
configsvr>
configsvr> show dbs
config 0.031GB
local 0.031GB
configsvr>
configsvr>
configsvr>
configsvr>
configsvr> use config
switched to db config
configsvr>
configsvr>
configsvr>
configsvr> show collections
actionlog
changelog
chunks
collections
databases
lockpings
locks
mongos
settings
shards
system.indexes
tags
version
configsvr>
configsvr>
configsvr> db.chunks.find()
{ “_id” : “shard_db.shard_col-user_id_MinKey”, “lastmod” : Timestamp(2, 1), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”), “ns” : “shard_db.shard_col”, “min” : { “user_id” : { “$minKey” : 1 } }, “max” : { “user_id” : 2 }, “shard” : “rs1” }
{ “_id” : “shard_db.shard_col-user_id_2.0”, “lastmod” : Timestamp(1, 2), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”), “ns” : “shard_db.shard_col”, “min” : { “user_id” : 2 }, “max” : { “user_id” : 10 }, “shard” : “rs1” }
{ “_id” : “shard_db.shard_col-user_id_10.0”, “lastmod” : Timestamp(3, 1), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”), “ns” : “shard_db.shard_col”, “min” : { “user_id” : 10 }, “max” : { “user_id” : 74908 }, “shard” : “rs2” }
{ “_id” : “shard_db.shard_col-user_id_74908.0”, “lastmod” : Timestamp(2, 3), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”), “ns” : “shard_db.shard_col”, “min” : { “user_id” : 74908 }, “max” : { “user_id” : 179771 }, “shard” : “rs2” }
{ “_id” : “shard_db.shard_col-user_id_179771.0”, “lastmod” : Timestamp(3, 0), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”), “ns” : “shard_db.shard_col”, “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “shard” : “rs1” }
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr> show dbs
config 0.031GB
local 0.031GB
configsvr>
configsvr>
configsvr>
configsvr> show collections
actionlog
changelog
chunks
collections
databases
lockpings
locks
mongos
settings
shards
system.indexes
tags
version
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
configsvr>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 27020
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:27020/test
Server has startup warnings:
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:00:07.962+0000 I CONTROL [initandlisten]
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> shwo dbs
2015-05-11T01:41:54.122+0000 E QUERY SyntaxError: Unexpected identifier
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> show dbs
local 1.031GB
shard_db 0.125GB
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> use local
switched to db local
rs2:PRIMARY>
rs2:PRIMARY> show collections
me
oplog.rs
startup_log
system.indexes
system.replset
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY> db.oplog.rs.count()
179769
rs2:PRIMARY>
rs2:PRIMARY> db.oplog.rs.find()
{ “ts” : Timestamp(1431306407, 1), “h” : NumberLong(0), “v” : 2, “op” : “n”, “ns” : “”, “o” : { “msg” : “initiating set” } }
{ “ts” : Timestamp(1431306607, 1), “h” : NumberLong(0), “v” : 2, “op” : “n”, “ns” : “”, “o” : { “msg” : “Reconfig set”, “version” : 2 } }
{ “ts” : Timestamp(1431306607, 2), “h” : NumberLong(0), “v” : 2, “op” : “n”, “ns” : “”, “o” : { “msg” : “Reconfig set”, “version” : 3 } }
{ “ts” : Timestamp(1431307149, 1), “h” : NumberLong(“-6174368718995324911”), “v” : 2, “op” : “c”, “ns” : “shard_db.$cmd”, “o” : { “create” : “shard_col” } }
{ “ts” : Timestamp(1431307149, 2), “h” : NumberLong(“4777573065741603845”), “v” : 2, “op” : “i”, “ns” : “shard_db.system.indexes”, “fromMigrate” : true, “o” : { “v” : 1, “key” : { “_id” : 1 }, “name” : “_id_”, “ns” : “shard_db.shard_col” } }
{ “ts” : Timestamp(1431307149, 3), “h” : NumberLong(“8156262230854707122”), “v” : 2, “op” : “i”, “ns” : “shard_db.system.indexes”, “fromMigrate” : true, “o” : { “v” : 1, “unique” : true, “key” : { “user_id” : 1 }, “name” : “user_id_1”, “ns” : “shard_db.shard_col” } }
{ “ts” : Timestamp(1431307149, 4), “h” : NumberLong(“6225450864238645690”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “fromMigrate” : true, “o” : { “_id” : ObjectId(“5550038cac4b3d5a22a9522f”), “user_id” : 10, “name” : “bulk-inserts”, ” Iteration: ” : 10 } }
{ “ts” : Timestamp(1431307150, 1), “h” : NumberLong(“4399906657609807135”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95230”), “user_id” : 11, “name” : “bulk-inserts”, ” Iteration: ” : 11 } }
{ “ts” : Timestamp(1431307150, 2), “h” : NumberLong(“-2803345288712299313”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95231”), “user_id” : 12, “name” : “bulk-inserts”, ” Iteration: ” : 12 } }
{ “ts” : Timestamp(1431307150, 3), “h” : NumberLong(“4221914379266546928”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95232”), “user_id” : 13, “name” : “bulk-inserts”, ” Iteration: ” : 13 } }
{ “ts” : Timestamp(1431307150, 4), “h” : NumberLong(“6636220462457355284”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95233”), “user_id” : 14, “name” : “bulk-inserts”, ” Iteration: ” : 14 } }
{ “ts” : Timestamp(1431307150, 5), “h” : NumberLong(“-3153401794801507759”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95234”), “user_id” : 15, “name” : “bulk-inserts”, ” Iteration: ” : 15 } }
{ “ts” : Timestamp(1431307150, 6), “h” : NumberLong(“-590673585599494391”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95235”), “user_id” : 16, “name” : “bulk-inserts”, ” Iteration: ” : 16 } }
{ “ts” : Timestamp(1431307150, 7), “h” : NumberLong(“6561129531032826818”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95236”), “user_id” : 17, “name” : “bulk-inserts”, ” Iteration: ” : 17 } }
{ “ts” : Timestamp(1431307150, 8), “h” : NumberLong(“-4360209413840436946”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95237”), “user_id” : 18, “name” : “bulk-inserts”, ” Iteration: ” : 18 } }
{ “ts” : Timestamp(1431307150, 9), “h” : NumberLong(“5073393576299742147”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95238”), “user_id” : 19, “name” : “bulk-inserts”, ” Iteration: ” : 19 } }
{ “ts” : Timestamp(1431307150, 10), “h” : NumberLong(“2865752663299291651”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a95239”), “user_id” : 20, “name” : “bulk-inserts”, ” Iteration: ” : 20 } }
{ “ts” : Timestamp(1431307150, 11), “h” : NumberLong(“-6716636981963233068”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a9523a”), “user_id” : 21, “name” : “bulk-inserts”, ” Iteration: ” : 21 } }
{ “ts” : Timestamp(1431307150, 12), “h” : NumberLong(“-3815483699654562552”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a9523b”), “user_id” : 22, “name” : “bulk-inserts”, ” Iteration: ” : 22 } }
{ “ts” : Timestamp(1431307150, 13), “h” : NumberLong(“692450203411610997”), “v” : 2, “op” : “i”, “ns” : “shard_db.shard_col”, “o” : { “_id” : ObjectId(“5550038eac4b3d5a22a9523c”), “user_id” : 23, “name” : “bulk-inserts”, ” Iteration: ” : 23 } }
Type “it” for more
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
rs2:PRIMARY>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# head new_script.sh
##### Killing the existing Mongo processes ################
for i in `ps -ef | egrep 'shardsvr|configsvr|replSet|configdb' | grep -v egrep | awk -F" " '{print $2}'`; do kill -9 $i; done

##### Creating Mongo data & log files #####################

mkdir -p /data /data/1 /data/2 /data/3
rm -rf /data/*
cd /data/
mkdir -p config1 config2 config3 arbiter1 arbiter2 arbiter3 router /data/1/shard1_1 /data/1/shard1_2 /data/2/shard2_1 /data/2/shard2_2 /data/3/shard3_1 /data/3/shard3_2

[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
Using username “ec2-user”.
Authenticating with public key “imported-openssh-key”
Last login: Mon May 11 01:01:00 2015 from 49.205.87.121

__| __|_ )
_| ( / Amazon Linux AMI
___|\___|___|

https://aws.amazon.com/amazon-linux-ami/2015.03-release-notes/
11 package(s) needed for security, out of 36 available
Run “sudo yum update” to apply all updates.
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$ sudo su
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# for i in `ps -ef | egrep 'shardsvr|configsvr|replSet|configdb' | grep -v egrep | awk -F" " '{print $2}'`; do kill -9 $i; done
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# rm -rf /data/
[root@ip-10-0-0-197 ec2-user]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 1.3G 6.5G 16% /
devtmpfs 490M 56K 490M 1% /dev
tmpfs 499M 0 499M 0% /dev/shm
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# cat ~/.bashrc
# .bashrc

# User specific aliases and functions

alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'

# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi

export MONGODB_HOME=/home/ec2-user/mongodb-linux-x86_64-3.0.2
export PATH=$MONGODB_HOME/bin:$PATH
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# source ~/.bashrc
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# env | grep MONGODB_HOME
MONGODB_HOME=/home/ec2-user/mongodb-linux-x86_64-3.0.2
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# cd
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# cd /etc/
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]# vi mongod.conf
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]#
[root@ip-10-0-0-197 etc]# cd
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# cat /etc/mongod.conf

smallfiles = true
nojournal = true
quiet = true
port = 27017

logpath = /tmp/mongodb.txt
auth = true

[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongod --dbpath /data & ^C
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mkdir /data
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongod --dbpath /data &
[1] 16982
[root@ip-10-0-0-197 ~]# 2015-05-11T01:50:08.148+0000 I JOURNAL [initandlisten] journal dir=/data/journal
2015-05-11T01:50:08.148+0000 I JOURNAL [initandlisten] recover : no journal files present, no recovery needed
2015-05-11T01:50:08.291+0000 I JOURNAL [durability] Durability thread started
2015-05-11T01:50:08.291+0000 I JOURNAL [journal writer] Journal writer thread started
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] MongoDB starting : pid=16982 port=27017 dbpath=/data 64-bit host=ip-10-0-0-197
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten]
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten]
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is ‘always’.
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] ** We suggest setting it to ‘never’
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten]
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] db version v3.0.2
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] git version: 6201872043ecbbc0a4cc169b5482dcf385fc464f
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] build info: Linux build6.nj1.10gen.cc 2.6.32-431.3.1.el6.x86_64 #1 SMP Fri Jan 3 21:39:27 UTC 2014 x86_64 BOOST_LIB_VERSION=1_49
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] allocator: tcmalloc
2015-05-11T01:50:08.293+0000 I CONTROL [initandlisten] options: { storage: { dbPath: “/data” } }
2015-05-11T01:50:08.294+0000 I INDEX [initandlisten] allocating new ns file /data/local.ns, filling with zeroes…
2015-05-11T01:50:08.576+0000 I STORAGE [FileAllocator] allocating new datafile /data/local.0, filling with zeroes…
2015-05-11T01:50:08.576+0000 I STORAGE [FileAllocator] creating directory /data/_tmp
2015-05-11T01:50:08.580+0000 I STORAGE [FileAllocator] done allocating datafile /data/local.0, size: 64MB, took 0.001 secs
2015-05-11T01:50:08.586+0000 I NETWORK [initandlisten] waiting for connections on port 27017
^C
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# ps -ef | grep mongo
root 16323 11486 0 01:33 pts/0 00:00:00 mongo --port 10000
root 16982 16940 0 01:50 pts/2 00:00:00 mongod --dbpath /data
root 16994 16940 0 01:50 pts/2 00:00:00 grep mongo
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# kill -9 16323 16982
[root@ip-10-0-0-197 ~]# ps -ef | grep mongo
root 16996 16940 0 01:50 pts/2 00:00:00 grep mongo
[1]+ Killed mongod --dbpath /data
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongod --dbpath /data --f /etc/mongod.conf &
[1] 16997
[root@ip-10-0-0-197 ~]# Error parsing command line: unknown option f
try 'mongod --help' for more information

[1]+ Exit 2 mongod --dbpath /data --f /etc/mongod.conf
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongod --dbpath /data -f /etc/mongod.conf &
[1] 16998
[root@ip-10-0-0-197 ~]#
[1]+ Exit 100 mongod --dbpath /data -f /etc/mongod.conf
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# c
bash: c: command not found
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# cat /tmp/mongodb.txt
2015-05-11T01:51:13.749+0000 W - [initandlisten] Detected unclean shutdown - /data/mongod.lock is not empty.
2015-05-11T01:51:13.758+0000 I STORAGE [initandlisten] **************
2015-05-11T01:51:13.759+0000 I STORAGE [initandlisten] Error: journal files are present in journal directory, yet starting without journaling enabled.
2015-05-11T01:51:13.759+0000 I STORAGE [initandlisten] It is recommended that you start with journaling enabled so that recovery may occur.
2015-05-11T01:51:13.759+0000 I STORAGE [initandlisten] **************
2015-05-11T01:51:13.759+0000 I STORAGE [initandlisten] exception in initAndListen: 13597 can't start without --journal enabled when journal/ files are present, terminating
2015-05-11T01:51:13.759+0000 I CONTROL [initandlisten] dbexit: rc: 100
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# rm -rf /data/*
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongod --dbpath /data -f /etc/mongod.conf &
[1] 17003
[root@ip-10-0-0-197 ~]# 2015-05-11T01:52:19.870+0000 I CONTROL log file "/tmp/mongodb.txt" exists; moved to "/tmp/mongodb.txt.2015-05-11T01-52-19".

[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# ps -ef | grep mogo
root 17013 16940 0 01:52 pts/2 00:00:00 grep mogo
[root@ip-10-0-0-197 ~]# ps -ef | grep mongo
root 17003 16940 0 01:52 pts/2 00:00:00 mongod --dbpath /data -f /etc/mongod.conf
root 17015 16940 0 01:52 pts/2 00:00:00 grep mongo
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongo
MongoDB shell version: 3.0.2
connecting to: test
>
>
>
>
>
>
>
>
> use admin
switched to db admin
>
>
>
>
>
>
> use admin
switched to db admin
> db.createUser(
... {
... user: "usr",
... pwd: "pwd",
... roles: [ "readWriteAdminAnyDatabase",
... "dbAdminAnyDatabase",
... "userAdminAnyDatabase" ]
... }
... )
2015-05-11T02:00:03.294+0000 E QUERY Error: couldn't add user: No role named readWriteAdminAnyDatabase@admin
at Error (<anonymous>)
at DB.createUser (src/mongo/shell/db.js:1066:11)
at (shell):1:4 at src/mongo/shell/db.js:1066
>
>
> use admin
switched to db admin
> db.createUser(
... {
... user: "usr",
... pwd: "pwd",
... roles: [ "readWriteAnyDatabase",
... "dbAdminAnyDatabase",
... "userAdminAnyDatabase" ]
... }
... )
Successfully added user: {
“user” : “usr”,
“roles” : [
“readWriteAnyDatabase”,
“dbAdminAnyDatabase”,
“userAdminAnyDatabase”
]
}
>
>
>
>
>
>
>
bye
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongo
MongoDB shell version: 3.0.2
connecting to: test
>
>
> show dbs
2015-05-11T02:00:36.460+0000 E QUERY Error: listDatabases failed:{
“ok” : 0,
“errmsg” : “not authorized on admin to execute command { listDatabases: 1.0 }”,
“code” : 13
}
at Error (<anonymous>)
at Mongo.getDBs (src/mongo/shell/mongo.js:47:15)
at shellHelper.show (src/mongo/shell/utils.js:630:33)
at shellHelper (src/mongo/shell/utils.js:524:36)
at (shellhelp2):1:1 at src/mongo/shell/mongo.js:47
>
>
>
>
> db.auth("usr","pwd")
Error: 18 Authentication failed.
0
>
>
>
>
>
bye
[root@ip-10-0-0-197 ~]# mongo ip-10-0-0-197:27017/admin -u usr -p
MongoDB shell version: 3.0.2
Enter password:
connecting to: ip-10-0-0-197:27017/admin
>
>
>
> show dbs
admin 0.031GB
local 0.031GB
>
>
>
bye
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongo ip-10-0-0-197:27017/admin
MongoDB shell version: 3.0.2
connecting to: ip-10-0-0-197:27017/admin
>
>
>
> show dbs
2015-05-11T02:02:39.452+0000 E QUERY Error: listDatabases failed:{
“ok” : 0,
“errmsg” : “not authorized on admin to execute command { listDatabases: 1.0 }”,
“code” : 13
}
at Error (<anonymous>)
at Mongo.getDBs (src/mongo/shell/mongo.js:47:15)
at shellHelper.show (src/mongo/shell/utils.js:630:33)
at shellHelper (src/mongo/shell/utils.js:524:36)
at (shellhelp2):1:1 at src/mongo/shell/mongo.js:47
>
>
> db.auth("usr","pwd")
1
>
>
>
> show dbs
admin 0.031GB
local 0.031GB
>
>
>
bye
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# vi /tmp/mykeyfile
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# cat /tmp/mykeyfile
jsdhfkjashdfhasjdfkasdjfhakjsdhfkhasdkljfhakjsdhfjkaskdjfhksadhfkasdh
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongo ip-10-0-0-197:27017/admin
MongoDB shell version: 3.0.2
connecting to: ip-10-0-0-197:27017/admin
>
bye
[root@ip-10-0-0-197 ~]#
[root@ip-10-0-0-197 ~]# mongo ip-10-0-0-197:27017/admin -u usr -p pwd
MongoDB shell version: 3.0.2
connecting to: ip-10-0-0-197:27017/admin
>
>
> show dbs
admin 0.031GB
local 0.031GB
>
>
> use amdin
switched to db amdin
>
>
> show collections
>
>
> use admin
switched to db admin
>
>
> show collections
system.indexes
system.users
system.version
>
>
> db.system.users.find()
{ “_id” : “admin.usr”, “user” : “usr”, “db” : “admin”, “credentials” : { “SCRAM-SHA-1” : { “iterationCount” : 10000, “salt” : “mdXNHVjkE+GIuUYCB/cAxQ==”, “storedKey” : “AgMRBewp0Bz+WLrhD2m6Cb1JpyM=”, “serverKey” : “TktSort4/h9n67cgRNGuphRlvnc=” } }, “roles” : [ { “role” : “readWriteAnyDatabase”, “db” : “admin” }, { “role” : “dbAdminAnyDatabase”, “db” : “admin” }, { “role” : “userAdminAnyDatabase”, “db” : “admin” } ] }
>
>
———————–
Using username “ec2-user”.
Authenticating with public key “imported-openssh-key”
Last login: Sun May 10 17:45:00 2015 from 49.205.126.24

__| __|_ )
_| ( / Amazon Linux AMI
___|\___|___|

https://aws.amazon.com/amazon-linux-ami/2015.03-release-notes/
11 package(s) needed for security, out of 36 available
Run “sudo yum update” to apply all updates.
grep: write error
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$
[ec2-user@ip-10-0-0-197 ~]$ sudo su
grep: write error
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 7.8G 0 100% /
devtmpfs 490M 56K 490M 1% /dev
tmpfs 499M 0 499M 0% /dev/shm
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]#
[root@ip-10-0-0-197 ec2-user]# cd /data
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]# cd /home/ec2-user/mongodb-linux-x86_64-3.0.2/bin/
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ./mongo --port 10000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:10000/test
Server has startup warnings:
2015-05-10T16:44:26.406+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-10T16:44:26.406+0000 I CONTROL
mongos>
mongos> sh.help()
sh.addShard( host ) server:port OR setname/server:port
sh.enableSharding(dbname) enables sharding on the database dbname
sh.shardCollection(fullName,key,unique) shards the collection
sh.splitFind(fullName,find) splits the chunk that find is in at the median
sh.splitAt(fullName,middle) splits the chunk that middle is in at middle
sh.moveChunk(fullName,find,to) move the chunk where ‘find’ is to ‘to’ (name of shard)
sh.setBalancerState( <bool on or not> ) turns the balancer on or off true=on, false=off
sh.getBalancerState() return true if enabled
sh.isBalancerRunning() return true if the balancer has work in progress on any mongos
sh.disableBalancing(coll) disable balancing on one collection
sh.enableBalancing(coll) re-enable balancing on one collection
sh.addShardTag(shard,tag) adds the tag to the shard
sh.removeShardTag(shard,tag) removes the tag from the shard
sh.addTagRange(fullName,min,max,tag) tags the specified range of the given collection
sh.removeTagRange(fullName,min,max,tag) removes the tagged range of the given collection
sh.status() prints a general overview of the cluster
mongos>
mongos> sh.remove
sh.removeShardTag( sh.removeTagRange(
mongos> sh.remove
sh.removeShardTag( sh.removeTagRange(
mongos>
mongos>
bye
[root@ip-10-0-0-197 bin]# cd /data/
[root@ip-10-0-0-197 data]# ll -lhtr
total 40K
drwxr-xr-x 2 root root 4.0K May 10 16:39 router
drwxr-xr-x 4 root root 4.0K May 10 16:39 3
drwxr-xr-x 4 root root 4.0K May 10 16:39 2
drwxr-xr-x 4 root root 4.0K May 10 16:39 1
drwxr-xr-x 3 root root 4.0K May 10 16:39 arbiter1
drwxr-xr-x 3 root root 4.0K May 10 16:39 arbiter3
drwxr-xr-x 3 root root 4.0K May 10 16:39 arbiter2
drwxr-xr-x 3 root root 4.0K May 10 16:44 config2
drwxr-xr-x 3 root root 4.0K May 10 16:44 config1
drwxr-xr-x 3 root root 4.0K May 10 16:44 config3
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]# rm -rf 3/
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]# df -h /
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 7.8G 0 100% /
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]# cd /tmp/
[root@ip-10-0-0-197 tmp]# ll -lhtr
total 3.2M
drwxr-xr-x 2 root root 4.0K May 8 11:54 hsperfdata_root
-rw-r–r– 1 root root 2.7K May 10 16:11 router.log.2015-05-10T16-35-03
-rw-r–r– 1 root root 3.5K May 10 16:20 arbiter1.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 3.5K May 10 16:20 arbiter2.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 3.5K May 10 16:20 arbiter3.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 7.0K May 10 16:20 shard1_1.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 7.0K May 10 16:20 shard2_1.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 7.0K May 10 16:20 shard3_1.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 6.4K May 10 16:20 shard1_2.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 6.4K May 10 16:20 shard2_2.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 6.4K May 10 16:20 shard3_2.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 2.7K May 10 16:35 router.log.2015-05-10T16-36-25
-rw-r–r– 1 root root 3.5K May 10 16:38 router.log.2015-05-10T16-37-31
-rw-r–r– 1 root root 7.3K May 10 16:39 config3.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 7.3K May 10 16:39 config2.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 7.3K May 10 16:39 config1.log.2015-05-10T16-39-26
-rw-r–r– 1 root root 11K May 10 16:39 router.log.2015-05-10T16-44-26
srwx—— 1 root root 0 May 10 16:39 mongodb-27012.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27010.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39001.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27022.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39000.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27031.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27020.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27030.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39002.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27011.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27032.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27021.sock
srwx—— 1 root root 0 May 10 16:44 mongodb-10000.sock
-rw-r–r– 1 root root 3.5K May 10 16:52 arbiter2.log
-rw-r–r– 1 root root 6.4K May 10 16:52 shard2_2.log
-rw-r–r– 1 root root 7.8K May 10 17:36 shard1_2.log
-rw-r–r– 1 root root 12K May 10 17:45 shard2_1.log
-rw-r–r– 1 root root 416K May 10 17:45 shard1_1.log
-rw-r–r– 1 root root 592K May 10 17:50 arbiter1.log
-rw-r–r– 1 root root 1.6M May 10 19:06 router.log
-rw-r–r– 1 root root 172K May 10 19:06 config1.log
-rw-r–r– 1 root root 172K May 10 19:06 config3.log
-rw-r–r– 1 root root 172K May 10 19:06 config2.log
-rw-r–r– 1 root root 7.3K May 11 00:55 shard3_1.log
-rw-r–r– 1 root root 14K May 11 00:56 shard3_2.log
-rw-r–r– 1 root root 18K May 11 00:56 arbiter3.log
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# > *log*
bash: *log*: ambiguous redirect
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# > *.log*
bash: *.log*: ambiguous redirect
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# > *.log
bash: *.log: ambiguous redirect
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# rm -f *log.2015*
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# ll -lhtr
total 3.2M
drwxr-xr-x 2 root root 4.0K May 8 11:54 hsperfdata_root
srwx—— 1 root root 0 May 10 16:39 mongodb-27012.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27010.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39001.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27022.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39000.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27031.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27020.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27030.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-39002.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27011.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27032.sock
srwx—— 1 root root 0 May 10 16:39 mongodb-27021.sock
srwx—— 1 root root 0 May 10 16:44 mongodb-10000.sock
-rw-r–r– 1 root root 3.5K May 10 16:52 arbiter2.log
-rw-r–r– 1 root root 6.4K May 10 16:52 shard2_2.log
-rw-r–r– 1 root root 7.8K May 10 17:36 shard1_2.log
-rw-r–r– 1 root root 12K May 10 17:45 shard2_1.log
-rw-r–r– 1 root root 416K May 10 17:45 shard1_1.log
-rw-r–r– 1 root root 592K May 10 17:50 arbiter1.log
-rw-r–r– 1 root root 1.6M May 10 19:06 router.log
-rw-r–r– 1 root root 172K May 10 19:06 config1.log
-rw-r–r– 1 root root 172K May 10 19:06 config3.log
-rw-r–r– 1 root root 172K May 10 19:06 config2.log
-rw-r–r– 1 root root 7.3K May 11 00:55 shard3_1.log
-rw-r–r– 1 root root 14K May 11 00:56 shard3_2.log
-rw-r–r– 1 root root 100K May 11 00:57 arbiter3.log
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 5.8G 1.9G 76% /
devtmpfs 490M 56K 490M 1% /dev
tmpfs 499M 0 499M 0% /dev/shm
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]#
[root@ip-10-0-0-197 tmp]# cd -
/data
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]#
[root@ip-10-0-0-197 data]# cd /home/ec2-user/mongodb-linux-x86_64-3.0.2/bin/
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ./mongo 27010
MongoDB shell version: 3.0.2
connecting to: 27010
2015-05-11T00:57:59.189+0000 W NETWORK Failed to connect to 127.0.0.1:27017, reason: errno:111 Connection refused
2015-05-11T00:57:59.191+0000 E QUERY Error: couldn’t connect to server 127.0.0.1:27017 (127.0.0.1), connection attempt failed
at connect (src/mongo/shell/mongo.js:179:14)
at (connect):1:6 at src/mongo/shell/mongo.js:179
exception: connect failed
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ./mongo 27020
MongoDB shell version: 3.0.2
connecting to: 27020
2015-05-11T00:58:06.533+0000 W NETWORK Failed to connect to 127.0.0.1:27017, reason: errno:111 Connection refused
2015-05-11T00:58:06.535+0000 E QUERY Error: couldn’t connect to server 127.0.0.1:27017 (127.0.0.1), connection attempt failed
at connect (src/mongo/shell/mongo.js:179:14)
at (connect):1:6 at src/mongo/shell/mongo.js:179
exception: connect failed
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ./mongo 27030
MongoDB shell version: 3.0.2
connecting to: 27030
2015-05-11T00:58:10.231+0000 W NETWORK Failed to connect to 127.0.0.1:27017, reason: errno:111 Connection refused
2015-05-11T00:58:10.232+0000 E QUERY Error: couldn’t connect to server 127.0.0.1:27017 (127.0.0.1), connection attempt failed
at connect (src/mongo/shell/mongo.js:179:14)
at (connect):1:6 at src/mongo/shell/mongo.js:179
exception: connect failed
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# ll -lhtr
total 113M
-rwxr-xr-x 1 1046 1046 4.1M Apr 8 20:39 bsondump
-rwxr-xr-x 1 1046 1046 5.6M Apr 8 20:39 mongostat
-rwxr-xr-x 1 1046 1046 5.6M Apr 8 20:39 mongofiles
-rwxr-xr-x 1 1046 1046 5.7M Apr 8 20:39 mongoexport
-rwxr-xr-x 1 1046 1046 5.8M Apr 8 20:39 mongoimport
-rwxr-xr-x 1 1046 1046 6.0M Apr 8 20:39 mongorestore
-rwxr-xr-x 1 1046 1046 5.8M Apr 8 20:39 mongodump
-rwxr-xr-x 1 1046 1046 5.4M Apr 8 20:39 mongotop
-rwxr-xr-x 1 1046 1046 5.3M Apr 8 20:39 mongooplog
-rwxr-xr-x 1 1046 1046 22M Apr 8 20:46 mongoperf
-rwxr-xr-x 1 1046 1046 22M Apr 8 20:46 mongod
-rwxr-xr-x 1 1046 1046 11M Apr 8 20:46 mongos
-rwxr-xr-x 1 1046 1046 12M Apr 8 20:46 mongo
-rw-r–r– 1 root root 4.3K May 10 15:13 shard_creation_script.sh
-rw-r–r– 1 root root 4.3K May 10 15:36 new_shard_creation_script.sh
-rw-r–r– 1 root root 3.9K May 10 15:56 NSS.sh
-rw-r–r– 1 root root 3.9K May 10 16:06 shard-script.sh
-rw-r–r– 1 root root 4.0K May 10 16:39 new_script.sh
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# vi new_script.sh
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# rm -rf /tmp/*
[root@ip-10-0-0-197 bin]# rm -rf /dat/*
[root@ip-10-0-0-197 bin]# rm -rf /data/*
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# sh -x new_script.sh
++ awk ‘-F ‘ ‘{print $2}’
++ grep -v egrep
++ egrep ‘shardsvr|configsvr|replSet|configdb’
++ ps -ef
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18909
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18910
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18911
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18912
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18914
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18915
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18918
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18919
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 18920
+ for i in ‘`ps -ef | egrep ‘\”shardsvr|configsvr|replSet|configdb’\” | grep -v egrep | awk -F” ” ‘\”{print $2}’\”`’
+ kill -9 19058
+ mkdir -p /data /data/1 /data/2 /data/3
+ rm -rf /data/1 /data/2 /data/3
+ cd /data/
+ mkdir -p config1 config2 config3 arbiter1 arbiter2 arbiter3 router /data/1/shard1_1 /data/1/shard1_2 /data/2/shard2_1 /data/2/shard2_2 /data/3/shard3_1 /data/3/shard3_2
+ cd /home/ec2-user/mongodb-linux-x86_64-3.0.2/bin
+ sleep 200
+ ./mongod --shardsvr --replSet rs2 --dbpath /data/2/shard2_1 --logpath /tmp/shard2_1.log --port 27020 --config /etc/mongod.conf
+ ./mongod --shardsvr --replSet rs2 --dbpath /data/2/shard2_2 --logpath /tmp/shard2_2.log --port 27021 --config /etc/mongod.conf
+ ./mongod --replSet rs1 --dbpath /data/arbiter1 --logpath /tmp/arbiter1.log --port 27012 --config /etc/mongod.conf
+ ./mongod --replSet rs2 --dbpath /data/arbiter2 --logpath /tmp/arbiter2.log --port 27022 --config /etc/mongod.conf
+ ./mongod --shardsvr --replSet rs1 --dbpath /data/1/shard1_2 --logpath /tmp/shard1_2.log --port 27011 --config /etc/mongod.conf
+ ./mongod --shardsvr --replSet rs1 --dbpath /data/1/shard1_1 --logpath /tmp/shard1_1.log --port 27010 --config /etc/mongod.conf
+ ./mongod --configsvr --dbpath /data/config3 --logpath /tmp/config3.log --port 39002 --config /etc/mongod.conf
+ ./mongod --configsvr --dbpath /data/config2 --logpath /tmp/config2.log --port 39001 --config /etc/mongod.conf
+ ./mongod --configsvr --dbpath /data/config1 --logpath /tmp/config1.log --port 39000 --config /etc/mongod.conf

+ sleep 200
+ ./mongos --configdb ec2-52-7-8-107.compute-1.amazonaws.com:39000,ec2-52-7-8-107.compute-1.amazonaws.com:39001,ec2-52-7-8-107.compute-1.amazonaws.com:39002 --logpath /tmp/router.log --port 10000
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval 'rs.initiate()'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin
[object Object]
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval 'rs.initiate()'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin
[object Object]
+ sleep 200
+ echo -e '\n\n Replica sets are being added. \n\n'
Replica sets are being added.
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval 'rs.add("ec2-52-7-8-107.compute-1.amazonaws.com:27011")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin
[object Object]
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval 'rs.add("ec2-52-7-8-107.compute-1.amazonaws.com:27021")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin
[object Object]
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin --eval 'rs.addArb("ec2-52-7-8-107.compute-1.amazonaws.com:27012")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27010/admin
[object Object]
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin --eval 'rs.addArb("ec2-52-7-8-107.compute-1.amazonaws.com:27022")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:27020/admin
[object Object]
+ sleep 200
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin --eval 'sh.addShard("rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin
[object Object]
+ ./mongo ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin --eval 'sh.addShard("rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021")'
MongoDB shell version: 3.0.2
connecting to: ec2-52-7-8-107.compute-1.amazonaws.com:10000/admin
[object Object]
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 10000
bash: mongo: command not found
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# source ~/.bashrc
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 10000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:10000/test
Server has startup warnings:
2015-05-11T01:03:27.551+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:03:27.551+0000 I CONTROL
mongos>
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“554fffe05384fafde4642cd9”)
}
shards:
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ “_id” : “admin”, “partitioned” : false, “primary” : “config” }

mongos>
mongos> sh.addShard("rs1/ip-10-0-0-197:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011")
{ "shardAdded" : "rs1", "ok" : 1 }
mongos> sh.addShard("rs2/ip-10-0-0-197:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021")
{ "shardAdded" : "rs2", "ok" : 1 }
mongos>
mongos>
mongos>
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“554fffe05384fafde4642cd9”)
}
shards:
{ “_id” : “rs1”, “host” : “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010” }
{ “_id” : “rs2”, “host” : “rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27021,ip-10-0-0-197:27020” }
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ “_id” : “admin”, “partitioned” : false, “primary” : “config” }

mongos>
mongos>
mongos>
mongos> use shard_db
switched to db shard_db
mongos> db.shard_col.ensureIndex( { user_id : 1 }, { unique : true })u
2015-05-11T01:18:22.114+0000 E QUERY SyntaxError: Unexpected identifier
mongos>
mongos>
mongos> db.shard_col.ensureIndex( { user_id : 1 }, { unique : true })
{
“raw” : {
“rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010” : {
“createdCollectionAutomatically” : true,
“numIndexesBefore” : 1,
“numIndexesAfter” : 2,
“ok” : 1,
“$gleStats” : {
“lastOpTime” : Timestamp(1431307104, 2),
“electionId” : ObjectId(“555000a7c25ee1ddcace86ce”)
}
}
},
“ok” : 1
}
mongos>
mongos>
mongos> sh.enableSharding("shard_db")
{ "ok" : 1 }
mongos>
mongos> sh.shardCollection("shard_db.shard_col", {"user_id":1})
{ "collectionsharded" : "shard_db.shard_col", "ok" : 1 }
mongos>
mongos>
mongos> sh.startBalancer()
mongos> sh.getBalancerState()
true
mongos> sh.isBalancerRunning()
false
mongos>
mongos>
mongos> for(var i = 1; i <= 100000 ; i++){db.shard_col.insert({"user_id" : i , "name" : "bulk-inserts", " Iteration: " : i });}
WriteResult({ "nInserted" : 1 })
mongos>
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“554fffe05384fafde4642cd9”)
}
shards:
{ “_id” : “rs1”, “host” : “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010” }
{ “_id” : “rs2”, “host” : “rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27021,ip-10-0-0-197:27020” }
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
1 : Success
databases:
{ “_id” : “admin”, “partitioned” : false, “primary” : “config” }
{ “_id” : “shard_db”, “partitioned” : true, “primary” : “rs1” }
shard_db.shard_col
shard key: { “user_id” : 1 }
chunks:
rs1 2
rs2 1
{ “user_id” : { “$minKey” : 1 } } –>> { “user_id” : 2 } on : rs1 Timestamp(2, 1)
{ “user_id” : 2 } –>> { “user_id” : 10 } on : rs1 Timestamp(1, 2)
{ “user_id” : 10 } –>> { “user_id” : { “$maxKey” : 1 } } on : rs2 Timestamp(2, 0)

mongos>
mongos>
mongos>
mongos>
mongos>
mongos> show dbs
admin (empty)
config 0.016GB
shard_db 0.094GB
mongos>
mongos>
mongos> use shard_db
switched to db shard_db
mongos>
mongos>
mongos>
mongos>
mongos> db.shard_col.find({ “user_id” : 10 }).explain()
{
“queryPlanner” : {
“mongosPlannerVersion” : 1,
“winningPlan” : {
“stage” : “SINGLE_SHARD”,
“shards” : [
{
“shardName” : “rs2”,
“connectionString” : “rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27021,ip-10-0-0-197:27020”,
“serverInfo” : {
“host” : “ip-10-0-0-197”,
“port” : 27020,
“version” : “3.0.2”,
“gitVersion” : “6201872043ecbbc0a4cc169b5482dcf385fc464f”
},
“plannerVersion” : 1,
“namespace” : “shard_db.shard_col”,
“indexFilterSet” : false,
“parsedQuery” : {
“user_id” : {
“$eq” : 10
}
},
“winningPlan” : {
“stage” : “FETCH”,
“inputStage” : {
“stage” : “SHARDING_FILTER”,
“inputStage” : {
“stage” : “IXSCAN”,
“keyPattern” : {
“user_id” : 1
},
“indexName” : “user_id_1”,
“isMultiKey” : false,
“direction” : “forward”,
“indexBounds” : {
“user_id” : [
“[10.0, 10.0]”
]
}
}
}
},
“rejectedPlans” : [ ]
}
]
}
},
“ok” : 1
}
mongos>
mongos>
mongos>
mongos>
mongos> db.shard_col.find({ “user_id” : 9 }).explain()
{
“queryPlanner” : {
“mongosPlannerVersion” : 1,
“winningPlan” : {
“stage” : “SINGLE_SHARD”,
“shards” : [
{
“shardName” : “rs1”,
“connectionString” : “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010”,
“serverInfo” : {
“host” : “ip-10-0-0-197”,
“port” : 27010,
“version” : “3.0.2”,
“gitVersion” : “6201872043ecbbc0a4cc169b5482dcf385fc464f”
},
“plannerVersion” : 1,
“namespace” : “shard_db.shard_col”,
“indexFilterSet” : false,
“parsedQuery” : {
“user_id” : {
“$eq” : 9
}
},
“winningPlan” : {
“stage” : “FETCH”,
“inputStage” : {
“stage” : “SHARDING_FILTER”,
“inputStage” : {
“stage” : “IXSCAN”,
“keyPattern” : {
“user_id” : 1
},
“indexName” : “user_id_1”,
“isMultiKey” : false,
“direction” : “forward”,
“indexBounds” : {
“user_id” : [
“[9.0, 9.0]”
]
}
}
}
},
“rejectedPlans” : [ ]
}
]
}
},
“ok” : 1
}
mongos>
mongos>
mongos>
mongos>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 5.8G 1.9G 76% /
devtmpfs 490M 56K 490M 1% /dev
tmpfs 499M 0 499M 0% /dev/shm
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 10000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:10000/test
Server has startup warnings:
2015-05-11T01:03:27.551+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:03:27.551+0000 I CONTROL
mongos>
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> use shard_db
switched to db shard_db
mongos>
mongos>
mongos>
mongos> for(var i = 100001; i <= 120000 ; i++){db.shard_col.insert({"user_id" : i , "name" : "bulk-inserts", " Iteration: " : i });}
WriteResult({ "nInserted" : 1 })
mongos>
mongos>
mongos>
mongos> for(var i = 120001; i <= 200000 ; i++){db.shard_col.insert({"user_id" : i , "name" : "bulk-inserts", " Iteration: " : i });}

WriteResult({ "nInserted" : 1 })
mongos>
mongos>
bye
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/xvda1 7.8G 6.0G 1.8G 78% /
devtmpfs 490M 56K 490M 1% /dev
tmpfs 499M 0 499M 0% /dev/shm
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]#
[root@ip-10-0-0-197 bin]# mongo --port 10000
MongoDB shell version: 3.0.2
connecting to: 127.0.0.1:10000/test
Server has startup warnings:
2015-05-11T01:03:27.551+0000 I CONTROL ** WARNING: You are running this process as the root user, which is not recommended.
2015-05-11T01:03:27.551+0000 I CONTROL
mongos>
mongos>
mongos>
mongos>
mongos> use shard_db
switched to db shard_db
mongos>
mongos>
mongos>
mongos> db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } })
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522f”), “user_id” : 10, “name” : “bulk-inserts”, ” Iteration: ” : 10 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522a”), “user_id” : 5, “name” : “bulk-inserts”, ” Iteration: ” : 5 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95230”), “user_id” : 11, “name” : “bulk-inserts”, ” Iteration: ” : 11 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522b”), “user_id” : 6, “name” : “bulk-inserts”, ” Iteration: ” : 6 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95231”), “user_id” : 12, “name” : “bulk-inserts”, ” Iteration: ” : 12 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522c”), “user_id” : 7, “name” : “bulk-inserts”, ” Iteration: ” : 7 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95232”), “user_id” : 13, “name” : “bulk-inserts”, ” Iteration: ” : 13 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522d”), “user_id” : 8, “name” : “bulk-inserts”, ” Iteration: ” : 8 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95233”), “user_id” : 14, “name” : “bulk-inserts”, ” Iteration: ” : 14 }
{ “_id” : ObjectId(“5550038cac4b3d5a22a9522e”), “user_id” : 9, “name” : “bulk-inserts”, ” Iteration: ” : 9 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95234”), “user_id” : 15, “name” : “bulk-inserts”, ” Iteration: ” : 15 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95235”), “user_id” : 16, “name” : “bulk-inserts”, ” Iteration: ” : 16 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95236”), “user_id” : 17, “name” : “bulk-inserts”, ” Iteration: ” : 17 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95237”), “user_id” : 18, “name” : “bulk-inserts”, ” Iteration: ” : 18 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95238”), “user_id” : 19, “name” : “bulk-inserts”, ” Iteration: ” : 19 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a95239”), “user_id” : 20, “name” : “bulk-inserts”, ” Iteration: ” : 20 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523a”), “user_id” : 21, “name” : “bulk-inserts”, ” Iteration: ” : 21 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523b”), “user_id” : 22, “name” : “bulk-inserts”, ” Iteration: ” : 22 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523c”), “user_id” : 23, “name” : “bulk-inserts”, ” Iteration: ” : 23 }
{ “_id” : ObjectId(“5550038eac4b3d5a22a9523d”), “user_id” : 24, “name” : “bulk-inserts”, ” Iteration: ” : 24 }
mongos>
mongos>
mongos>
mongos> db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } }).explin()
2015-05-11T01:33:53.955+0000 E QUERY TypeError: Object DBQuery: shard_db.shard_col -> { “user_id” : { “$gte” : 5, “$lt” : 25 } } has no method ‘explin’
at (shell):1:58
mongos>
mongos>
mongos> db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } }).explain()
{
“queryPlanner” : {
“mongosPlannerVersion” : 1,
“winningPlan” : {
“stage” : “SHARD_MERGE”,
“shards” : [
{
“shardName” : “rs1”,
“connectionString” : “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010”,
“serverInfo” : {
“host” : “ip-10-0-0-197”,
“port” : 27010,
“version” : “3.0.2”,
“gitVersion” : “6201872043ecbbc0a4cc169b5482dcf385fc464f”
},
“plannerVersion” : 1,
“namespace” : “shard_db.shard_col”,
“indexFilterSet” : false,
“parsedQuery” : {
“$and” : [
{
“user_id” : {
“$lt” : 25
}
},
{
“user_id” : {
“$gte” : 5
}
}
]
},
“winningPlan” : {
“stage” : “FETCH”,
“inputStage” : {
“stage” : “SHARDING_FILTER”,
“inputStage” : {
“stage” : “IXSCAN”,
“keyPattern” : {
“user_id” : 1
},
“indexName” : “user_id_1”,
“isMultiKey” : false,
“direction” : “forward”,
“indexBounds” : {
“user_id” : [
“[5.0, 25.0)”
]
}
}
}
},
“rejectedPlans” : [ ]
},
{
“shardName” : “rs2”,
“connectionString” : “rs2/ec2-52-7-8-107.compute-1.amazonaws.com:27021,ip-10-0-0-197:27020”,
“serverInfo” : {
“host” : “ip-10-0-0-197”,
“port” : 27020,
“version” : “3.0.2”,
“gitVersion” : “6201872043ecbbc0a4cc169b5482dcf385fc464f”
},
“plannerVersion” : 1,
“namespace” : “shard_db.shard_col”,
“indexFilterSet” : false,
“parsedQuery” : {
“$and” : [
{
“user_id” : {
“$lt” : 25
}
},
{
“user_id” : {
“$gte” : 5
}
}
]
},
“winningPlan” : {
“stage” : “FETCH”,
“inputStage” : {
“stage” : “SHARDING_FILTER”,
“inputStage” : {
“stage” : “IXSCAN”,
“keyPattern” : {
“user_id” : 1
},
“indexName” : “user_id_1”,
“isMultiKey” : false,
“direction” : “forward”,
“indexBounds” : {
“user_id” : [
“[5.0, 25.0)”
]
}
}
}
},
“rejectedPlans” : [ ]
}
]
}
},
“ok” : 1
}
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } }).count()
20
mongos> use non_shard_db
switched to db non_shard_db
mongos> for(var i = 1; i <= 1000 ; i++){db.shard_col.insert({“user_id” : i , “name” : “bulk-inserts”, ” Iteration: ” : i });}
WriteResult({ “nInserted” : 1 })
mongos>
mongos>
mongos>
mongos>
mongos> db.shard_col.find( { user_id : { $gte : 5, $lt : 25 } }).explain()
{
“queryPlanner” : {
“mongosPlannerVersion” : 1,
“winningPlan” : {
“stage” : “SINGLE_SHARD”,
“shards” : [
{
“shardName” : “rs1”,
“connectionString” : “rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010”,
“serverInfo” : {
“host” : “ip-10-0-0-197”,
“port” : 27010,
“version” : “3.0.2”,
“gitVersion” : “6201872043ecbbc0a4cc169b5482dcf385fc464f”
},
“plannerVersion” : 1,
“namespace” : “non_shard_db.shard_col”,
“indexFilterSet” : false,
“parsedQuery” : {
“$and” : [
{
“user_id” : {
“$lt” : 25
}
},
{
“user_id” : {
“$gte” : 5
}
}
]
},
“winningPlan” : {
“stage” : “COLLSCAN”,
“filter” : {
“$and” : [
{
“user_id” : {
“$lt” : 25
}
},
{
“user_id” : {
“$gte” : 5
}
}
]
},
“direction” : “forward”
},
“rejectedPlans” : [ ]
}
]
}
},
“ok” : 1
}
mongos>
mongos>
mongos>
mongos> db
non_shard_db
mongos>
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> show dbs
admin (empty)
config 0.016GB
non_shard_db 0.031GB
shard_db 0.156GB
mongos>
mongos>
mongos>
mongos>
mongos>
mongos>
mongos> use configg
switched to db configg
mongos>
mongos>
mongos>
mongos>
mongos> use config
switched to db config
mongos>
mongos>
mongos>
mongos>
mongos> show ds
2015-05-11T01:38:53.067+0000 E QUERY Error: don’t know how to show [ds]
at Error (<anonymous>)
at shellHelper.show (src/mongo/shell/utils.js:733:11)
at shellHelper (src/mongo/shell/utils.js:524:36)
at (shellhelp2):1:1 at src/mongo/shell/utils.js:733
mongos>
mongos>
mongos> show dbs
admin (empty)
config 0.016GB
non_shard_db 0.031GB
shard_db 0.156GB
mongos>
mongos>
mongos>
mongos>
mongos> use config
switched to db config
mongos>
mongos>
mongos> shwo collections
2015-05-11T01:39:17.799+0000 E QUERY SyntaxError: Unexpected identifier
mongos>
mongos>
mongos> show collections
actionlog
changelog
chunks
collections
databases
lockpings
locks
mongos
settings
shards
system.indexes
tags
version
mongos>
mongos> db.settings.find()
{ “_id” : “chunksize”, “value” : 64 }
{ “_id” : “balancer”, “stopped” : false }
mongos>
mongos>
mongos> db.changelog.find()
{ “_id” : “ip-10-0-0-197-2015-05-11T01:03:28-554fffe05384fafde4642cd8”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:03:28.469Z”), “what” : “starting upgrade of config database”, “ns” : “config.version”, “details” : { “from” : 0, “to” : 6 } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:03:28-554fffe05384fafde4642cda”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:03:28.529Z”), “what” : “finished upgrade of config database”, “ns” : “config.version”, “details” : { “from” : 0, “to” : 6 } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:17:44-555003385384fafde4642d88”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:17:44.353Z”), “what” : “addShard”, “ns” : “”, “details” : { “name” : “rs1”, “host” : “rs1/ip-10-0-0-197:27010,ec2-52-7-8-107.compute-1.amazonaws.com:27011” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:17:52-555003405384fafde4642d89”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:17:52.807Z”), “what” : “addShard”, “ns” : “”, “details” : { “name” : “rs2”, “host” : “rs2/ip-10-0-0-197:27020,ec2-52-7-8-107.compute-1.amazonaws.com:27021” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:18:43-555003735384fafde4642d96”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:18:43.905Z”), “what” : “shardCollection.start”, “ns” : “shard_db.shard_col”, “details” : { “shardKey” : { “user_id” : 1 }, “collection” : “shard_db.shard_col”, “primary” : “rs1:rs1/ec2-52-7-8-107.compute-1.amazonaws.com:27011,ip-10-0-0-197:27010”, “initShards” : [ ], “numChunks” : 1 } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:18:43-555003735384fafde4642d98”, “server” : “ip-10-0-0-197”, “clientAddr” : “N/A”, “time” : ISODate(“2015-05-11T01:18:43.927Z”), “what” : “shardCollection”, “ns” : “shard_db.shard_col”, “details” : { “version” : “1|0||555003735384fafde4642d97” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:08-5550038cc25ee1ddcace86d0”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:08.788Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : { “$minKey” : 1 } }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 1, “of” : 3, “chunk” : { “min” : { “user_id” : { “$minKey” : 1 } }, “max” : { “user_id” : 2 }, “lastmod” : Timestamp(1, 1), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:08-5550038cc25ee1ddcace86d1”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:08.792Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : { “$minKey” : 1 } }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 2, “of” : 3, “chunk” : { “min” : { “user_id” : 2 }, “max” : { “user_id” : 10 }, “lastmod” : Timestamp(1, 2), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:08-5550038cc25ee1ddcace86d2”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:08.797Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : { “$minKey” : 1 } }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 3, “of” : 3, “chunk” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “lastmod” : Timestamp(1, 3), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:08-5550038cc25ee1ddcace86d4”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:08.842Z”), “what” : “moveChunk.start”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “from” : “rs1”, “to” : “rs2” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:10-5550038e5c36cdce695aeb3d”, “server” : “ip-10-0-0-197”, “clientAddr” : “:27017”, “time” : ISODate(“2015-05-11T01:19:10.900Z”), “what” : “moveChunk.to”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “step 1 of 5” : 296, “step 2 of 5” : 286, “step 3 of 5” : 0, “step 4 of 5” : 0, “step 5 of 5” : 1468, “note” : “success” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:10-5550038ec25ee1ddcace86d5”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:10.932Z”), “what” : “moveChunk.commit”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “from” : “rs1”, “to” : “rs2”, “cloned” : NumberLong(1), “clonedBytes” : NumberLong(84), “catchup” : NumberLong(0), “steady” : NumberLong(0) } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:19:10-5550038ec25ee1ddcace86d6”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:41689”, “time” : ISODate(“2015-05-11T01:19:10.957Z”), “what” : “moveChunk.from”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “step 1 of 6” : 0, “step 2 of 6” : 19, “step 3 of 6” : 4, “step 4 of 6” : 2049, “step 5 of 6” : 34, “step 6 of 6” : 0, “to” : “rs2”, “from” : “rs1”, “note” : “success” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:53-555005215c36cdce695aeb3f”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:53.841Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 1, “of” : 3, “chunk” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : 74908 }, “lastmod” : Timestamp(2, 2), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:53-555005215c36cdce695aeb40”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:53.843Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 2, “of” : 3, “chunk” : { “min” : { “user_id” : 74908 }, “max” : { “user_id” : 179771 }, “lastmod” : Timestamp(2, 3), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:53-555005215c36cdce695aeb41”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:53.849Z”), “what” : “multi-split”, “ns” : “shard_db.shard_col”, “details” : { “before” : { “min” : { “user_id” : 10 }, “max” : { “user_id” : { “$maxKey” : 1 } } }, “number” : 3, “of” : 3, “chunk” : { “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “lastmod” : Timestamp(2, 4), “lastmodEpoch” : ObjectId(“555003735384fafde4642d97”) } } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:53-555005215c36cdce695aeb43”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:53.893Z”), “what” : “moveChunk.start”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “from” : “rs2”, “to” : “rs1” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:54-55500522c25ee1ddcace86d7”, “server” : “ip-10-0-0-197”, “clientAddr” : “:27017”, “time” : ISODate(“2015-05-11T01:25:54.927Z”), “what” : “moveChunk.to”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “step 1 of 5” : 0, “step 2 of 5” : 0, “step 3 of 5” : 0, “step 4 of 5” : 0, “step 5 of 5” : 1030, “note” : “success” } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:54-555005225c36cdce695aeb44”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:54.954Z”), “what” : “moveChunk.commit”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “from” : “rs2”, “to” : “rs1”, “cloned” : NumberLong(1), “clonedBytes” : NumberLong(84), “catchup” : NumberLong(0), “steady” : NumberLong(0) } }
{ “_id” : “ip-10-0-0-197-2015-05-11T01:25:54-555005225c36cdce695aeb45”, “server” : “ip-10-0-0-197”, “clientAddr” : “10.0.0.197:49084”, “time” : ISODate(“2015-05-11T01:25:54.972Z”), “what” : “moveChunk.from”, “ns” : “shard_db.shard_col”, “details” : { “min” : { “user_id” : 179771 }, “max” : { “user_id” : { “$maxKey” : 1 } }, “step 1 of 6” : 0, “step 2 of 6” : 18, “step 3 of 6” : 0, “step 4 of 6” : 1025, “step 5 of 6” : 34, “step 6 of 6” : 0, “to” : “rs1”, “from” : “rs2”, “note” : “success” } }
mongos>
mongos>
mongos>
mongos>
mongos> db.actionlog.find()
{ “_id” : ObjectId(“554fffe05384fafde4642cdd”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:03:28.754Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 84, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“554fffea5384fafde4642cdf”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:03:38.843Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 24, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“554ffff45384fafde4642ce1”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:03:48.889Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 27, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“554ffffe5384fafde4642ce3”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:03:58.934Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000085384fafde4642ce5”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:08.979Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 25, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000135384fafde4642ce7”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:19.025Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550001d5384fafde4642ce9”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:29.072Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 28, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000275384fafde4642ceb”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:39.118Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000315384fafde4642ced”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:49.163Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550003b5384fafde4642cef”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:04:59.209Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000455384fafde4642cf1”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:09.255Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550004f5384fafde4642cf3”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:19.300Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000595384fafde4642cf5”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:29.346Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000635384fafde4642cf7”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:39.390Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 24, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550006d5384fafde4642cf9”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:49.436Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000775384fafde4642cfb”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:05:59.482Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 25, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000815384fafde4642cfd”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:06:09.528Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550008b5384fafde4642cff”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:06:19.572Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 25, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“555000955384fafde4642d01”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:06:29.620Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 26, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
{ “_id” : ObjectId(“5550009f5384fafde4642d03”), “server” : “ip-10-0-0-197”, “time” : ISODate(“2015-05-11T01:06:39.673Z”), “what” : “balancer.round”, “details” : { “executionTimeMillis” : 32, “errorOccured” : false, “candidateChunks” : 0, “chunksMoved” : 0 } }
Type “it” for more
mongos>
read
readWrite
dbAdmin
dbOwner
userAdmin
clusterAdmin
readAnyDatabase
readWriteAnyDatabase
dbAdminAnyDatabase
userAdminAnyDatabase

use admin
db.createUser(
{
user: "usr",
pwd: "pwd",
roles: [ "readWriteAnyDatabase",
"dbAdminAnyDatabase",
"userAdminAnyDatabase" ]
}
)

db.createUser(
{
user: "usr",
pwd: "pwd",
roles: [ "readWriteAnyDatabase",
"dbAdminAnyDatabase",
"userAdminAnyDatabase" ]
}
)

  • Ask Question