MongoDB Shard creation script

The shell script below can be used to create a MongoDB sharded environment on a single server or across multiple servers, depending on your requirements.

Things to remember before executing the automated script below.

1) The user must have the required privileges on your Linux box (preferably the root user).
2) Replace the hostname with yours throughout the script.
3) Please check whether the /tmp has ample amount of space to create the required mongo prerequisites, else change the directory path to other one.
4) This script is intended for testing purposes when creating the shard on a single server.

[Lab root @ hostname /opt/mongodb/bin]# cat /etc/mongod.conf 
# MongoDB Configuration File
# NOTE: the shard creation script passes --dbpath/--logpath/--port on the
# command line, which override the corresponding values in this file.
 
# General Settings
# Daemonize: fork a child process and detach from the terminal.
fork = true
# NOTE(review): quiet=true and verbose=true pull in opposite directions --
# confirm which logging level is actually intended.
quiet = true
 
# Logging
verbose = true
# Append to the existing log file on restart instead of truncating it.
logappend = true
# Default log destination (each instance overrides this with --logpath).
logpath = /opt/mongodb/mongod.log
 
# Security
#auth = true
 
#setParameter = supportCompatibilityFormPrivilegeDocuments=0
#setParameter = logUserIds=1
#sslOnNormalPorts = true
#sslPEMKeyFile = /etc/ssl/mongodb.pem
#sslPEMKeyPassword = pass
 
#nohttpinterface = true
#bind_ip = 10.116.34.35
# Disable server-side JavaScript execution (eval, $where).
noscripting = true
# Default data directory (each instance overrides this with --dbpath).
dbpath = /opt/mongodb/data
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]# cat shard_creation_script.sh 
#!/usr/bin/env bash
#
# shard_creation_script.sh
#
# Bootstraps a complete MongoDB sharded test cluster on a single box:
#   * 3 config servers            (ports 39000-39002)
#   * replica set rs1: 2 mongod   (27010, 27011) + 1 arbiter (27012)
#   * replica set rs2: 2 mongod   (27020, 27021) + 1 arbiter (27022)
#   * 1 mongos query router       (port 10000)
#
# WARNING: wipes /opt/mongodb/data and /opt/mongodb/logs on every run.
# Intended for testing only.

set -u

# --- Settings: edit once here instead of search/replacing the script ------
HOST="hostname"                 # TODO: replace with your server's hostname/IP
MONGO_HOME="/opt/mongodb"
DATA_DIR="$MONGO_HOME/data"
LOG_DIR="$MONGO_HOME/logs"
BIN_DIR="$MONGO_HOME/bin"
CONF_FILE="/etc/mongod.conf"

##### Stop any Mongo processes left over from a previous run ################
# pgrep -f matches full command lines; safer than parsing `ps | egrep`.
# Try a graceful SIGTERM first, then SIGKILL whatever survived.
pids=$(pgrep -f 'shardsvr|configsvr|replSet|configdb' || true)
if [ -n "$pids" ]; then
  kill $pids 2>/dev/null
  sleep 2
  kill -9 $pids 2>/dev/null
fi

##### Recreate Mongo data & log directories ################
mkdir -p "$DATA_DIR" "$LOG_DIR"
# ${VAR:?} aborts the script if the variable is empty, so a misconfigured
# variable can never expand this into `rm -rf /*`.
rm -rf "${DATA_DIR:?}"/* "${LOG_DIR:?}"/*
cd "$DATA_DIR" || exit 1
mkdir -p shard1_1 shard1_2 shard2_1 shard2_2 config1 config2 config3 \
         arbiter1 arbiter2 router
cd "$BIN_DIR" || exit 1

# start_mongod NAME PORT [EXTRA_OPTS...] -- launch one mongod instance.
#   NAME: data subdirectory under $DATA_DIR and log basename under $LOG_DIR.
#   PORT: listen port; remaining args select the role (configsvr/shardsvr/...).
start_mongod() {
  local name=$1 port=$2
  shift 2
  numactl --interleave=all ./mongod "$@" \
    --dbpath "$DATA_DIR/$name" --logpath "$LOG_DIR/$name.log" \
    --port "$port" --config "$CONF_FILE" &
}

##### Starting the Mongo Config, Shard, Arbiter & Router services ###########

## Config Servers #####
start_mongod config1 39000 --configsvr
start_mongod config2 39001 --configsvr
start_mongod config3 39002 --configsvr

## Replica Set 1 ######
start_mongod shard1_1 27010 --shardsvr --replSet rs1
start_mongod shard1_2 27011 --shardsvr --replSet rs1

## Replica Set 2 ######
start_mongod shard2_1 27020 --shardsvr --replSet rs2
start_mongod shard2_2 27021 --shardsvr --replSet rs2

## Arbiters (vote in elections only; hold no data) ####
start_mongod arbiter1 27012 --replSet rs1
start_mongod arbiter2 27022 --replSet rs2

sleep 10
# printf instead of `echo -e`: behaves identically under sh and bash.
printf '\n\n\n\n Config, Shard, Router & Arbiter services are initiating ... \n\n\n\n'
# mongos must list all three config servers; previously this line carried
# xx.xx.xx.xx placeholders that were easy to forget to replace.
numactl --interleave=all ./mongos \
  --configdb "$HOST:39000,$HOST:39001,$HOST:39002" \
  --logpath "$LOG_DIR/router.log" --port 10000 &

sleep 10
# Initiate each replica set on its first data-bearing member.
./mongo "$HOST:27010/admin" --eval "rs.initiate()"
./mongo "$HOST:27020/admin" --eval "rs.initiate()"

sleep 30
printf '\n\n\n\n Replica sets are added. \n\n\n\n'

# Add the second data-bearing member to each replica set ...
./mongo "$HOST:27010/admin" --eval "rs.add(\"$HOST:27011\")"
./mongo "$HOST:27020/admin" --eval "rs.add(\"$HOST:27021\")"

# ... and the arbiters.
./mongo "$HOST:27010/admin" --eval "rs.addArb(\"$HOST:27012\")"
./mongo "$HOST:27020/admin" --eval "rs.addArb(\"$HOST:27022\")"

sleep 20
# Register both replica sets as shards through the mongos router.
./mongo "$HOST:10000/admin" --eval "sh.addShard(\"rs1/$HOST:27010,$HOST:27011\")"
./mongo "$HOST:10000/admin" --eval "sh.addShard(\"rs2/$HOST:27020,$HOST:27021\")"
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]# sh -x shard_creation_script.sh 
++ ps -ef
++ egrep 'shardsvr|configsvr|replSet|configdb'
++ grep -v egrep
++ awk '-F ' '{print $2}'
+ for i in '`ps -ef | egrep '\''shardsvr|configsvr|replSet|configdb'\'' | grep -v egrep | awk -F" " '\''{print $2}'\''`'
+ kill -9 5462
+ for i in '`ps -ef | egrep '\''shardsvr|configsvr|replSet|configdb'\'' | grep -v egrep | awk -F" " '\''{print $2}'\''`'
+ kill -9 5467
+ for i in '`ps -ef | egrep '\''shardsvr|configsvr|replSet|configdb'\'' | grep -v egrep | awk -F" " '\''{print $2}'\''`'
+ kill -9 5468
+ for i in '`ps -ef | egrep '\''shardsvr|configsvr|replSet|configdb'\'' | grep -v egrep | awk -F" " '\''{print $2}'\''`'
+ kill -9 5477
+ for i in '`ps -ef | egrep '\''shardsvr|configsvr|replSet|configdb'\'' | grep -v egrep | awk -F" " '\''{print $2}'\''`'
+ kill -9 5478
+ for i in '`ps -ef | egrep '\''shardsvr|configsvr|replSet|configdb'\'' | grep -v egrep | awk -F" " '\''{print $2}'\''`'
+ kill -9 5479
+ for i in '`ps -ef | egrep '\''shardsvr|configsvr|replSet|configdb'\'' | grep -v egrep | awk -F" " '\''{print $2}'\''`'
+ kill -9 5480
+ for i in '`ps -ef | egrep '\''shardsvr|configsvr|replSet|configdb'\'' | grep -v egrep | awk -F" " '\''{print $2}'\''`'
+ kill -9 5490
+ for i in '`ps -ef | egrep '\''shardsvr|configsvr|replSet|configdb'\'' | grep -v egrep | awk -F" " '\''{print $2}'\''`'
+ kill -9 5494
+ for i in '`ps -ef | egrep '\''shardsvr|configsvr|replSet|configdb'\'' | grep -v egrep | awk -F" " '\''{print $2}'\''`'
+ kill -9 5791
+ mkdir -p /opt/mongodb/data /opt/mongodb/logs
+ rm -rf /opt/mongodb/data/arbiter1 /opt/mongodb/data/arbiter2 /opt/mongodb/data/config1 /opt/mongodb/data/config2 /opt/mongodb/data/config3 /opt/mongodb/data/router /opt/mongodb/data/shard1_1 /opt/mongodb/data/shard1_2 /opt/mongodb/data/shard2_1 /opt/mongodb/data/shard2_2
+ rm -rf /opt/mongodb/logs/arbiter1.log /opt/mongodb/logs/arbiter2.log /opt/mongodb/logs/config1.log /opt/mongodb/logs/config2.log /opt/mongodb/logs/config3.log /opt/mongodb/logs/router.log /opt/mongodb/logs/shard1_1.log /opt/mongodb/logs/shard1_2.log /opt/mongodb/logs/shard2_1.log /opt/mongodb/logs/shard2_2.log
+ cd /opt/mongodb/data/
+ mkdir -p shard1_1 shard1_2 shard2_1 shard2_2 config1 config2 config3 arbiter1 arbiter2 router
+ cd /opt/mongodb/bin
+ numactl --interleave=all ./mongod --configsvr --dbpath /opt/mongodb/data/config1 --logpath /opt/mongodb/logs/config1.log --port 39000 --config /etc/mongod.conf
+ numactl --interleave=all ./mongod --configsvr --dbpath /opt/mongodb/data/config2 --logpath /opt/mongodb/logs/config2.log --port 39001 --config /etc/mongod.conf
+ numactl --interleave=all ./mongod --configsvr --dbpath /opt/mongodb/data/config3 --logpath /opt/mongodb/logs/config3.log --port 39002 --config /etc/mongod.conf
+ numactl --interleave=all ./mongod --shardsvr --replSet rs1 --dbpath /opt/mongodb/data/shard1_1 --logpath /opt/mongodb/logs/shard1_1.log --port 27010 --config /etc/mongod.conf
+ numactl --interleave=all ./mongod --shardsvr --replSet rs1 --dbpath /opt/mongodb/data/shard1_2 --logpath /opt/mongodb/logs/shard1_2.log --port 27011 --config /etc/mongod.conf
+ numactl --interleave=all ./mongod --shardsvr --replSet rs2 --dbpath /opt/mongodb/data/shard2_1 --logpath /opt/mongodb/logs/shard2_1.log --port 27020 --config /etc/mongod.conf
+ numactl --interleave=all ./mongod --shardsvr --replSet rs2 --dbpath /opt/mongodb/data/shard2_2 --logpath /opt/mongodb/logs/shard2_2.log --port 27021 --config /etc/mongod.conf
+ sleep 10
+ numactl --interleave=all ./mongod --replSet rs1 --dbpath /opt/mongodb/data/arbiter1 --logpath /opt/mongodb/logs/arbiter1.log --port 27012 --config /etc/mongod.conf
+ numactl --interleave=all ./mongod --replSet rs2 --dbpath /opt/mongodb/data/arbiter2 --logpath /opt/mongodb/logs/arbiter2.log --port 27022 --config /etc/mongod.conf
about to fork child process, waiting until server is ready for connections.
about to fork child process, waiting until server is ready for connections.
about to fork child process, waiting until server is ready for connections.
forked process: 7415
forked process: 7417
forked process: 7418
all output going to: /opt/mongodb/logs/config1.log
all output going to: /opt/mongodb/logs/arbiter1.log
about to fork child process, waiting until server is ready for connections.
about to fork child process, waiting until server is ready for connections.
all output going to: /opt/mongodb/logs/shard2_1.log
forked process: 7427
about to fork child process, waiting until server is ready for connections.
all output going to: /opt/mongodb/logs/shard1_2.log
about to fork child process, waiting until server is ready for connections.
forked process: 7432
all output going to: /opt/mongodb/logs/shard1_1.log
forked process: 7435
forked process: 7436
all output going to: /opt/mongodb/logs/config3.log
all output going to: /opt/mongodb/logs/shard2_2.log
about to fork child process, waiting until server is ready for connections.
forked process: 7442
about to fork child process, waiting until server is ready for connections.
all output going to: /opt/mongodb/logs/config2.log
forked process: 7446
all output going to: /opt/mongodb/logs/arbiter2.log
child process started successfully, parent exiting
child process started successfully, parent exiting
child process started successfully, parent exiting
child process started successfully, parent exiting
child process started successfully, parent exiting
child process started successfully, parent exiting
child process started successfully, parent exiting
child process started successfully, parent exiting
child process started successfully, parent exiting
+ echo -e '\n\n\n\n Config, Shard, Router & Arbiter services are initiating ... \n\n\n\n'




 Config, Shard, Router & Arbiter services are initiating ... 




+ sleep 10
+ numactl --interleave=all ./mongos --configdb xx.xx.xx.xx:39000,xx.xx.xx.xx:39001,xx.xx.xx.xx:39002 --logpath /opt/mongodb/logs/router.log --port 10000
all output going to: /opt/mongodb/logs/router.log
+ ./mongo hostname:27010/admin --eval 'rs.initiate()'
MongoDB shell version: 2.4.11
connecting to: hostname:27010/admin
[object Object]
+ ./mongo hostname:27020/admin --eval 'rs.initiate()'
MongoDB shell version: 2.4.11
connecting to: hostname:27020/admin
[object Object]
+ sleep 30
+ echo -e '\n\n\n\n Replica sets are added. \n\n\n\n'




 Replica sets are added. 




+ ./mongo hostname:27010/admin --eval 'rs.add("hostname:27011")'
MongoDB shell version: 2.4.11
connecting to: hostname:27010/admin
[object Object]
+ ./mongo hostname:27020/admin --eval 'rs.add("hostname:27021")'
MongoDB shell version: 2.4.11
connecting to: hostname:27020/admin
[object Object]
+ ./mongo hostname:27010/admin --eval 'rs.addArb("hostname:27012")'
MongoDB shell version: 2.4.11
connecting to: hostname:27010/admin
[object Object]
+ ./mongo hostname:27020/admin --eval 'rs.addArb("hostname:27022")'
MongoDB shell version: 2.4.11
connecting to: hostname:27020/admin
[object Object]
+ sleep 20
+ ./mongo hostname:10000/admin --eval 'sh.addShard("rs1/hostname:27010,hostname:27011")'
MongoDB shell version: 2.4.11
connecting to: hostname:10000/admin
[object Object]
+ ./mongo hostname:10000/admin --eval 'sh.addShard("rs2/hostname:27020,hostname:27021")'
MongoDB shell version: 2.4.11
connecting to: hostname:10000/admin
[object Object]
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]# ./mongo hostname:27010/admin
MongoDB shell version: 2.4.11
connecting to: hostname:27010/admin
rs1:PRIMARY> 
rs1:PRIMARY> rs.status()
{
 "set" : "rs1",
 "date" : ISODate("2014-10-17T07:39:32Z"),
 "myState" : 1,
 "members" : [
 {
 "_id" : 0,
 "name" : "hostname:27010",
 "health" : 1,
 "state" : 1,
 "stateStr" : "PRIMARY",
 "uptime" : 118,
 "optime" : Timestamp(1413531505, 1),
 "optimeDate" : ISODate("2014-10-17T07:38:25Z"),
 "self" : true
 },
 {
 "_id" : 1,
 "name" : "hostname:27011",
 "health" : 1,
 "state" : 2,
 "stateStr" : "SECONDARY",
 "uptime" : 68,
 "optime" : Timestamp(1413531505, 1),
 "optimeDate" : ISODate("2014-10-17T07:38:25Z"),
 "lastHeartbeat" : ISODate("2014-10-17T07:39:30Z"),
 "lastHeartbeatRecv" : ISODate("2014-10-17T07:39:30Z"),
 "pingMs" : 0,
 "syncingTo" : "hostname:27010"
 },
 {
 "_id" : 2,
 "name" : "hostname:27012",
 "health" : 1,
 "state" : 7,
 "stateStr" : "ARBITER",
 "uptime" : 67,
 "lastHeartbeat" : ISODate("2014-10-17T07:39:31Z"),
 "lastHeartbeatRecv" : ISODate("2014-10-17T07:39:30Z"),
 "pingMs" : 0
 }
 ],
 "ok" : 1
}
rs1:PRIMARY> 
rs1:PRIMARY> 
bye
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]# ./mongo hostname:27020/admin
MongoDB shell version: 2.4.11
connecting to: hostname:27020/admin
rs2:PRIMARY> 
rs2:PRIMARY> 
rs2:PRIMARY> 
rs2:PRIMARY> rs.status()
{
 "set" : "rs2",
 "date" : ISODate("2014-10-17T07:39:42Z"),
 "myState" : 1,
 "members" : [
 {
 "_id" : 0,
 "name" : "hostname:27020",
 "health" : 1,
 "state" : 1,
 "stateStr" : "PRIMARY",
 "uptime" : 128,
 "optime" : Timestamp(1413531505, 2),
 "optimeDate" : ISODate("2014-10-17T07:38:25Z"),
 "self" : true
 },
 {
 "_id" : 1,
 "name" : "hostname:27021",
 "health" : 1,
 "state" : 2,
 "stateStr" : "SECONDARY",
 "uptime" : 77,
 "optime" : Timestamp(1413531505, 2),
 "optimeDate" : ISODate("2014-10-17T07:38:25Z"),
 "lastHeartbeat" : ISODate("2014-10-17T07:39:41Z"),
 "lastHeartbeatRecv" : ISODate("2014-10-17T07:39:40Z"),
 "pingMs" : 0,
 "syncingTo" : "hostname:27020"
 },
 {
 "_id" : 2,
 "name" : "hostname:27022",
 "health" : 1,
 "state" : 7,
 "stateStr" : "ARBITER",
 "uptime" : 77,
 "lastHeartbeat" : ISODate("2014-10-17T07:39:41Z"),
 "lastHeartbeatRecv" : ISODate("2014-10-17T07:39:40Z"),
 "pingMs" : 0
 }
 ],
 "ok" : 1
}
rs2:PRIMARY> 
rs2:PRIMARY> 
rs2:PRIMARY> 
rs2:PRIMARY> 
rs2:PRIMARY> 
bye
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]
# 
[Lab root @ hostname /opt/mongodb/bin]# ./mongo hostname:10000/admin
MongoDB shell version: 2.4.11
connecting to: hostname:10000/admin
mongos> 
mongos> 
mongos> 
mongos> sh.status()
--- Sharding Status --- 
 sharding version: {
 "_id" : 1,
 "version" : 3,
 "minCompatibleVersion" : 3,
 "currentVersion" : 4,
 "clusterId" : ObjectId("5440c748d2769fad4b93c6ec")
}
 shards:
 { "_id" : "rs1", "host" : "rs1/hostname:27010,hostname:27011" }
 { "_id" : "rs2", "host" : "rs2/hostname:27020,hostname:27021" }
 databases:
 { "_id" : "admin", "partitioned" : false, "primary" : "config" }

mongos>



Enabling Sharding 


[Lab root @ hostname /opt/mongodb/bin]# ./mongo hostname:10000/admin
MongoDB shell version: 2.4.11
connecting to: hostname:10000/admin
mongos> 
mongos> 
mongos> 
mongos> 
mongos> 
mongos> sh.status()
--- Sharding Status --- 
 sharding version: {
 "_id" : 1,
 "version" : 3,
 "minCompatibleVersion" : 3,
 "currentVersion" : 4,
 "clusterId" : ObjectId("5440d75a2bf8868b348ac474")
}
 shards:
 { "_id" : "rs1", "host" : "rs1/hostname:27010,hostname:27011" }
 { "_id" : "rs2", "host" : "rs2/hostname:27020,hostname:27021" }
 databases:
 { "_id" : "admin", "partitioned" : false, "primary" : "config" }

mongos> 
mongos> use shrdb
switched to db shrdb
mongos> 
mongos> db.shrcol.ensureIndex({"user_id" : 1})
mongos> 
mongos> show collections
shrcol
system.indexes
mongos> 
mongos> db.shrcol.find()
mongos> 
mongos> sh.enableSharding("shrdb")
{ "ok" : 1 }
mongos> 
mongos> use admin
switched to db admin
mongos> 
mongos> db.runCommand( { shardCollection: "shrdb.shrcol", key : {user_id:1}})
{ "collectionsharded" : "shrdb.shrcol", "ok" : 1 }
mongos> 
mongos> sh.startBalancer()
mongos> 
mongos> sh.isBalancerRunning()
true
mongos> sh.getBalancerState()
true
mongos> 
mongos> use shrdb
switched to db shrdb
mongos> // dummy data loading
mongos> for(var i=1; i <= 1000 ; i++){db.shrcol.insert({ "user_id" : i, "I do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting pointI do have a spreadsheet calculator as a starting point" : i})}

mongos> 
mongos> 
mongos> sh.status()
--- Sharding Status --- 
 sharding version: {
 "_id" : 1,
 "version" : 3,
 "minCompatibleVersion" : 3,
 "currentVersion" : 4,
 "clusterId" : ObjectId("5440d75a2bf8868b348ac474")
}
 shards:
 { "_id" : "rs1", "host" : "rs1/hostname:27010,hostname:27011" }
 { "_id" : "rs2", "host" : "rs2/hostname:27020,hostname:27021" }
 databases:
 { "_id" : "admin", "partitioned" : false, "primary" : "config" }
 { "_id" : "shrdb", "partitioned" : true, "primary" : "rs1" }
 shrdb.shrcol
 shard key: { "user_id" : 1 }
 chunks:
 rs1 2
 rs2 1
 { "user_id" : { "$minKey" : 1 } } -->> { "user_id" : 1 } on : rs1 Timestamp(2, 1) 
 { "user_id" : 1 } -->> { "user_id" : 358 } on : rs1 Timestamp(1, 3) 
 { "user_id" : 358 } -->> { "user_id" : { "$maxKey" : 1 } } on : rs2 Timestamp(2, 0) 

mongos> 
mongos> 
mongos> 
bye
[Lab root @ hostname /opt/mongodb/bin]# ./mongo hostname:27010/admin
MongoDB shell version: 2.4.11
connecting to: hostname:27010/admin
rs1:PRIMARY> 
rs1:PRIMARY> use shrdb
switched to db shrdb
rs1:PRIMARY> 
rs1:PRIMARY> db.shrcol.count()
357
rs1:PRIMARY> 
rs1:PRIMARY> 
bye
[Lab root @ hostname /opt/mongodb/bin]# ./mongo hostname:27020/admin
MongoDB shell version: 2.4.11
connecting to: hostname:27020/admin
rs2:PRIMARY> 
rs2:PRIMARY> use shrdb
switched to db shrdb
rs2:PRIMARY> 
rs2:PRIMARY> db.shrcol.count()
643
rs2:PRIMARY> 
rs2:PRIMARY> 
bye
[Lab root @ hostname /opt/mongodb/bin]# 
[Lab root @ hostname /opt/mongodb/bin]# ./mongo hostname:27010/admin
MongoDB shell version: 2.4.11
connecting to: hostname:27010/admin
rs1:PRIMARY> 
bye
[Lab root @ hostname /opt/mongodb/bin]# 
[Lab root @ hostname /opt/mongodb/bin]# ./mongo hostname:10000/admin
MongoDB shell version: 2.4.11
connecting to: hostname:10000/admin
mongos> 
mongos> use shrdb
switched to db shrdb
mongos> db.shrcol.count()
1000
mongos> 
mongos> 

// To check whether the queries are going to server1/server2 i.e., shards

mongos> db.shrcol.find({"user_id":1}).explain()
{
 "clusteredType" : "ParallelSort",
 "shards" : {
 "rs1/hostname:27010,hostname:27011" : [
 {
 "cursor" : "BtreeCursor user_id_1",
 "isMultiKey" : false,
 "n" : 1,
 "nscannedObjects" : 1,
 "nscanned" : 1,
 "nscannedObjectsAllPlans" : 1,
 "nscannedAllPlans" : 1,
 "scanAndOrder" : false,
 "indexOnly" : false,
 "nYields" : 0,
 "nChunkSkips" : 0,
 "millis" : 0,
 "indexBounds" : {
 "user_id" : [
 [
 1,
 1
 ]
 ]
 },
 "server" : "hostname:27010"
 }
 ]
 },
 "cursor" : "BtreeCursor user_id_1",
 "n" : 1,
 "nChunkSkips" : 0,
 "nYields" : 0,
 "nscanned" : 1,
 "nscannedAllPlans" : 1,
 "nscannedObjects" : 1,
 "nscannedObjectsAllPlans" : 1,
 "millisShardTotal" : 0,
 "millisShardAvg" : 0,
 "numQueries" : 1,
 "numShards" : 1,
 "indexBounds" : {
 "user_id" : [
 [
 1,
 1
 ]
 ]
 },
 "millis" : 1
}
mongos> 
mongos> 
mongos> db.shrcol.find({"user_id":360}).explain()
{
 "clusteredType" : "ParallelSort",
 "shards" : {
 "rs2/hostname:27020,hostname:27021" : [
 {
 "cursor" : "BtreeCursor user_id_1",
 "isMultiKey" : false,
 "n" : 1,
 "nscannedObjects" : 1,
 "nscanned" : 1,
 "nscannedObjectsAllPlans" : 1,
 "nscannedAllPlans" : 1,
 "scanAndOrder" : false,
 "indexOnly" : false,
 "nYields" : 0,
 "nChunkSkips" : 0,
 "millis" : 0,
 "indexBounds" : {
 "user_id" : [
 [
 360,
 360
 ]
 ]
 },
 "server" : "hostname:27020"
 }
 ]
 },
 "cursor" : "BtreeCursor user_id_1",
 "n" : 1,
 "nChunkSkips" : 0,
 "nYields" : 0,
 "nscanned" : 1,
 "nscannedAllPlans" : 1,
 "nscannedObjects" : 1,
 "nscannedObjectsAllPlans" : 1,
 "millisShardTotal" : 0,
 "millisShardAvg" : 0,
 "numQueries" : 1,
 "numShards" : 1,
 "indexBounds" : {
 "user_id" : [
 [
 360,
 360
 ]
 ]
 },
 "millis" : 1
}
mongos>

//Chunks information from configdb

[Lab root @ hostname /opt/mongodb/bin]# ./mongo hostname:39001/admin
MongoDB shell version: 2.4.11
connecting to: hostname:39001/admin
configsvr> 
configsvr> use config
switched to db config
configsvr> 
configsvr> db.chunks.find({"ns" :"shrdb.shrcol"},{"min.user_id":1,"max.user_id":1,"_id":0}).limit(5)
{ "min" : { "user_id" : { "$minKey" : 1 } }, "max" : { "user_id" : 1 } }
{ "min" : { "user_id" : 1 }, "max" : { "user_id" : 358 } }
{ "min" : { "user_id" : 358 }, "max" : { "user_id" : { "$maxKey" : 1 } } }
configsvr> 
configsvr> 
configsvr> db.chunks.find({"ns" :"shrdb.shrcol"},{"max.user_id":1,"_id":0}).sort({"max.user_id":-1}).limit(1)
{ "max" : { "user_id" : { "$maxKey" : 1 } } }
configsvr> 
  • Ask Question