How to drain a MongoDB Shard

use dbversity
db.dbfry.ensureIndex( { user_id : 1 }, { unique : true })
sh.enableSharding("dbversity")
sh.shardCollection("dbversity.dbfry", {"user_id":1})
sh.startBalancer()
sh.getBalancerState()
sh.isBalancerRunning()
use dbversity
for(var i = 1; i <= 100000 ; i++){db.dbfry.insert({"user_id" : i , "name" : "bulk-inserts", "Iteration" : i });}
mongos> for(var i = 1; i <= 100000 ; i++){db.dbfry.insert({"user_id" : i , "name" : "bulk-inserts", "Iteration" : i });}
WriteResult({ “nInserted” : 1 })
mongos>
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“568d598d58c8ebe6f4ccaf19”)
}
shards:
{ “_id” : “rs1”, “host” : “rs1/www.dbversity.com:27010,www.dbversity.com:27011” }
{ “_id” : “rs2”, “host” : “rs2/www.dbversity.com:27020,www.dbversity.com:27021” }
{ “_id” : “rs3”, “host” : “rs3/www.dbversity.com:27030,www.dbversity.com:27031” }
active mongoses:
“3.2.0” : 1
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
2 : Success
1 : Failed with error ‘aborted’, from rs1 to rs2
databases:
{ “_id” : “dbversity”, “primary” : “rs1”, “partitioned” : true }
dbversity.dbfry
shard key: { “user_id” : 1 }
unique: false
balancing: true
chunks:
rs1 1
rs2 1
rs3 1
{ "user_id" : { "$minKey" : 1 } } -->> { "user_id" : 2 } on : rs2 Timestamp(2, 0)
{ "user_id" : 2 } -->> { "user_id" : 14 } on : rs3 Timestamp(3, 0)
{ "user_id" : 14 } -->> { "user_id" : { "$maxKey" : 1 } } on : rs1 Timestamp(3, 1)

mongos>
mongos>
mongos>
mongos> db.dbfry.getShardDistribution()

Shard rs1 at rs1/www.dbversity.com:27010,www.dbversity.com:27011
data : 8MiB docs : 99987 chunks : 1
estimated data per chunk : 8MiB
estimated docs per chunk : 99987

Shard rs2 at rs2/www.dbversity.com:27020,www.dbversity.com:27021
data : 84B docs : 1 chunks : 1
estimated data per chunk : 84B
estimated docs per chunk : 1

Shard rs3 at rs3/www.dbversity.com:27030,www.dbversity.com:27031
data : 1008B docs : 12 chunks : 1
estimated data per chunk : 1008B
estimated docs per chunk : 12

Totals
data : 8.01MiB docs : 100000 chunks : 3
Shard rs1 contains 99.98% data, 99.98% docs in cluster, avg obj size on shard : 84B
Shard rs2 contains 0% data, 0% docs in cluster, avg obj size on shard : 84B
Shard rs3 contains 0.01% data, 0.01% docs in cluster, avg obj size on shard : 84B
mongos>
mongos>
mongos> use admin
switched to db admin
mongos>
mongos> db.runCommand( { removeShard : "rs3" } )
{
“msg” : “draining started successfully”,
“state” : “started”,
“shard” : “rs3”,
“note” : “you need to drop or movePrimary these databases”,
“dbsToMove” : [ ],
“ok” : 1
}
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“568d598d58c8ebe6f4ccaf19”)
}
shards:
{ “_id” : “rs1”, “host” : “rs1/www.dbversity.com:27010,www.dbversity.com:27011” }
{ “_id” : “rs2”, “host” : “rs2/www.dbversity.com:27020,www.dbversity.com:27021” }
{ “_id” : “rs3”, “host” : “rs3/www.dbversity.com:27030,www.dbversity.com:27031”, “draining” : true }
active mongoses:
“3.2.0” : 1
balancer:
Currently enabled: yes
Currently running: yes
Balancer lock taken at Thu Jan 07 2016 01:04:03 GMT+0530 (IST) by www.dbversity.com:10000:1452104071:1804289383:Balancer:1681692777
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
2 : Success
1 : Failed with error ‘aborted’, from rs1 to rs2
databases:
{ “_id” : “dbversity”, “primary” : “rs1”, “partitioned” : true }
dbversity.dbfry
shard key: { “user_id” : 1 }
unique: false
balancing: true
chunks:
rs1 1
rs2 1
rs3 1
{ "user_id" : { "$minKey" : 1 } } -->> { "user_id" : 2 } on : rs2 Timestamp(2, 0)
{ "user_id" : 2 } -->> { "user_id" : 14 } on : rs3 Timestamp(3, 0)
{ "user_id" : 14 } -->> { "user_id" : { "$maxKey" : 1 } } on : rs1 Timestamp(3, 1)

mongos>
mongos> use dbversity
switched to db dbversity
mongos>
mongos> db.dbfry.getShardDistribution()

Shard rs1 at rs1/www.dbversity.com:27010,www.dbversity.com:27011
data : 8.01MiB docs : 99999 chunks : 2
estimated data per chunk : 4MiB
estimated docs per chunk : 49999

Shard rs2 at rs2/www.dbversity.com:27020,www.dbversity.com:27021
data : 84B docs : 1 chunks : 1
estimated data per chunk : 84B
estimated docs per chunk : 1

Shard rs3 at rs3/www.dbversity.com:27030,www.dbversity.com:27031
data : 0B docs : 0 chunks : 0
estimated data per chunk : NaNGiB
estimated docs per chunk : NaN

Totals
data : 8.01MiB docs : 100000 chunks : 3
Shard rs1 contains 99.99% data, 99.99% docs in cluster, avg obj size on shard : 84B
Shard rs2 contains 0% data, 0% docs in cluster, avg obj size on shard : 84B
Shard rs3 contains 0% data, 0% docs in cluster, avg obj size on shard : NaNGiB
mongos>
mongos> use admin
switched to db admin
mongos> db.runCommand( { removeShard : "rs3" } )
{
“msg” : “removeshard completed successfully”,
“state” : “completed”,
“shard” : “rs3”,
“ok” : 1
}
mongos>
mongos>
mongos> sh.status()
— Sharding Status —
sharding version: {
“_id” : 1,
“minCompatibleVersion” : 5,
“currentVersion” : 6,
“clusterId” : ObjectId(“568d598d58c8ebe6f4ccaf19”)
}
shards:
{ “_id” : “rs1”, “host” : “rs1/www.dbversity.com:27010,www.dbversity.com:27011” }
{ “_id” : “rs2”, “host” : “rs2/www.dbversity.com:27020,www.dbversity.com:27021” }
active mongoses:
“3.2.0” : 1
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
3 : Success
1 : Failed with error ‘aborted’, from rs1 to rs2
databases:
{ “_id” : “dbversity”, “primary” : “rs1”, “partitioned” : true }
dbversity.dbfry
shard key: { “user_id” : 1 }
unique: false
balancing: true
chunks:
rs1 2
rs2 1
{ "user_id" : { "$minKey" : 1 } } -->> { "user_id" : 2 } on : rs2 Timestamp(2, 0)
{ "user_id" : 2 } -->> { "user_id" : 14 } on : rs1 Timestamp(4, 0)
{ "user_id" : 14 } -->> { "user_id" : { "$maxKey" : 1 } } on : rs1 Timestamp(3, 1)

mongos>
mongos> use dbversity
switched to db dbversity
mongos> db.dbfry.getShardDistribution()

Shard rs1 at rs1/www.dbversity.com:27010,www.dbversity.com:27011
data : 8.01MiB docs : 99999 chunks : 2
estimated data per chunk : 4MiB
estimated docs per chunk : 49999

Shard rs2 at rs2/www.dbversity.com:27020,www.dbversity.com:27021
data : 84B docs : 1 chunks : 1
estimated data per chunk : 84B
estimated docs per chunk : 1

Totals
data : 8.01MiB docs : 100000 chunks : 3
Shard rs1 contains 99.99% data, 99.99% docs in cluster, avg obj size on shard : 84B
Shard rs2 contains 0% data, 0% docs in cluster, avg obj size on shard : 84B
mongos>
mongos>
mongos>
mongos>
bye
#
#ll -lhtr ../../data/shard3_1
total 484K
drwxr-xr-x 16 root root 4.0K Jan 6 23:41 ../
-rw-r–r– 1 root root 21 Jan 6 23:41 WiredTiger.lock
-rw-r–r– 1 root root 49 Jan 6 23:41 WiredTiger
-rw-r–r– 1 root root 4.0K Jan 6 23:41 WiredTigerLAS.wt
-rw-r–r– 1 root root 5 Jan 6 23:41 mongod.lock
-rw-r–r– 1 root root 95 Jan 6 23:41 storage.bson
-rw-r–r– 1 root root 16K Jan 6 23:42 index-3-7108159943712396705.wt
-rw-r–r– 1 root root 16K Jan 6 23:42 index-1-7108159943712396705.wt
-rw-r–r– 1 root root 16K Jan 6 23:42 collection-2-7108159943712396705.wt
-rw-r–r– 1 root root 16K Jan 6 23:42 collection-0-7108159943712396705.wt
-rw-r–r– 1 root root 16K Jan 6 23:48 index-8-7108159943712396705.wt
-rw-r–r– 1 root root 16K Jan 6 23:48 index-6-7108159943712396705.wt
-rw-r–r– 1 root root 16K Jan 6 23:48 collection-7-7108159943712396705.wt
-rw-r–r– 1 root root 32K Jan 6 23:50 collection-5-7108159943712396705.wt
-rw-r–r– 1 root root 36K Jan 7 00:36 _mdb_catalog.wt
-rw-r–r– 1 root root 36K Jan 7 01:05 sizeStorer.wt
-rw-r–r– 1 root root 32K Jan 7 01:05 index-11-7108159943712396705.wt
-rw-r–r– 1 root root 32K Jan 7 01:05 index-10-7108159943712396705.wt
-rw-r–r– 1 root root 32K Jan 7 01:05 collection-9-7108159943712396705.wt
-rw-r–r– 1 root root 36K Jan 7 01:05 collection-4-7108159943712396705.wt
-rw-r–r– 1 root root 68K Jan 7 01:05 WiredTiger.wt
-rw-r–r– 1 root root 948 Jan 7 01:05 WiredTiger.turtle
drwxr-xr-x 3 root root 4.0K Jan 7 01:05 ./
drwxr-xr-x 2 root root 4.0K Jan 7 01:06 diagnostic.data/
#
#ll -lhtr ../../data/shard2_1
total 424K
drwxr-xr-x 16 root root 4.0K Jan 6 23:41 ../
-rw-r–r– 1 root root 21 Jan 6 23:41 WiredTiger.lock
-rw-r–r– 1 root root 49 Jan 6 23:41 WiredTiger
-rw-r–r– 1 root root 4.0K Jan 6 23:41 WiredTigerLAS.wt
-rw-r–r– 1 root root 5 Jan 6 23:41 mongod.lock
-rw-r–r– 1 root root 95 Jan 6 23:41 storage.bson
-rw-r–r– 1 root root 16K Jan 6 23:42 index-3-3445201314898272672.wt
-rw-r–r– 1 root root 16K Jan 6 23:42 index-1-3445201314898272672.wt
-rw-r–r– 1 root root 16K Jan 6 23:42 collection-2-3445201314898272672.wt
-rw-r–r– 1 root root 16K Jan 6 23:42 collection-0-3445201314898272672.wt
-rw-r–r– 1 root root 16K Jan 6 23:48 index-8-3445201314898272672.wt
-rw-r–r– 1 root root 16K Jan 6 23:48 index-6-3445201314898272672.wt
-rw-r–r– 1 root root 16K Jan 6 23:48 collection-7-3445201314898272672.wt
-rw-r–r– 1 root root 32K Jan 6 23:50 collection-5-3445201314898272672.wt
-rw-r–r– 1 root root 16K Jan 7 00:36 index-11-3445201314898272672.wt
-rw-r–r– 1 root root 36K Jan 7 00:36 sizeStorer.wt
-rw-r–r– 1 root root 36K Jan 7 00:36 _mdb_catalog.wt
-rw-r–r– 1 root root 16K Jan 7 00:36 index-10-3445201314898272672.wt
-rw-r–r– 1 root root 16K Jan 7 00:36 collection-9-3445201314898272672.wt
-rw-r–r– 1 root root 36K Jan 7 00:36 collection-4-3445201314898272672.wt
-rw-r–r– 1 root root 68K Jan 7 00:36 WiredTiger.wt
-rw-r–r– 1 root root 948 Jan 7 00:36 WiredTiger.turtle
drwxr-xr-x 3 root root 4.0K Jan 7 00:36 ./
drwxr-xr-x 2 root root 4.0K Jan 7 01:06 diagnostic.data/
#

  • Ask Question