Trying to merge chunks to trigger better balancing after 50% of the data was deleted by the developers
Trying to merge chunks using the following command:
db.adminCommand
( {
mergeChunks: "HTMLDumps.HTMLRepository",
bounds: [ { "ShardMapId" : 2, "DomainId" : 62 },
{ "ShardMapId" : 2, "DomainId" : 162 } ]
} )
I get the following error when running the above command to merge any pair of consecutive chunks on a shard:
{
"ok" : 0,
"errmsg" : "Failed to commit chunk merge :: caused by ::
DuplicateKey: chunk operation commit failed: version
32|6||5ba8d864bba4ff264edf0bd9 doesn't exist in
namespace: HTMLDumps.HTMLRepository. Unable to save
chunk ops. Command: { applyOps: [ { op: "u", b: false,
ns: "config.chunks", o: { _id: "HTM
Dumps.HTMLRepository-ShardMapId_2.0DomainId_62.0", ns:
"HTMLDumps.HTMLRepository", min: { ShardMapId: 2.0,
DomainId: 62.0 }, max: { ShardMapId: 2, DomainId: 162 },
shard: "shard0000", lastmod: Timestamp(32, 6),
lastmodEpoch: ObjectId('5ba8d864bba4ff264edf0bd9') },
o2: { _id: "HTMLDumps.HTMLRepository-
ShardMapId_2.0DomainId_62.0" } }, { op: "d", ns:
"config.chunks", o: { _id: "HTMLDumps.HTMLRepository-
ShardMapId_2DomainId_109" } } ], preCondition: [ { ns:
"config.chunks", q: { query: { ns:
"HTMLDumps.HTMLRepository", min: { ShardMapId: 2.0,
DomainId: 62.0 }, max: { ShardMapId: 2, DomainId: 109 }
}, orderby: { lastmod: -1 } }, res: { lastmodEpoch:
ObjectId('5ba8d864bba4ff264edf0bd9'), shard:
"shard0000" } }, { ns: "config.chunks", q: { query:
{ ns: "HTMLDumps.HTMLRepository", min: { ShardMapId:
2, DomainId: 109 }, max: { ShardMapId: 2, DomainId: 162
} }, orderby: { lastmod: -1 } }, res: { lastmodEpoch:
ObjectId('5ba8d864bba4ff264edf0bd9'), shard:
"shard0000" } } ], writeConcern: { w: 0, wtimeout: 0 }
}. Result: { applied: 1, code: 11000, codeName:
"DuplicateKey", errmsg: "E11000 duplicate key error
collection: config.chunks index: ns_1_min_1 dup key: { :
"HTMLDumps.HTMLRepository", : { ShardMapId: 2.0,
DomainId: 62.0 } }", results: [ false ], ok: 0.0,
operationTime: Timestamp(1554112692, 1), $gleStats: {
lastOpTime: { ts: Timestamp(1554112692, 1), t: 13 },
electionId: ObjectId('7fffffff000000000000000d') },
$clusterTime: { clusterTime: Timestamp(1554112692, 1),
signature: { hash: BinData(0,
0000000000000000000000000000000000000000), keyId: 0 } }
} :: caused by :: E11000 duplicate key error collection:
config.chunks index: ns_1_min_1 dup key: { :
"HTMLDumps.HTMLRepository", : { ShardMapId: 2.0,
DomainId: 62.0 } }",
"code" : 11000,
"codeName" : "DuplicateKey",
"operationTime" : Timestamp(1554112687, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1554112687, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
This is happening regardless of which chunks I select. My main reason for doing this is to achieve true data balancing, not just balanced chunk counts. Recently, developers deleted 90% of the data from these chunks, which shifted the data distribution from roughly 60/40 to 90/10. I hope that merging/removing the empty chunks will bring the balance back as close to 60/40 as possible.
mongodb
New contributor
add a comment |
Trying to merge chunks using the following command:
db.adminCommand
( {
mergeChunks: "HTMLDumps.HTMLRepository",
bounds: [ { "ShardMapId" : 2, "DomainId" : 62 },
{ "ShardMapId" : 2, "DomainId" : 162 } ]
} )
I get the following error when running the above command to merge any pair of consecutive chunks on a shard:
{
"ok" : 0,
"errmsg" : "Failed to commit chunk merge :: caused by ::
DuplicateKey: chunk operation commit failed: version
32|6||5ba8d864bba4ff264edf0bd9 doesn't exist in
namespace: HTMLDumps.HTMLRepository. Unable to save
chunk ops. Command: { applyOps: [ { op: "u", b: false,
ns: "config.chunks", o: { _id: "HTM
Dumps.HTMLRepository-ShardMapId_2.0DomainId_62.0", ns:
"HTMLDumps.HTMLRepository", min: { ShardMapId: 2.0,
DomainId: 62.0 }, max: { ShardMapId: 2, DomainId: 162 },
shard: "shard0000", lastmod: Timestamp(32, 6),
lastmodEpoch: ObjectId('5ba8d864bba4ff264edf0bd9') },
o2: { _id: "HTMLDumps.HTMLRepository-
ShardMapId_2.0DomainId_62.0" } }, { op: "d", ns:
"config.chunks", o: { _id: "HTMLDumps.HTMLRepository-
ShardMapId_2DomainId_109" } } ], preCondition: [ { ns:
"config.chunks", q: { query: { ns:
"HTMLDumps.HTMLRepository", min: { ShardMapId: 2.0,
DomainId: 62.0 }, max: { ShardMapId: 2, DomainId: 109 }
}, orderby: { lastmod: -1 } }, res: { lastmodEpoch:
ObjectId('5ba8d864bba4ff264edf0bd9'), shard:
"shard0000" } }, { ns: "config.chunks", q: { query:
{ ns: "HTMLDumps.HTMLRepository", min: { ShardMapId:
2, DomainId: 109 }, max: { ShardMapId: 2, DomainId: 162
} }, orderby: { lastmod: -1 } }, res: { lastmodEpoch:
ObjectId('5ba8d864bba4ff264edf0bd9'), shard:
"shard0000" } } ], writeConcern: { w: 0, wtimeout: 0 }
}. Result: { applied: 1, code: 11000, codeName:
"DuplicateKey", errmsg: "E11000 duplicate key error
collection: config.chunks index: ns_1_min_1 dup key: { :
"HTMLDumps.HTMLRepository", : { ShardMapId: 2.0,
DomainId: 62.0 } }", results: [ false ], ok: 0.0,
operationTime: Timestamp(1554112692, 1), $gleStats: {
lastOpTime: { ts: Timestamp(1554112692, 1), t: 13 },
electionId: ObjectId('7fffffff000000000000000d') },
$clusterTime: { clusterTime: Timestamp(1554112692, 1),
signature: { hash: BinData(0,
0000000000000000000000000000000000000000), keyId: 0 } }
} :: caused by :: E11000 duplicate key error collection:
config.chunks index: ns_1_min_1 dup key: { :
"HTMLDumps.HTMLRepository", : { ShardMapId: 2.0,
DomainId: 62.0 } }",
"code" : 11000,
"codeName" : "DuplicateKey",
"operationTime" : Timestamp(1554112687, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1554112687, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
This is happening regardless of which chunks I select. My main reason for doing this is to achieve true data balancing, not just balanced chunk counts. Recently, developers deleted 90% of the data from these chunks, which shifted the data distribution from roughly 60/40 to 90/10. I hope that merging/removing the empty chunks will bring the balance back as close to 60/40 as possible.
mongodb
New contributor
Please provide sh.status() result.
– Mani
1 min ago
add a comment |
Trying to merge chunks using the following command:
db.adminCommand
( {
mergeChunks: "HTMLDumps.HTMLRepository",
bounds: [ { "ShardMapId" : 2, "DomainId" : 62 },
{ "ShardMapId" : 2, "DomainId" : 162 } ]
} )
I get the following error when running the above command to merge any pair of consecutive chunks on a shard:
{
"ok" : 0,
"errmsg" : "Failed to commit chunk merge :: caused by ::
DuplicateKey: chunk operation commit failed: version
32|6||5ba8d864bba4ff264edf0bd9 doesn't exist in
namespace: HTMLDumps.HTMLRepository. Unable to save
chunk ops. Command: { applyOps: [ { op: "u", b: false,
ns: "config.chunks", o: { _id: "HTM
Dumps.HTMLRepository-ShardMapId_2.0DomainId_62.0", ns:
"HTMLDumps.HTMLRepository", min: { ShardMapId: 2.0,
DomainId: 62.0 }, max: { ShardMapId: 2, DomainId: 162 },
shard: "shard0000", lastmod: Timestamp(32, 6),
lastmodEpoch: ObjectId('5ba8d864bba4ff264edf0bd9') },
o2: { _id: "HTMLDumps.HTMLRepository-
ShardMapId_2.0DomainId_62.0" } }, { op: "d", ns:
"config.chunks", o: { _id: "HTMLDumps.HTMLRepository-
ShardMapId_2DomainId_109" } } ], preCondition: [ { ns:
"config.chunks", q: { query: { ns:
"HTMLDumps.HTMLRepository", min: { ShardMapId: 2.0,
DomainId: 62.0 }, max: { ShardMapId: 2, DomainId: 109 }
}, orderby: { lastmod: -1 } }, res: { lastmodEpoch:
ObjectId('5ba8d864bba4ff264edf0bd9'), shard:
"shard0000" } }, { ns: "config.chunks", q: { query:
{ ns: "HTMLDumps.HTMLRepository", min: { ShardMapId:
2, DomainId: 109 }, max: { ShardMapId: 2, DomainId: 162
} }, orderby: { lastmod: -1 } }, res: { lastmodEpoch:
ObjectId('5ba8d864bba4ff264edf0bd9'), shard:
"shard0000" } } ], writeConcern: { w: 0, wtimeout: 0 }
}. Result: { applied: 1, code: 11000, codeName:
"DuplicateKey", errmsg: "E11000 duplicate key error
collection: config.chunks index: ns_1_min_1 dup key: { :
"HTMLDumps.HTMLRepository", : { ShardMapId: 2.0,
DomainId: 62.0 } }", results: [ false ], ok: 0.0,
operationTime: Timestamp(1554112692, 1), $gleStats: {
lastOpTime: { ts: Timestamp(1554112692, 1), t: 13 },
electionId: ObjectId('7fffffff000000000000000d') },
$clusterTime: { clusterTime: Timestamp(1554112692, 1),
signature: { hash: BinData(0,
0000000000000000000000000000000000000000), keyId: 0 } }
} :: caused by :: E11000 duplicate key error collection:
config.chunks index: ns_1_min_1 dup key: { :
"HTMLDumps.HTMLRepository", : { ShardMapId: 2.0,
DomainId: 62.0 } }",
"code" : 11000,
"codeName" : "DuplicateKey",
"operationTime" : Timestamp(1554112687, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1554112687, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
This is happening regardless of which chunks I select. My main reason for doing this is to achieve true data balancing, not just balanced chunk counts. Recently, developers deleted 90% of the data from these chunks, which shifted the data distribution from roughly 60/40 to 90/10. I hope that merging/removing the empty chunks will bring the balance back as close to 60/40 as possible.
mongodb
New contributor
Trying to merge chunks using the following command:
db.adminCommand
( {
mergeChunks: "HTMLDumps.HTMLRepository",
bounds: [ { "ShardMapId" : 2, "DomainId" : 62 },
{ "ShardMapId" : 2, "DomainId" : 162 } ]
} )
I get the following error when running the above command to merge any pair of consecutive chunks on a shard:
{
"ok" : 0,
"errmsg" : "Failed to commit chunk merge :: caused by ::
DuplicateKey: chunk operation commit failed: version
32|6||5ba8d864bba4ff264edf0bd9 doesn't exist in
namespace: HTMLDumps.HTMLRepository. Unable to save
chunk ops. Command: { applyOps: [ { op: "u", b: false,
ns: "config.chunks", o: { _id: "HTM
Dumps.HTMLRepository-ShardMapId_2.0DomainId_62.0", ns:
"HTMLDumps.HTMLRepository", min: { ShardMapId: 2.0,
DomainId: 62.0 }, max: { ShardMapId: 2, DomainId: 162 },
shard: "shard0000", lastmod: Timestamp(32, 6),
lastmodEpoch: ObjectId('5ba8d864bba4ff264edf0bd9') },
o2: { _id: "HTMLDumps.HTMLRepository-
ShardMapId_2.0DomainId_62.0" } }, { op: "d", ns:
"config.chunks", o: { _id: "HTMLDumps.HTMLRepository-
ShardMapId_2DomainId_109" } } ], preCondition: [ { ns:
"config.chunks", q: { query: { ns:
"HTMLDumps.HTMLRepository", min: { ShardMapId: 2.0,
DomainId: 62.0 }, max: { ShardMapId: 2, DomainId: 109 }
}, orderby: { lastmod: -1 } }, res: { lastmodEpoch:
ObjectId('5ba8d864bba4ff264edf0bd9'), shard:
"shard0000" } }, { ns: "config.chunks", q: { query:
{ ns: "HTMLDumps.HTMLRepository", min: { ShardMapId:
2, DomainId: 109 }, max: { ShardMapId: 2, DomainId: 162
} }, orderby: { lastmod: -1 } }, res: { lastmodEpoch:
ObjectId('5ba8d864bba4ff264edf0bd9'), shard:
"shard0000" } } ], writeConcern: { w: 0, wtimeout: 0 }
}. Result: { applied: 1, code: 11000, codeName:
"DuplicateKey", errmsg: "E11000 duplicate key error
collection: config.chunks index: ns_1_min_1 dup key: { :
"HTMLDumps.HTMLRepository", : { ShardMapId: 2.0,
DomainId: 62.0 } }", results: [ false ], ok: 0.0,
operationTime: Timestamp(1554112692, 1), $gleStats: {
lastOpTime: { ts: Timestamp(1554112692, 1), t: 13 },
electionId: ObjectId('7fffffff000000000000000d') },
$clusterTime: { clusterTime: Timestamp(1554112692, 1),
signature: { hash: BinData(0,
0000000000000000000000000000000000000000), keyId: 0 } }
} :: caused by :: E11000 duplicate key error collection:
config.chunks index: ns_1_min_1 dup key: { :
"HTMLDumps.HTMLRepository", : { ShardMapId: 2.0,
DomainId: 62.0 } }",
"code" : 11000,
"codeName" : "DuplicateKey",
"operationTime" : Timestamp(1554112687, 1),
"$clusterTime" : {
"clusterTime" : Timestamp(1554112687, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
This is happening regardless of which chunks I select. My main reason for doing this is to achieve true data balancing, not just balanced chunk counts. Recently, developers deleted 90% of the data from these chunks, which shifted the data distribution from roughly 60/40 to 90/10. I hope that merging/removing the empty chunks will bring the balance back as close to 60/40 as possible.
mongodb
mongodb
New contributor
New contributor
New contributor
asked 6 mins ago
Amardeep SinghAmardeep Singh
31
31
New contributor
New contributor
Please provide sh.status() result.
– Mani
1 min ago
add a comment |
Please provide sh.status() result.
– Mani
1 min ago
Please provide sh.status() result.
– Mani
1 min ago
Please provide sh.status() result.
– Mani
1 min ago
add a comment |
0
active
oldest
votes
// Page-widget bootstrap: configures the tag renderer and lazily sets up the
// answer editor. Fix: the scraped copy had garbled string literals — the
// \u003c / \u003e escapes in brandingHtml/contentPolicyHtml had lost their
// backslashes and the inner double quotes were unescaped, which made this
// block a syntax error. Escapes are restored below; behavior is otherwise
// unchanged.
StackExchange.ready(function() {
  var channelOptions = {
    tags: "".split(" "),
    id: "182"
  };
  initTagRenderer("".split(" "), "".split(" "), channelOptions);

  StackExchange.using("externalEditor", function() {
    // The editor must be created after snippets, if snippets are enabled.
    if (StackExchange.settings.snippets.snippetsEnabled) {
      StackExchange.using("snippets", function() {
        createEditor();
      });
    } else {
      createEditor();
    }
  });

  // Instantiates the answer editor with site-specific options.
  function createEditor() {
    StackExchange.prepareEditor({
      heartbeatType: 'answer',
      autoActivateHeartbeat: false,
      convertImagesToLinks: false,
      noModals: true,
      showLowRepImageUploadWarning: true,
      reputationToPostImages: null,
      bindNavPrevention: true,
      postfix: "",
      imageUploader: {
        brandingHtml: "Powered by \u003ca class=\"icon-imgur-white\" href=\"https://imgur.com/\"\u003e\u003c/a\u003e",
        contentPolicyHtml: "User contributions licensed under \u003ca href=\"https://creativecommons.org/licenses/by-sa/3.0/\"\u003ecc by-sa 3.0 with attribution required\u003c/a\u003e \u003ca href=\"https://stackoverflow.com/legal/content-policy\"\u003e(content policy)\u003c/a\u003e",
        allowUrls: true
      },
      onDemand: true,
      discardSelector: ".discard-answer",
      immediatelyShowMarkdownHelp: true
    });
  }
});
Amardeep Singh is a new contributor. Be nice, and check out our Code of Conduct.
Sign up or log in
// Enable draft auto-save when the login link is clicked.
StackExchange.ready(() => {
  StackExchange.helpers.onClickDraftSave('#login-link');
});
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Required, but never shown
// After login, return the user to the new-answer form on the question page.
StackExchange.ready(() => {
  StackExchange.openid.initPostLogin(
    '.new-post-login',
    'https%3a%2f%2fdba.stackexchange.com%2fquestions%2f233715%2ftrying-to-merge-chunks-to-trigger-better-balancing-after-50-of-the-data-was-del%23new-answer',
    'question_page'
  );
});
Post as a guest
Required, but never shown
0
active
oldest
votes
0
active
oldest
votes
active
oldest
votes
active
oldest
votes
Amardeep Singh is a new contributor. Be nice, and check out our Code of Conduct.
Amardeep Singh is a new contributor. Be nice, and check out our Code of Conduct.
Amardeep Singh is a new contributor. Be nice, and check out our Code of Conduct.
Amardeep Singh is a new contributor. Be nice, and check out our Code of Conduct.
Thanks for contributing an answer to Database Administrators Stack Exchange!
- Please be sure to answer the question. Provide details and share your research!
But avoid …
- Asking for help, clarification, or responding to other answers.
- Making statements based on opinion; back them up with references or personal experience.
To learn more, see our tips on writing great answers.
Sign up or log in
// Enable draft auto-save when the login link is clicked.
StackExchange.ready(() => {
  StackExchange.helpers.onClickDraftSave('#login-link');
});
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Required, but never shown
// After login, return the user to the new-answer form on the question page.
StackExchange.ready(() => {
  StackExchange.openid.initPostLogin(
    '.new-post-login',
    'https%3a%2f%2fdba.stackexchange.com%2fquestions%2f233715%2ftrying-to-merge-chunks-to-trigger-better-balancing-after-50-of-the-data-was-del%23new-answer',
    'question_page'
  );
});
Post as a guest
Required, but never shown
Sign up or log in
// Enable draft auto-save when the login link is clicked.
StackExchange.ready(() => {
  StackExchange.helpers.onClickDraftSave('#login-link');
});
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Required, but never shown
Sign up or log in
// Enable draft auto-save when the login link is clicked.
StackExchange.ready(() => {
  StackExchange.helpers.onClickDraftSave('#login-link');
});
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Required, but never shown
Sign up or log in
// Enable draft auto-save when the login link is clicked.
StackExchange.ready(() => {
  StackExchange.helpers.onClickDraftSave('#login-link');
});
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Required, but never shown
Please provide sh.status() result.
– Mani
1 min ago