summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRomans Kasperovics <romans.kasperovics@mongodb.com>2022-05-05 15:20:20 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-05-05 15:59:23 +0000
commita70e930841581a660fb9be8a1f28f4ef4e58bb91 (patch)
tree4c21b2f580e5b8f5e7c756df9da1ee57decb27c8
parent0d025d08cf2792a19b48678f5c146268a80c193f (diff)
downloadmongo-a70e930841581a660fb9be8a1f28f4ef4e58bb91.tar.gz
SERVER-65460 Set allowDiskUse to false in read-only mode
-rw-r--r--jstests/noPassthrough/read_only_allow_disk_use.js157
-rw-r--r--jstests/noPassthrough/views_count_distinct_disk_use.js6
-rw-r--r--jstests/readonly/aggregate.js13
-rw-r--r--src/mongo/db/commands/find_cmd.cpp4
-rw-r--r--src/mongo/db/commands/map_reduce_agg.cpp4
-rw-r--r--src/mongo/db/commands/run_aggregate.cpp4
-rw-r--r--src/mongo/db/pipeline/aggregation_request_helper.cpp6
7 files changed, 171 insertions, 23 deletions
diff --git a/jstests/noPassthrough/read_only_allow_disk_use.js b/jstests/noPassthrough/read_only_allow_disk_use.js
new file mode 100644
index 00000000000..f3be69cbd4b
--- /dev/null
+++ b/jstests/noPassthrough/read_only_allow_disk_use.js
@@ -0,0 +1,157 @@
+/**
+ * Test that spilling to disk is prohibited in queryable backup (read-only) mode and when the
+ * parameter 'recoverFromOplogAsStandalone' is set to 'true'.
+ *
+ * The 'requires_persistence' tag excludes the test from running with 'inMemory' and
+ * 'ephemeralForTest' storage engines that do not support queryable backup (read-only) mode.
+ * @tags: [
+ * requires_persistence,
+ * requires_replication
+ * ]
+ */
+
+(function() {
+"use strict";
+
+const memoryLimitMb = 1;
+const memoryLimitBytes = 1 * 1024 * 1024;
+const largeStr = "A".repeat(1024 * 1024); // 1MB string
+
+function prepareData(conn) {
+ const testDb = conn.getDB(jsTestName());
+ testDb.largeColl.drop();
+ // Create a collection exceeding the memory limit.
+ for (let i = 0; i < memoryLimitMb + 1; ++i)
+ assert.commandWorked(testDb.largeColl.insert({x: i, largeStr: largeStr + i}));
+ // Create a view on a large collection containing a sort operation.
+ testDb.largeView.drop();
+ assert.commandWorked(testDb.createView("largeView", "largeColl", [{$sort: {x: -1}}]));
+}
+
+function runTest(conn, allowDiskUseByDefault) {
+ const testDb = conn.getDB(jsTestName());
+ const coll = testDb.largeColl;
+ const view = testDb.largeView;
+
+ function assertFailed(cmdObject) {
+ assert.commandFailedWithCode(testDb.runCommand(cmdObject),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
+ }
+
+ assert.commandWorked(
+ testDb.adminCommand({setParameter: 1, allowDiskUseByDefault: allowDiskUseByDefault}));
+
+ // The 'aggregate' pipelines running within the memory limit must pass.
+ assert.eq(1, coll.aggregate([{$match: {x: 1}}]).itcount());
+ assert.eq(1,
+ coll.aggregate([{$match: {x: 1}}], {allowDiskUse: !allowDiskUseByDefault}).itcount());
+
+ // The 'aggregate' grouping exceeding the memory limit must fail.
+ assertFailed({
+ aggregate: coll.getName(),
+ pipeline: [{$group: {_id: '$largeStr', minId: {$min: '$_id'}}}],
+ cursor: {}
+ });
+ assertFailed({
+ aggregate: coll.getName(),
+ pipeline: [{$group: {_id: '$largeStr', minId: {$min: '$_id'}}}],
+ cursor: {},
+ allowDiskUse: !allowDiskUseByDefault
+ });
+
+ // The 'aggregate' sort exceeding the memory limit must fail.
+ assertFailed({aggregate: coll.getName(), pipeline: [{$sort: {x: -1}}], cursor: {}});
+ assertFailed({
+ aggregate: coll.getName(),
+ pipeline: [{$sort: {x: -1}}],
+ cursor: {},
+ allowDiskUse: !allowDiskUseByDefault
+ });
+
+ // The 'find' queries within the memory limit must pass.
+ assert.eq(1, coll.find({x: 1}).itcount());
+ assert.eq(1, coll.find({x: 1}).allowDiskUse(!allowDiskUseByDefault).itcount());
+
+ // The 'find' and sort queries exceeding the memory limit must fail.
+ assertFailed({find: coll.getName(), sort: {x: -1}});
+ assertFailed({find: coll.getName(), sort: {x: -1}, allowDiskUse: !allowDiskUseByDefault});
+
+ // The 'mapReduce' command running within the memory limit must pass.
+ assert.eq(coll.mapReduce(
+ function() {
+ emit("x", 1);
+ },
+ function(k, v) {
+ return Array.sum(v);
+ },
+ {out: {inline: 1}})
+ .results[0]
+ .value,
+ memoryLimitMb + 1);
+
+ // The 'mapReduce' command exceeding the memory limit must fail.
+ assertFailed({
+ mapReduce: coll.getName(),
+ map: function() {
+ emit("x", this.largeStr);
+ },
+ reduce: function(k, v) {
+ return 42;
+ },
+ out: {inline: 1}
+ });
+
+ // The 'count' command within the memory limit must pass.
+ assert.eq(coll.count(), memoryLimitMb + 1);
+
+ // The 'count' command exceeding the memory limit must fail.
+ assertFailed({count: view.getName()});
+
+ // The 'distinct' command within the memory limit must pass.
+ assert.eq(coll.distinct("x"), [0, 1]);
+
+ // The 'distinct' command exceeding the memory limit must fail.
+ assertFailed({distinct: view.getName(), key: "largeStr"});
+}
+
+// Create a replica set with just one node and add some data.
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
+prepareData(primary);
+rst.stopSet(/*signal=*/null, /*forRestart=*/true);
+
+const tightMemoryLimits = {
+ internalDocumentSourceGroupMaxMemoryBytes: memoryLimitBytes,
+ internalQueryMaxBlockingSortMemoryUsageBytes: memoryLimitBytes,
+ internalQuerySlotBasedExecutionHashAggApproxMemoryUseInBytesBeforeSpill: memoryLimitBytes
+};
+
+// Start the mongod in a queryable backup mode with the existing dbpath.
+const connQueryableBackup = MongoRunner.runMongod({
+ dbpath: primary.dbpath,
+ noCleanData: true,
+ queryableBackupMode: "",
+ setParameter: tightMemoryLimits
+});
+assert.neq(null, connQueryableBackup, "mongod was unable to start up");
+
+runTest(connQueryableBackup, true);
+runTest(connQueryableBackup, false);
+
+MongoRunner.stopMongod(connQueryableBackup);
+
+// Recover the mongod from oplog as a standalone with the existing dbpath.
+const connRecoverStandalone = MongoRunner.runMongod({
+ dbpath: primary.dbpath,
+ noCleanData: true,
+ setParameter: Object.assign(tightMemoryLimits, {recoverFromOplogAsStandalone: true})
+});
+assert.neq(null, connRecoverStandalone, "mongod was unable to start up");
+
+runTest(connRecoverStandalone, true);
+runTest(connRecoverStandalone, false);
+
+MongoRunner.stopMongod(connRecoverStandalone);
+})(); \ No newline at end of file
diff --git a/jstests/noPassthrough/views_count_distinct_disk_use.js b/jstests/noPassthrough/views_count_distinct_disk_use.js
index a8c5167c0b2..553509bf983 100644
--- a/jstests/noPassthrough/views_count_distinct_disk_use.js
+++ b/jstests/noPassthrough/views_count_distinct_disk_use.js
@@ -35,10 +35,8 @@ assert.commandWorked(viewsDB.adminCommand(
{setParameter: 1, internalQueryMaxBlockingSortMemoryUsageBytes: memoryLimitMb * 1024 * 1024}));
testDiskUse({count: "largeView"});
-// The 'distinct' command involves '$groupBy' stage. This stage needs to spill to disk if the memory
-// limit is reached.
-assert.commandWorked(viewsDB.adminCommand(
- {setParameter: 1, internalDocumentSourceGroupMaxMemoryBytes: memoryLimitMb * 1024 * 1024}));
+// The 'distinct' command executes the view definition pipeline containing the '$sort' stage. This
+// stage needs to spill to disk if the memory limit is reached.
testDiskUse({distinct: "largeView", key: "largeStr"});
MongoRunner.stopMongod(conn);
diff --git a/jstests/readonly/aggregate.js b/jstests/readonly/aggregate.js
index a79e7ed19d2..21fc050b4f8 100644
--- a/jstests/readonly/aggregate.js
+++ b/jstests/readonly/aggregate.js
@@ -75,19 +75,6 @@ runReadOnlyTest(function() {
assert.docEq(readableCollection.aggregate(mostAwardsPipeline).toArray(),
[{_id: "Spotlight", count: 3}, {_id: "The Revenant", count: 3}]);
-
- // Check that pipelines fail with allowDiskUse true. We use runCommand manually because
- // the helper has conflicting error handling logic.
- var allowDiskUseCmd = {
- aggregate: readableCollection.getName(),
- pipeline: [],
- cursor: {},
- allowDiskUse: true
- };
-
- assert.commandFailedWithCode(readableCollection.runCommand(allowDiskUseCmd),
- ErrorCodes.IllegalOperation,
- "'allowDiskUse' is not allowed in read-only mode");
}
};
}());
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 86e5d06702e..f7ac781404f 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -171,6 +171,10 @@ boost::intrusive_ptr<ExpressionContext> makeExpressionContext(
findCommand.getLet(), // let
CurOp::get(opCtx)->dbProfileLevel() > 0 // mayDbProfile
);
+ if (storageGlobalParams.readOnly) {
+ // Disallow disk use if in read-only mode.
+ expCtx->allowDiskUse = false;
+ }
expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
expCtx->startExpressionCounters();
diff --git a/src/mongo/db/commands/map_reduce_agg.cpp b/src/mongo/db/commands/map_reduce_agg.cpp
index 29db45b92f8..eb1d432f5b5 100644
--- a/src/mongo/db/commands/map_reduce_agg.cpp
+++ b/src/mongo/db/commands/map_reduce_agg.cpp
@@ -105,6 +105,10 @@ auto makeExpressionContext(OperationContext* opCtx,
boost::none, // let
CurOp::get(opCtx)->dbProfileLevel() > 0 // mayDbProfile
);
+ if (storageGlobalParams.readOnly) {
+ // Disallow disk use if in read-only mode.
+ expCtx->allowDiskUse = false;
+ }
expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
return expCtx;
}
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 140c720514e..9172103b493 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -445,6 +445,10 @@ boost::intrusive_ptr<ExpressionContext> makeExpressionContext(
expCtx->collationMatchesDefault = collationMatchesDefault;
expCtx->forPerShardCursor = request.getPassthroughToShard().has_value();
expCtx->allowDiskUse = request.getAllowDiskUse().value_or(allowDiskUseByDefault.load());
+ if (storageGlobalParams.readOnly) {
+ // Disallow disk use if in read-only mode.
+ expCtx->allowDiskUse = false;
+ }
// If the request specified v2 resume tokens for change streams, set this on the expCtx. On 6.0
// we only expect this to occur during testing.
diff --git a/src/mongo/db/pipeline/aggregation_request_helper.cpp b/src/mongo/db/pipeline/aggregation_request_helper.cpp
index 5bfa4b7c2a3..ec8433bb1a4 100644
--- a/src/mongo/db/pipeline/aggregation_request_helper.cpp
+++ b/src/mongo/db/pipeline/aggregation_request_helper.cpp
@@ -152,7 +152,6 @@ Document serializeToCommandDoc(const AggregateCommandRequest& request) {
void validate(const BSONObj& cmdObj,
const NamespaceString& nss,
boost::optional<ExplainOptions::Verbosity> explainVerbosity) {
- bool hasAllowDiskUseElem = cmdObj.hasField(AggregateCommandRequest::kAllowDiskUseFieldName);
bool hasCursorElem = cmdObj.hasField(AggregateCommandRequest::kCursorFieldName);
bool hasExplainElem = cmdObj.hasField(AggregateCommandRequest::kExplainFieldName);
bool hasExplain = explainVerbosity ||
@@ -177,11 +176,6 @@ void validate(const BSONObj& cmdObj,
<< "' without '" << AggregateCommandRequest::kFromMongosFieldName << "'",
(!hasNeedsMergeElem || hasFromMongosElem));
- uassert(ErrorCodes::IllegalOperation,
- str::stream() << "The '" << AggregateCommandRequest::kAllowDiskUseFieldName
- << "' option is not permitted in read-only mode.",
- (!hasAllowDiskUseElem || !storageGlobalParams.readOnly));
-
auto requestReshardingResumeTokenElem =
cmdObj[AggregateCommandRequest::kRequestReshardingResumeTokenFieldName];
uassert(ErrorCodes::FailedToParse,