From fc5bc0947ceedee3b61b2d922cabd3e5df7ec07c Mon Sep 17 00:00:00 2001
From: Bernard Gorman
Date: Sat, 24 Nov 2018 16:56:20 +0000
Subject: SERVER-38408 Return postBatchResumeToken with each mongoD change stream batch

---
 src/mongo/db/pipeline/document_source_cursor.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index cd0783eb5ac..b019bbf8e35 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -118,9 +118,9 @@ void DocumentSourceCursor::loadBatch() {
         // As long as we're waiting for inserts, we shouldn't do any batching at this level
         // we need the whole pipeline to see each document to see if we should stop waiting.
         // Furthermore, if we need to return the latest oplog time (in the tailable and
-        // needs-merge case), batching will result in a wrong time.
-        if (awaitDataState(pExpCtx->opCtx).shouldWaitForInserts ||
-            (pExpCtx->isTailableAwaitData() && pExpCtx->needsMerge) ||
+        // awaitData case), batching will result in a wrong time.
+        if (pExpCtx->isTailableAwaitData() ||
+            awaitDataState(pExpCtx->opCtx).shouldWaitForInserts ||
             memUsageBytes > internalDocumentSourceCursorBatchSizeBytes.load()) {
             // End this batch and prepare PlanExecutor for yielding.
             _exec->saveState();
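
For reference, a minimal standalone sketch of the reordered cut-off this hunk introduces: a tailable awaitData cursor now ends its batch after every document, whether or not the pipeline needs to merge, so the post-batch resume token reported with each mongod batch cannot lag behind the documents already returned. This is not taken from the MongoDB sources; the BatchContext struct and shouldEndBatch() helper are hypothetical names standing in for the state read inside DocumentSourceCursor::loadBatch().

#include <cstdint>

// Hypothetical stand-in for the expression-context and batch state consulted in loadBatch().
struct BatchContext {
    bool isTailableAwaitData;         // tailable awaitData cursor (e.g. a change stream)
    bool shouldWaitForInserts;        // currently blocking for more oplog entries
    std::int64_t memUsageBytes;       // size of the batch accumulated so far
    std::int64_t batchSizeLimitBytes; // internalDocumentSourceCursorBatchSizeBytes equivalent
};

// Returns true when the cursor should stop accumulating documents, save the
// PlanExecutor state, and hand the batch (and its resume token) back to the pipeline.
bool shouldEndBatch(const BatchContext& ctx) {
    return ctx.isTailableAwaitData ||   // after this patch: unconditional, no longer gated on needsMerge
           ctx.shouldWaitForInserts ||
           ctx.memUsageBytes > ctx.batchSizeLimitBytes;
}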