Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/auth/user_cache_invalidator_job.cpp | 4
-rw-r--r--  src/mongo/db/catalog/collection_catalog_bm.cpp | 6
-rw-r--r--  src/mongo/db/change_collection_expired_documents_remover.cpp | 6
-rw-r--r--  src/mongo/db/client.h | 15
-rw-r--r--  src/mongo/db/clientcursor.cpp | 7
-rw-r--r--  src/mongo/db/commands/create_indexes_cmd.cpp | 6
-rw-r--r--  src/mongo/db/commands/dbcheck.cpp | 6
-rw-r--r--  src/mongo/db/commands/fsync.cpp | 6
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp | 7
-rw-r--r--  src/mongo/db/concurrency/deferred_writer.cpp | 3
-rw-r--r--  src/mongo/db/exec/sbe/stages/exchange.cpp | 6
-rw-r--r--  src/mongo/db/fle_crud.cpp | 14
-rw-r--r--  src/mongo/db/free_mon/free_mon_mongod.cpp | 4
-rw-r--r--  src/mongo/db/free_mon/free_mon_processor.cpp | 6
-rw-r--r--  src/mongo/db/ftdc/controller.cpp | 6
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp | 8
-rw-r--r--  src/mongo/db/index_builds_coordinator_mongod.cpp | 3
-rw-r--r--  src/mongo/db/introspect.cpp | 7
-rw-r--r--  src/mongo/db/keys_collection_manager.cpp | 6
-rw-r--r--  src/mongo/db/mongod_main.cpp | 18
-rw-r--r--  src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/change_stream_expired_pre_image_remover.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/document_source_out.cpp | 7
-rw-r--r--  src/mongo/db/process_health/config_server_health_observer.cpp | 6
-rw-r--r--  src/mongo/db/process_health/dns_health_observer.cpp | 7
-rw-r--r--  src/mongo/db/process_health/progress_monitor.cpp | 6
-rw-r--r--  src/mongo/db/repl/bgsync.cpp | 5
-rw-r--r--  src/mongo/db/repl/noop_writer.cpp | 7
-rw-r--r--  src/mongo/db/repl/oplog_applier.cpp | 4
-rw-r--r--  src/mongo/db/repl/oplog_applier_impl.cpp | 5
-rw-r--r--  src/mongo/db/repl/oplog_batcher.cpp | 7
-rw-r--r--  src/mongo/db/repl/primary_only_service.cpp | 3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 6
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_test_fixture.cpp | 8
-rw-r--r--  src/mongo/db/repl/rollback_test_fixture.cpp | 5
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp | 6
-rw-r--r--  src/mongo/db/repl/sync_source_feedback.cpp | 6
-rw-r--r--  src/mongo/db/repl/tenant_file_importer_service.cpp | 7
-rw-r--r--  src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp | 4
-rw-r--r--  src/mongo/db/repl/tenant_migration_donor_service.cpp | 11
-rw-r--r--  src/mongo/db/repl/topology_version_observer.cpp | 6
-rw-r--r--  src/mongo/db/repl/transaction_oplog_application.cpp | 7
-rw-r--r--  src/mongo/db/repl/wait_for_majority_service.cpp | 10
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp | 21
-rw-r--r--  src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp | 7
-rw-r--r--  src/mongo/db/s/balancer_stats_registry.cpp | 6
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_chunks_command.cpp | 5
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_tags_command.cpp | 5
-rw-r--r--  src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp | 5
-rw-r--r--  src/mongo/db/s/config/placement_history_cleaner.cpp | 4
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp | 18
-rw-r--r--  src/mongo/db/s/create_collection_coordinator.cpp | 4
-rw-r--r--  src/mongo/db/s/drop_collection_coordinator.cpp | 4
-rw-r--r--  src/mongo/db/s/flush_resharding_state_change_command.cpp | 5
-rw-r--r--  src/mongo/db/s/global_index/global_index_cloning_service.cpp | 8
-rw-r--r--  src/mongo/db/s/global_index/global_index_inserter.cpp | 6
-rw-r--r--  src/mongo/db/s/migration_batch_fetcher.h | 4
-rw-r--r--  src/mongo/db/s/migration_batch_inserter.cpp | 4
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp | 27
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 4
-rw-r--r--  src/mongo/db/s/migration_util.cpp | 38
-rw-r--r--  src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp | 4
-rw-r--r--  src/mongo/db/s/persistent_task_queue_test.cpp | 5
-rw-r--r--  src/mongo/db/s/query_analysis_writer.cpp | 12
-rw-r--r--  src/mongo/db/s/range_deleter_service.cpp | 5
-rw-r--r--  src/mongo/db/s/range_deletion_util.h | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_collection_cloner.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp | 7
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp | 5
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_application.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp | 5
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp | 5
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp | 13
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp | 5
-rw-r--r--  src/mongo/db/s/resharding_test_commands.cpp | 5
-rw-r--r--  src/mongo/db/s/session_catalog_migration_destination.cpp | 6
-rw-r--r--  src/mongo/db/s/shard_filtering_metadata_refresh.cpp | 9
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 25
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.cpp | 6
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod.cpp | 7
-rw-r--r--  src/mongo/db/s/shardsvr_move_primary_enter_critical_section_command.cpp | 4
-rw-r--r--  src/mongo/db/s/shardsvr_move_primary_exit_critical_section_command.cpp | 4
-rw-r--r--  src/mongo/db/s/shardsvr_move_range_command.cpp | 4
-rw-r--r--  src/mongo/db/s/shardsvr_participant_block_command.cpp | 4
-rw-r--r--  src/mongo/db/s/transaction_coordinator.cpp | 7
-rw-r--r--  src/mongo/db/s/transaction_coordinator_futures_util.h | 6
-rw-r--r--  src/mongo/db/session/kill_sessions_local.cpp | 7
-rw-r--r--  src/mongo/db/session/logical_session_cache_impl.cpp | 20
-rw-r--r--  src/mongo/db/session/session_catalog_mongod.cpp | 13
-rw-r--r--  src/mongo/db/session/session_killer.cpp | 6
-rw-r--r--  src/mongo/db/storage/checkpointer.cpp | 5
-rw-r--r--  src/mongo/db/storage/control/journal_flusher.cpp | 6
-rw-r--r--  src/mongo/db/storage/disk_space_monitor.cpp | 6
-rw-r--r--  src/mongo/db/storage/flow_control.cpp | 4
-rw-r--r--  src/mongo/db/storage/oplog_cap_maintainer_thread.cpp | 5
-rw-r--r--  src/mongo/db/storage/storage_engine_impl.cpp | 4
-rw-r--r--  src/mongo/db/storage/ticketholder_monitor.cpp | 6
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 7
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp | 6
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict_test.cpp | 6
-rw-r--r--  src/mongo/db/transaction/internal_transactions_reap_service.cpp | 4
-rw-r--r--  src/mongo/db/transaction/transaction_api.cpp | 3
-rw-r--r--  src/mongo/db/transaction/transaction_participant.cpp | 23
-rw-r--r--  src/mongo/db/ttl.cpp | 10
-rw-r--r--  src/mongo/db/vector_clock_mongod.cpp | 6
109 files changed, 312 insertions(+), 483 deletions(-)
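
Note on the pattern below: this commit flips the Client killability default. `_systemOperationKillable` in client.h now starts out false, the opt-out helper setSystemOperationUnkillableByStepdown (together with its TODO(SERVER-74656) through TODO(SERVER-74662) comments) is removed, and the opt-in helper setSystemOperationKillableByStepdown takes its place, so every thread, thread pool, or periodic job that should be interrupted on stepdown now requests it explicitly. A minimal sketch of the opt-in as it appears at the call sites below (illustrative only, not part of the patch; the thread name is hypothetical):

    // A system thread that tolerates being killed on stepdown opts in once,
    // holding the Client lock as the contract in client.h requires.
    Client::initThread("exampleWorker");
    {
        stdx::lock_guard<Client> lk(cc());
        cc().setSystemOperationKillableByStepdown(lk);
    }
    auto opCtx = cc().makeOperationContext();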
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index caa0939b3e0..d7c599f1fd9 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -145,9 +145,7 @@ void UserCacheInvalidator::start(ServiceContext* serviceCtx, OperationContext* o
PeriodicRunner::PeriodicJob job(
"UserCacheInvalidator",
[serviceCtx](Client* client) { getUserCacheInvalidator(serviceCtx)->run(); },
- loadInterval(),
- // TODO(SERVER-74660): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/);
+ loadInterval());
invalidator->_job =
std::make_unique<PeriodicJobAnchor>(periodicRunner->makeJob(std::move(job)));
diff --git a/src/mongo/db/catalog/collection_catalog_bm.cpp b/src/mongo/db/catalog/collection_catalog_bm.cpp
index a78b9d1e3d6..0d033653188 100644
--- a/src/mongo/db/catalog/collection_catalog_bm.cpp
+++ b/src/mongo/db/catalog/collection_catalog_bm.cpp
@@ -107,12 +107,6 @@ void BM_CollectionCatalogWriteBatchedWithGlobalExclusiveLock(benchmark::State& s
ThreadClient threadClient(serviceContext);
ServiceContext::UniqueOperationContext opCtx = threadClient->makeOperationContext();
- // TODO(SERVER-74657): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*threadClient.get());
- threadClient.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
createCollections(opCtx.get(), state.range(0));
Lock::GlobalLock globalLk(opCtx.get(), MODE_X);
diff --git a/src/mongo/db/change_collection_expired_documents_remover.cpp b/src/mongo/db/change_collection_expired_documents_remover.cpp
index 5dc4d1fe7e6..083256e8859 100644
--- a/src/mongo/db/change_collection_expired_documents_remover.cpp
+++ b/src/mongo/db/change_collection_expired_documents_remover.cpp
@@ -174,11 +174,7 @@ public:
ChangeCollectionExpiredDocumentsRemover(ServiceContext* serviceContext) {
const auto period = Seconds{gChangeCollectionExpiredDocumentsRemoverJobSleepSeconds.load()};
_jobAnchor = serviceContext->getPeriodicRunner()->makeJob(
- {"ChangeCollectionExpiredDocumentsRemover",
- removeExpiredDocuments,
- period,
- // TODO(SERVER-74662): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/});
+ {"ChangeCollectionExpiredDocumentsRemover", removeExpiredDocuments, period});
_jobAnchor.start();
}
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index 1610f8ff2e8..39c9b4703e4 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -41,7 +41,6 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/service_context.h"
-#include "mongo/logv2/log.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/platform/random.h"
#include "mongo/stdx/thread.h"
@@ -195,15 +194,15 @@ public:
}
/**
- * Used to mark system operations that are not allowed to be killed by the stepdown process.
- * This should only be called once per Client and only from system connections. The Client
- * should be locked by the caller.
+ * Used to mark system operations that are allowed to be killed by the stepdown process. This
+ * should only be called once per Client and only from system connections. The Client should be
+ * locked by the caller.
*/
- void setSystemOperationUnkillableByStepdown(WithLock) {
+ void setSystemOperationKillableByStepdown(WithLock) {
// This can only be changed once for system operations.
invariant(isFromSystemConnection());
- invariant(_systemOperationKillable);
- _systemOperationKillable = false;
+ invariant(!_systemOperationKillable);
+ _systemOperationKillable = true;
}
/**
@@ -295,7 +294,7 @@ private:
OperationContext* _opCtx = nullptr;
// If the active system client operation is allowed to be killed.
- bool _systemOperationKillable = true;
+ bool _systemOperationKillable = false;
PseudoRandom _prng;
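
The client.h hunk above inverts both the default and the once-only invariant: a new Client starts out unkillable, and opting in is permitted exactly once, from a system connection. A hedged sketch of the resulting contract (`client` is assumed to come from a system connection; the second call is shown only to illustrate the invariant):

    // Caller holds the Client lock, per the doc comment above.
    stdx::lock_guard<Client> lk(*client);
    client->setSystemOperationKillableByStepdown(lk);  // first call: flag flips to true
    // A second call would trip invariant(!_systemOperationKillable).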
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 90c3a4581cc..db7a5253ef3 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -357,13 +357,6 @@ public:
void run() {
ThreadClient tc("clientcursormon", getGlobalServiceContext());
-
- // TODO(SERVER-74662): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
while (!globalInShutdownDeprecated()) {
{
const ServiceContext::UniqueOperationContext opCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/commands/create_indexes_cmd.cpp b/src/mongo/db/commands/create_indexes_cmd.cpp
index ce94c92662a..be6ef5c15e2 100644
--- a/src/mongo/db/commands/create_indexes_cmd.cpp
+++ b/src/mongo/db/commands/create_indexes_cmd.cpp
@@ -640,6 +640,12 @@ CreateIndexesReply runCreateIndexesWithCoordinator(OperationContext* opCtx,
// The current OperationContext may be interrupted, which would prevent us from
// taking locks. Use a new OperationContext to abort the index build.
auto newClient = opCtx->getServiceContext()->makeClient("abort-index-build");
+
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient.get()->setSystemOperationKillableByStepdown(lk);
+ }
+
AlternativeClientRegion acr(newClient);
const auto abortCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index e329d1d781a..312431536a2 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -302,6 +302,12 @@ protected:
virtual void run() override {
// Every dbCheck runs in its own client.
ThreadClient tc(name(), getGlobalServiceContext());
+
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc.get()->setSystemOperationKillableByStepdown(lk);
+ }
+
auto uniqueOpCtx = tc->makeOperationContext();
auto opCtx = uniqueOpCtx.get();
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 581501ea841..795adf38540 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -356,12 +356,6 @@ void FSyncLockThread::run() {
stdx::lock_guard<SimpleMutex> lkf(filesLockedFsync);
stdx::unique_lock<Latch> stateLock(fsyncStateMutex);
- // TODO(SERVER-74657): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
invariant(fsyncCmd.getLockCount_inLock() == 1);
try {
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index e93ae15ab1e..e6b6a3cdae0 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -731,13 +731,6 @@ public:
// Subclient used by transaction operations.
_client = opCtx->getServiceContext()->makeClient(forCommand.toString());
-
- // TODO(SERVER-74660): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*_client.get());
- _client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
auto as = AuthorizationSession::get(_client.get());
if (as) {
as->grantInternalAuthorization(_client.get());
diff --git a/src/mongo/db/concurrency/deferred_writer.cpp b/src/mongo/db/concurrency/deferred_writer.cpp
index 558f3e282cc..0ec030345ea 100644
--- a/src/mongo/db/concurrency/deferred_writer.cpp
+++ b/src/mongo/db/concurrency/deferred_writer.cpp
@@ -151,6 +151,9 @@ void DeferredWriter::startup(std::string workerName) {
options.maxThreads = 1;
options.onCreateThread = [](const std::string& name) {
Client::initThread(name);
+
+ stdx::lock_guard<Client> lk(cc());
+ cc().setSystemOperationKillableByStepdown(lk);
};
_pool = std::make_unique<ThreadPool>(options);
_pool->startup();
diff --git a/src/mongo/db/exec/sbe/stages/exchange.cpp b/src/mongo/db/exec/sbe/stages/exchange.cpp
index f0642818710..02b5c3ce7d0 100644
--- a/src/mongo/db/exec/sbe/stages/exchange.cpp
+++ b/src/mongo/db/exec/sbe/stages/exchange.cpp
@@ -46,12 +46,6 @@ MONGO_INITIALIZER(s_globalThreadPool)(InitializerContext* context) {
options.maxThreads = 128;
options.onCreateThread = [](const std::string& name) {
Client::initThread(name);
-
- // TODO(SERVER-74662): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
};
s_globalThreadPool = std::make_unique<ThreadPool>(options);
s_globalThreadPool->startup();
diff --git a/src/mongo/db/fle_crud.cpp b/src/mongo/db/fle_crud.cpp
index 7c41c7374d7..a37b7f8169b 100644
--- a/src/mongo/db/fle_crud.cpp
+++ b/src/mongo/db/fle_crud.cpp
@@ -1456,13 +1456,6 @@ BSONObj FLEQueryInterfaceImpl::getById(const NamespaceString& nss, BSONElement e
uint64_t FLEQueryInterfaceImpl::countDocuments(const NamespaceString& nss) {
// Since count() does not work in a transaction, call count() by bypassing the transaction api
auto client = _serviceContext->makeClient("SEP-int-fle-crud");
-
- // TODO(SERVER-74660): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client.get());
- client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion clientRegion(client);
auto opCtx = cc().makeOperationContext();
auto as = AuthorizationSession::get(cc());
@@ -1801,13 +1794,6 @@ std::vector<std::vector<FLEEdgeCountInfo>> FLETagNoTXNQuery::getTags(
// Pop off the current op context so we can get a fresh set of read concern settings
auto client = _opCtx->getServiceContext()->makeClient("FLETagNoTXNQuery");
-
- // TODO(SERVER-74660): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client.get());
- client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion clientRegion(client);
auto opCtx = cc().makeOperationContext();
auto as = AuthorizationSession::get(cc());
diff --git a/src/mongo/db/free_mon/free_mon_mongod.cpp b/src/mongo/db/free_mon/free_mon_mongod.cpp
index 05005bf2c89..bf247415dc8 100644
--- a/src/mongo/db/free_mon/free_mon_mongod.cpp
+++ b/src/mongo/db/free_mon/free_mon_mongod.cpp
@@ -81,10 +81,6 @@ auto makeTaskExecutor(ServiceContext* /*serviceContext*/) {
tpOptions.maxThreads = 2;
tpOptions.onCreateThread = [](const std::string& threadName) {
Client::initThread(threadName.c_str());
-
- // TODO(SERVER-74659): Please revisit if this thread could be made killable.
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
};
return std::make_unique<executor::ThreadPoolTaskExecutor>(
std::make_unique<ThreadPool>(tpOptions), executor::makeNetworkInterface("FreeMonNet"));
diff --git a/src/mongo/db/free_mon/free_mon_processor.cpp b/src/mongo/db/free_mon/free_mon_processor.cpp
index 872b5d436f4..d42dc473d14 100644
--- a/src/mongo/db/free_mon/free_mon_processor.cpp
+++ b/src/mongo/db/free_mon/free_mon_processor.cpp
@@ -165,12 +165,6 @@ void FreeMonProcessor::run() {
Client::initThread("FreeMonProcessor");
Client* client = &cc();
- // TODO(SERVER-74659): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client);
- client->setSystemOperationUnkillableByStepdown(lk);
- }
-
while (true) {
auto item = _queue.dequeue(client->getServiceContext()->getPreciseClockSource());
if (!item.has_value()) {
diff --git a/src/mongo/db/ftdc/controller.cpp b/src/mongo/db/ftdc/controller.cpp
index 6ad4c10784b..c2c409be1d8 100644
--- a/src/mongo/db/ftdc/controller.cpp
+++ b/src/mongo/db/ftdc/controller.cpp
@@ -196,12 +196,6 @@ void FTDCController::doLoop() noexcept {
Client::initThread(kFTDCThreadName);
Client* client = &cc();
- // TODO(SERVER-74659): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client);
- client->setSystemOperationUnkillableByStepdown(lk);
- }
-
// Update config
{
stdx::lock_guard<Latch> lock(_mutex);
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index e95b94c33d3..86cf52d6f56 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -1652,6 +1652,7 @@ void IndexBuildsCoordinator::onStepUp(OperationContext* opCtx) {
PromiseAndFuture<void> promiseAndFuture;
_stepUpThread = stdx::thread([this, &promiseAndFuture] {
Client::initThread("IndexBuildsCoordinator-StepUp");
+
auto threadCtx = Client::getCurrent()->makeOperationContext();
threadCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
promiseAndFuture.promise.emplaceValue();
@@ -2548,13 +2549,6 @@ namespace {
template <typename Func>
void runOnAlternateContext(OperationContext* opCtx, std::string name, Func func) {
auto newClient = opCtx->getServiceContext()->makeClient(name);
-
- // TODO(SERVER-74657): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*newClient.get());
- newClient.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(newClient);
const auto newCtx = cc().makeOperationContext();
func(newCtx.get());
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 5a5773b1877..74aa4eb3265 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -93,9 +93,6 @@ ThreadPool::Options makeDefaultThreadPoolOptions() {
// Ensure all threads have a client.
options.onCreateThread = [](const std::string& threadName) {
Client::initThread(threadName.c_str());
-
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
};
return options;
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 81e6b1aaca0..b5aecf38196 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -95,13 +95,6 @@ void profile(OperationContext* opCtx, NetworkOp op) {
// killed or timed out. Those are the case we want to have profiling data.
auto newClient = opCtx->getServiceContext()->makeClient("profiling");
auto newCtx = newClient->makeOperationContext();
-
- // TODO(SERVER-74657): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*newClient.get());
- newClient.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
// We swap the lockers as that way we preserve locks held in transactions and any other
// options set for the locker like maxLockTimeout.
auto oldLocker = opCtx->getClient()->swapLockState(
diff --git a/src/mongo/db/keys_collection_manager.cpp b/src/mongo/db/keys_collection_manager.cpp
index b03f6417afb..31e97291948 100644
--- a/src/mongo/db/keys_collection_manager.cpp
+++ b/src/mongo/db/keys_collection_manager.cpp
@@ -240,6 +240,12 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
std::string threadName,
Milliseconds refreshInterval) {
ThreadClient tc(threadName, service);
+
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc.get()->setSystemOperationKillableByStepdown(lk);
+ }
+
ON_BLOCK_EXIT([this]() mutable { _hasSeenKeys.store(false); });
unsigned errorCount = 0;
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 291856a40df..e925e51cd42 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -421,12 +421,6 @@ MONGO_FAIL_POINT_DEFINE(shutdownAtStartup);
ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
Client::initThread("initandlisten");
- // TODO(SERVER-74659): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
serviceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));
DBDirectClientFactory::get(serviceContext).registerImplementation([](OperationContext* opCtx) {
@@ -1165,9 +1159,6 @@ auto makeReplicaSetNodeExecutor(ServiceContext* serviceContext) {
tpOptions.maxThreads = ThreadPool::Options::kUnlimited;
tpOptions.onCreateThread = [](const std::string& threadName) {
Client::initThread(threadName.c_str());
-
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
};
auto hookList = std::make_unique<rpc::EgressMetadataHookList>();
hookList->addHook(std::make_unique<rpc::VectorClockMetadataHook>(serviceContext));
@@ -1184,9 +1175,6 @@ auto makeReplicationExecutor(ServiceContext* serviceContext) {
tpOptions.maxThreads = 50;
tpOptions.onCreateThread = [](const std::string& threadName) {
Client::initThread(threadName.c_str());
-
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
};
auto hookList = std::make_unique<rpc::EgressMetadataHookList>();
hookList->addHook(std::make_unique<rpc::VectorClockMetadataHook>(serviceContext));
@@ -1334,13 +1322,9 @@ MONGO_INITIALIZER_GENERAL(setSSLManagerType, (), ("SSLManager"))
void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
// This client initiation pattern is only to be used here, with plans to eliminate this pattern
// down the line.
- if (!haveClient()) {
+ if (!haveClient())
Client::initThread(getThreadName());
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
auto const client = Client::getCurrent();
auto const serviceContext = client->getServiceContext();
diff --git a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
index 3e3d10cad3e..09467c988a5 100644
--- a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
+++ b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp
@@ -115,9 +115,7 @@ void PeriodicThreadToAbortExpiredTransactions::_init(ServiceContext* serviceCont
LOGV2_DEBUG(4684101, 2, "Periodic job canceled", "{reason}"_attr = ex.reason());
}
},
- getPeriod(gTransactionLifetimeLimitSeconds.load()),
- // TODO(SERVER-74656): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/);
+ getPeriod(gTransactionLifetimeLimitSeconds.load()));
_anchor = std::make_shared<PeriodicJobAnchor>(periodicRunner->makeJob(std::move(job)));
diff --git a/src/mongo/db/pipeline/change_stream_expired_pre_image_remover.cpp b/src/mongo/db/pipeline/change_stream_expired_pre_image_remover.cpp
index 19ec2bb895b..15d4b3a7d34 100644
--- a/src/mongo/db/pipeline/change_stream_expired_pre_image_remover.cpp
+++ b/src/mongo/db/pipeline/change_stream_expired_pre_image_remover.cpp
@@ -90,6 +90,10 @@ public:
ThreadClient tc(name(), getGlobalServiceContext());
AuthorizationSession::get(cc())->grantInternalAuthorization(&cc());
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc.get()->setSystemOperationKillableByStepdown(lk);
+ }
while (true) {
LOGV2_DEBUG(6278517, 3, "Thread awake");
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index 4c8103b2e37..db53f060070 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -71,13 +71,6 @@ DocumentSourceOut::~DocumentSourceOut() {
if (_tempNs.size() || (_timeseries && !_timeseriesViewCreated)) {
auto cleanupClient =
pExpCtx->opCtx->getServiceContext()->makeClient("$out_replace_coll_cleanup");
-
- // TODO(SERVER-74662): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*cleanupClient.get());
- cleanupClient.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(cleanupClient);
// Create a new operation context so that any interrupts on the current operation will
// not affect the dropCollection operation below.
diff --git a/src/mongo/db/process_health/config_server_health_observer.cpp b/src/mongo/db/process_health/config_server_health_observer.cpp
index 2044e68d674..bf011d28472 100644
--- a/src/mongo/db/process_health/config_server_health_observer.cpp
+++ b/src/mongo/db/process_health/config_server_health_observer.cpp
@@ -173,12 +173,6 @@ Future<ConfigServerHealthObserver::CheckResult> ConfigServerHealthObserver::_che
checkCtx->opCtx = checkCtx->client->makeOperationContext();
checkCtx->opCtx->setDeadlineAfterNowBy(kObserverTimeout, ErrorCodes::ExceededTimeLimit);
- // TODO(SERVER-74659): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*checkCtx->client.get());
- checkCtx->client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
LOGV2_DEBUG(5939001, 3, "Checking Config server health");
_runSmokeReadShardsCommand(checkCtx);
diff --git a/src/mongo/db/process_health/dns_health_observer.cpp b/src/mongo/db/process_health/dns_health_observer.cpp
index 146c7881737..b4a8bf0bf2b 100644
--- a/src/mongo/db/process_health/dns_health_observer.cpp
+++ b/src/mongo/db/process_health/dns_health_observer.cpp
@@ -71,13 +71,6 @@ Future<HealthCheckStatus> DnsHealthObserver::periodicCheckImpl(
if (!isFailPointActive) {
auto client = _svcCtx->makeClient("DNSHealthObserver");
-
- // TODO(SERVER-74659): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client.get());
- client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
auto opCtx = client->makeOperationContext();
auto const shardRegistry = Grid::get(_svcCtx)->shardRegistry();
auto shardIds = shardRegistry->getAllShardIds(opCtx.get());
diff --git a/src/mongo/db/process_health/progress_monitor.cpp b/src/mongo/db/process_health/progress_monitor.cpp
index fd16d76606c..d940a06c498 100644
--- a/src/mongo/db/process_health/progress_monitor.cpp
+++ b/src/mongo/db/process_health/progress_monitor.cpp
@@ -141,12 +141,6 @@ void ProgressMonitor::_progressMonitorLoop() {
Client::initThread("FaultManagerProgressMonitor"_sd, _svcCtx, nullptr);
static const int kSleepsPerInterval = 10;
- // TODO(SERVER-74659): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
while (!_terminate.load()) {
progressMonitorCheck(_crashCb);
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 0cb2c057f8f..daa0389c9fa 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -226,11 +226,6 @@ void BackgroundSync::_run() {
Client::initThread("BackgroundSync");
AuthorizationSession::get(cc())->grantInternalAuthorization(&cc());
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
while (!inShutdown()) {
try {
_runProducer();
diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp
index 81be6651282..4bf8188010b 100644
--- a/src/mongo/db/repl/noop_writer.cpp
+++ b/src/mongo/db/repl/noop_writer.cpp
@@ -83,13 +83,6 @@ public:
private:
void run(Seconds waitTime, NoopWriteFn noopWrite) {
Client::initThread("NoopWriter");
-
- // TODO(SERVER-74656): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
while (true) {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
diff --git a/src/mongo/db/repl/oplog_applier.cpp b/src/mongo/db/repl/oplog_applier.cpp
index 3e8364baa34..af33eaf9633 100644
--- a/src/mongo/db/repl/oplog_applier.cpp
+++ b/src/mongo/db/repl/oplog_applier.cpp
@@ -171,9 +171,9 @@ std::unique_ptr<ThreadPool> makeReplWriterPool(int threadCount,
auto client = Client::getCurrent();
AuthorizationSession::get(*client)->grantInternalAuthorization(client);
- if (!isKillableByStepdown) {
+ if (isKillableByStepdown) {
stdx::lock_guard<Client> lk(*client);
- client->setSystemOperationUnkillableByStepdown(lk);
+ client->setSystemOperationKillableByStepdown(lk);
}
};
auto pool = std::make_unique<ThreadPool>(options);
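
In makeReplWriterPool the sense of the isKillableByStepdown parameter is likewise inverted to match the new default: the onCreateThread hook opts a writer thread in only when the caller asked for killable threads. A sketch of an equivalent hook, assuming the ThreadPool::Options shape used throughout this patch:

    ThreadPool::Options options;
    options.onCreateThread = [isKillableByStepdown](const std::string& threadName) {
        Client::initThread(threadName);
        auto client = Client::getCurrent();
        AuthorizationSession::get(*client)->grantInternalAuthorization(client);
        if (isKillableByStepdown) {
            // Opt in only on request; otherwise the thread keeps the
            // unkillable-by-stepdown default.
            stdx::lock_guard<Client> lk(*client);
            client->setSystemOperationKillableByStepdown(lk);
        }
    };
    auto pool = std::make_unique<ThreadPool>(options);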
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index 10239d41777..baff24f4ba7 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -296,11 +296,6 @@ void ApplyBatchFinalizerForJournal::record(const OpTimeAndWallTime& newOpTimeAnd
void ApplyBatchFinalizerForJournal::_run() {
Client::initThread("ApplyBatchFinalizerForJournal");
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
while (true) {
OpTimeAndWallTime latestOpTimeAndWallTime = {OpTime(), Date_t()};
diff --git a/src/mongo/db/repl/oplog_batcher.cpp b/src/mongo/db/repl/oplog_batcher.cpp
index a652d7346e3..86415c4f1ad 100644
--- a/src/mongo/db/repl/oplog_batcher.cpp
+++ b/src/mongo/db/repl/oplog_batcher.cpp
@@ -298,13 +298,6 @@ void OplogBatcher::_consume(OperationContext* opCtx, OplogBuffer* oplogBuffer) {
void OplogBatcher::_run(StorageInterface* storageInterface) {
Client::initThread("ReplBatcher");
- {
- // The OplogBatcher's thread has its own shutdown sequence triggered by the OplogApplier,
- // so we don't want it to be killed in other ways.
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
BatchLimits batchLimits;
while (true) {
diff --git a/src/mongo/db/repl/primary_only_service.cpp b/src/mongo/db/repl/primary_only_service.cpp
index 4bf6d12cc49..ab4f061d84d 100644
--- a/src/mongo/db/repl/primary_only_service.cpp
+++ b/src/mongo/db/repl/primary_only_service.cpp
@@ -335,6 +335,9 @@ void PrimaryOnlyService::startup(OperationContext* opCtx) {
auto client = Client::getCurrent();
AuthorizationSession::get(*client)->grantInternalAuthorization(&cc());
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+
// Associate this Client with this PrimaryOnlyService
primaryOnlyServiceStateForClient(client).primaryOnlyService = this;
};
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 427c05f6dbd..313ac3773f6 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -146,12 +146,6 @@ auto makeThreadPool(const std::string& poolName, const std::string& threadName)
threadPoolOptions.poolName = poolName;
threadPoolOptions.onCreateThread = [](const std::string& threadName) {
Client::initThread(threadName.c_str());
-
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
AuthorizationSession::get(cc())->grantInternalAuthorization(&cc());
};
return std::make_unique<ThreadPool>(threadPoolOptions);
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 60ba492573f..1113eb9bc6c 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -2651,11 +2651,6 @@ void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::_killOpThreadFn()
invariant(!cc().isFromUserConnection());
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
LOGV2(21343, "Starting to kill user operations");
auto uniqueOpCtx = cc().makeOperationContext();
OperationContext* opCtx = uniqueOpCtx.get();
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 6b50af1e0b7..744e1cf824f 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -116,10 +116,7 @@ void ReplCoordTest::addSelf(const HostAndPort& selfHost) {
void ReplCoordTest::init() {
invariant(!_repl);
invariant(!_callShutdown);
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
+
auto service = getGlobalServiceContext();
_storageInterface = new StorageInterfaceMock();
StorageInterface::set(service, std::unique_ptr<StorageInterface>(_storageInterface));
@@ -163,9 +160,6 @@ void ReplCoordTest::init() {
executor::ThreadPoolMock::Options tpOptions;
tpOptions.onCreateThread = []() {
Client::initThread("replexec");
-
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
};
auto pool = std::make_unique<executor::ThreadPoolMock>(_net, seed, tpOptions);
auto replExec =
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index 5eca6a6e47c..f88973b38b9 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -88,10 +88,7 @@ public:
void RollbackTest::setUp() {
ServiceContextMongoDTest::setUp();
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
+
_storageInterface = new StorageInterfaceRollback();
auto serviceContext = getServiceContext();
auto consistencyMarkers = std::make_unique<ReplicationConsistencyMarkersMock>();
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 01e9a24cbea..00173186d96 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -224,12 +224,6 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
auto opCtx = cc().makeOperationContext();
opCtx->setEnforceConstraints(false);
- // TODO(SERVER-74656): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
// DocumentValidationSettings::kDisableInternalValidation is currently inert.
// But, it's logically ok to disable internal validation as this function gets called
// only during initial sync.
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 37af1be31a2..9b74f405eb0 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -159,12 +159,6 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor,
ReplicationCoordinator* replCoord) {
Client::initThread("SyncSourceFeedback");
- // TODO(SERVER-74656): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
HostAndPort syncTarget;
// keepAliveInterval indicates how frequently to forward progress in the absence of updates.
diff --git a/src/mongo/db/repl/tenant_file_importer_service.cpp b/src/mongo/db/repl/tenant_file_importer_service.cpp
index 2c6bca06031..9764eb86820 100644
--- a/src/mongo/db/repl/tenant_file_importer_service.cpp
+++ b/src/mongo/db/repl/tenant_file_importer_service.cpp
@@ -134,13 +134,6 @@ void TenantFileImporterService::startMigration(const UUID& migrationId) {
_workerThread = std::make_unique<stdx::thread>([this, migrationId] {
Client::initThread("TenantFileImporterService");
-
- // TODO(SERVER-74661): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
try {
_handleEvents(migrationId);
} catch (const DBException& err) {
diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp
index 0c193a2ab11..0b020e66722 100644
--- a/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp
+++ b/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp
@@ -72,10 +72,6 @@ TenantMigrationAccessBlockerRegistry::TenantMigrationAccessBlockerRegistry() {
threadPoolOptions.poolName = "TenantMigrationBlockerAsyncThreadPool";
threadPoolOptions.onCreateThread = [](const std::string& threadName) {
Client::initThread(threadName.c_str());
-
- // TODO(SERVER-74661): Please revisit if this thread could be made killable.
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
};
_asyncBlockingOperationsExecutor = std::make_shared<executor::ThreadPoolTaskExecutor>(
std::make_unique<ThreadPool>(threadPoolOptions),
diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp
index c432b856d4e..596ed32c807 100644
--- a/src/mongo/db/repl/tenant_migration_donor_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp
@@ -382,6 +382,17 @@ TenantMigrationDonorService::Instance::_makeRecipientCmdExecutor() {
Client::initThread(threadName.c_str());
auto client = Client::getCurrent();
AuthorizationSession::get(*client)->grantInternalAuthorization(&cc());
+
+ // Ideally, we should also associate the client created by _recipientCmdExecutor with the
+ // TenantMigrationDonorService to make the opCtxs created by the task executor get
+ // registered in the TenantMigrationDonorService, and killed on stepdown. But that would
+ // require passing the pointer to the TenantMigrationService into the Instance and making
+ // constructInstance not const so we can set the client's decoration here. Right now there
+ // is no need for that since the task executor is only used with scheduleRemoteCommand and
+ // no opCtx will be created (the cancellation token is responsible for canceling the
+ // outstanding work on the task executor).
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
};
auto hookList = std::make_unique<rpc::EgressMetadataHookList>();
diff --git a/src/mongo/db/repl/topology_version_observer.cpp b/src/mongo/db/repl/topology_version_observer.cpp
index 916d401ed25..07eb6c5f327 100644
--- a/src/mongo/db/repl/topology_version_observer.cpp
+++ b/src/mongo/db/repl/topology_version_observer.cpp
@@ -178,12 +178,6 @@ void TopologyVersionObserver::_workerThreadBody() noexcept try {
invariant(_serviceContext);
ThreadClient tc(kTopologyVersionObserverName, _serviceContext);
- // TODO(SERVER-74656): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
auto getTopologyVersion = [&]() -> boost::optional<TopologyVersion> {
// Only the observer thread updates `_cache`, thus there is no need to hold the lock before
// accessing `_cache` here.
diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp
index 01ad3f0aa27..c61f2cbde99 100644
--- a/src/mongo/db/repl/transaction_oplog_application.cpp
+++ b/src/mongo/db/repl/transaction_oplog_application.cpp
@@ -786,13 +786,6 @@ void reconstructPreparedTransactions(OperationContext* opCtx, repl::OplogApplica
// transaction oplog entry.
auto newClient =
opCtx->getServiceContext()->makeClient("reconstruct-prepared-transactions");
-
- // TODO(SERVER-74656): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*newClient.get());
- newClient.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(newClient);
const auto newOpCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/repl/wait_for_majority_service.cpp b/src/mongo/db/repl/wait_for_majority_service.cpp
index a55bfcc701e..37d6407e0fe 100644
--- a/src/mongo/db/repl/wait_for_majority_service.cpp
+++ b/src/mongo/db/repl/wait_for_majority_service.cpp
@@ -107,16 +107,6 @@ void WaitForMajorityServiceImplBase::startup(ServiceContext* ctx) {
ClientStrand::make(ctx->makeClient(kWaitClientName + _getReadOrWrite()));
_waitForMajorityCancellationClient =
ClientStrand::make(ctx->makeClient(kCancelClientName + _getReadOrWrite()));
- // TODO(SERVER-74656): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*_waitForMajorityClient->getClientPointer());
- _waitForMajorityClient->getClientPointer()->setSystemOperationUnkillableByStepdown(lk);
- }
- {
- stdx::lock_guard<Client> lk(*_waitForMajorityCancellationClient->getClientPointer());
- _waitForMajorityCancellationClient->getClientPointer()
- ->setSystemOperationUnkillableByStepdown(lk);
- }
_backgroundWorkComplete = _periodicallyWaitForMajority();
_pool->startup();
_state = State::kRunning;
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 8110b4a5da9..186d1dc66b6 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -440,13 +440,6 @@ void Balancer::_consumeActionStreamLoop() {
});
Client::initThread("BalancerSecondary");
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
auto opCtx = cc().makeOperationContext();
executor::ScopedTaskExecutor executor(
Grid::get(opCtx.get())->getExecutorPool()->getFixedExecutor());
@@ -457,13 +450,6 @@ void Balancer::_consumeActionStreamLoop() {
ActionsStreamPolicy* policy) {
invariant(_outstandingStreamingOps.addAndFetch(-1) >= 0);
ThreadClient tc("BalancerSecondaryThread::applyActionResponse", getGlobalServiceContext());
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
auto opCtx = tc->makeOperationContext();
policy->applyActionResult(opCtx.get(), action, response);
};
@@ -640,13 +626,6 @@ void Balancer::_mainThread() {
});
Client::initThread("Balancer");
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
auto opCtx = cc().makeOperationContext();
auto shardingContext = Grid::get(opCtx.get());
diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp
index 0db96506cdd..82db31f0437 100644
--- a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp
@@ -360,13 +360,6 @@ void BalancerCommandsSchedulerImpl::_workerThread() {
});
Client::initThread("BalancerCommandsScheduler");
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
bool stopWorkerRequested = false;
LOGV2(5847205, "Balancer scheduler thread started");
diff --git a/src/mongo/db/s/balancer_stats_registry.cpp b/src/mongo/db/s/balancer_stats_registry.cpp
index cacb45c3896..3ad4c8e0b98 100644
--- a/src/mongo/db/s/balancer_stats_registry.cpp
+++ b/src/mongo/db/s/balancer_stats_registry.cpp
@@ -89,12 +89,6 @@ void BalancerStatsRegistry::initializeAsync(OperationContext* opCtx) {
ThreadClient tc("BalancerStatsRegistry::asynchronousInitialization",
getGlobalServiceContext());
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
{
stdx::lock_guard lk{_stateMutex};
if (const auto currentState = _state.load(); currentState != State::kPrimaryIdle) {
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index f5f5b01d6d9..d4e95f25d03 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -704,6 +704,10 @@ void CollectionShardingRuntime::_cleanupBeforeInstallingNewCollectionMetadata(
ExecutorFuture<void>{Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()}
.then([svcCtx{opCtx->getServiceContext()}, oldUUID, oldShardVersion] {
ThreadClient tc{"CleanUpShardedMetadata", svcCtx};
+ {
+ stdx::lock_guard<Client> lk{*tc.get()};
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto uniqueOpCtx{tc->makeOperationContext()};
auto opCtx{uniqueOpCtx.get()};
diff --git a/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp b/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp
index dff59191d53..5f6709ae9e5 100644
--- a/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp
@@ -81,6 +81,11 @@ public:
// Use an ACR because we will perform a {multi: true} delete, which is otherwise not
// supported on a session.
auto newClient = opCtx->getServiceContext()->makeClient("RemoveChunksMetadata");
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
+
AlternativeClientRegion acr(newClient);
auto executor =
Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor();
diff --git a/src/mongo/db/s/config/configsvr_remove_tags_command.cpp b/src/mongo/db/s/config/configsvr_remove_tags_command.cpp
index db996fce388..8dd75292aa7 100644
--- a/src/mongo/db/s/config/configsvr_remove_tags_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_tags_command.cpp
@@ -79,6 +79,11 @@ public:
{
auto newClient = opCtx->getServiceContext()->makeClient("RemoveTagsMetadata");
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
+
AlternativeClientRegion acr(newClient);
auto executor =
Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor();
diff --git a/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp b/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
index 2abbdd528b0..6209d5992d0 100644
--- a/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
+++ b/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
@@ -102,6 +102,11 @@ public:
auto newClient = opCtx->getServiceContext()->makeClient("RenameCollectionMetadata");
AuthorizationSession::get(newClient.get())
->grantInternalAuthorization(newClient.get());
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
+
AlternativeClientRegion acr(newClient);
auto executor =
Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor();
diff --git a/src/mongo/db/s/config/placement_history_cleaner.cpp b/src/mongo/db/s/config/placement_history_cleaner.cpp
index 2d8fded89b4..7e1a5215de3 100644
--- a/src/mongo/db/s/config/placement_history_cleaner.cpp
+++ b/src/mongo/db/s/config/placement_history_cleaner.cpp
@@ -139,9 +139,7 @@ void PlacementHistoryCleaner::onStepUpComplete(OperationContext* opCtx, long lon
PeriodicRunner::PeriodicJob placementHistoryCleanerJob(
"PlacementHistoryCleanUpJob",
[](Client* client) { runOnce(client, kminPlacementHistoryEntries); },
- kJobExecutionPeriod,
- // TODO(SERVER-74658): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/);
+ kJobExecutionPeriod);
_anchor = periodicRunner->makeJob(std::move(placementHistoryCleanerJob));
_anchor.start();
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 6482c797c0f..0ace7321d3d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -148,6 +148,10 @@ BSONObj commitOrAbortTransaction(OperationContext* opCtx,
// that have been run on this opCtx would have set the timeout in the locker on the opCtx, but
// commit should not have a lock timeout.
auto newClient = getGlobalServiceContext()->makeClient("ShardingCatalogManager");
+ {
+ stdx::lock_guard<Client> lk(*newClient);
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
AlternativeClientRegion acr(newClient);
auto newOpCtx = cc().makeOperationContext();
newOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
@@ -953,11 +957,6 @@ Status ShardingCatalogManager::_notifyClusterOnNewDatabases(
// Setup an AlternativeClientRegion and a non-interruptible Operation Context to ensure that
// the notification may be also sent out while the node is stepping down.
auto altClient = opCtx->getServiceContext()->makeClient("_notifyClusterOnNewDatabases");
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- mongo::stdx::lock_guard<mongo::Client> lk(*altClient.get());
- altClient.get()->setSystemOperationUnkillableByStepdown(lk);
- }
AlternativeClientRegion acr(altClient);
auto altOpCtxHolder = cc().makeOperationContext();
auto altOpCtx = altOpCtxHolder.get();
@@ -1153,6 +1152,10 @@ void ShardingCatalogManager::withTransaction(
AlternativeSessionRegion asr(opCtx);
auto* const client = asr.opCtx()->getClient();
+ {
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
asr.opCtx()->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
AuthorizationSession::get(client)->grantInternalAuthorization(client);
TxnNumber txnNumber = 0;
@@ -1286,11 +1289,6 @@ void ShardingCatalogManager::initializePlacementHistory(OperationContext* opCtx)
// internal client credentials).
{
auto altClient = opCtx->getServiceContext()->makeClient("initializePlacementHistory");
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*altClient.get());
- altClient.get()->setSystemOperationUnkillableByStepdown(lk);
- }
AuthorizationSession::get(altClient.get())->grantInternalAuthorization(altClient.get());
AlternativeClientRegion acr(altClient);
auto executor =
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 78af6bd3fc4..7f325f12742 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -320,6 +320,10 @@ void insertChunks(OperationContext* opCtx,
{
auto newClient =
opCtx->getServiceContext()->makeClient("CreateCollectionCoordinator::insertChunks");
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
AlternativeClientRegion acr(newClient);
auto executor =
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index e8bc06a8a44..ba6992ca376 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -87,6 +87,10 @@ void DropCollectionCoordinator::dropCollectionLocally(OperationContext* opCtx,
// an alternative client.
auto newClient = opCtx->getServiceContext()->makeClient("removeRangeDeletions-" +
collectionUUID->toString());
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
AlternativeClientRegion acr{newClient};
auto executor =
Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor();
diff --git a/src/mongo/db/s/flush_resharding_state_change_command.cpp b/src/mongo/db/s/flush_resharding_state_change_command.cpp
index c0e1523ed6e..b9494be2d30 100644
--- a/src/mongo/db/s/flush_resharding_state_change_command.cpp
+++ b/src/mongo/db/s/flush_resharding_state_change_command.cpp
@@ -114,6 +114,11 @@ public:
ExecutorFuture<void>(Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor())
.then([svcCtx = opCtx->getServiceContext(), nss = ns()] {
ThreadClient tc("FlushReshardingStateChange", svcCtx);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
+
auto opCtx = tc->makeOperationContext();
onCollectionPlacementVersionMismatch(
opCtx.get(), nss, boost::none /* chunkVersionReceived */);
diff --git a/src/mongo/db/s/global_index/global_index_cloning_service.cpp b/src/mongo/db/s/global_index/global_index_cloning_service.cpp
index 78d4ffc27db..454d99e93c3 100644
--- a/src/mongo/db/s/global_index/global_index_cloning_service.cpp
+++ b/src/mongo/db/s/global_index/global_index_cloning_service.cpp
@@ -212,14 +212,8 @@ void GlobalIndexCloningService::CloningStateMachine::_init(
_metadata.getNss(), indexSpec.getName(), _metadata.getIndexCollectionUUID(), **executor);
auto client = _serviceContext->makeClient("globalIndexClonerServiceInit");
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client.get());
- client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion clientRegion(client);
+
auto opCtx = _serviceContext->makeOperationContext(Client::getCurrent());
auto routingInfo =
diff --git a/src/mongo/db/s/global_index/global_index_inserter.cpp b/src/mongo/db/s/global_index/global_index_inserter.cpp
index ec9d4883d63..8ebadf1dd90 100644
--- a/src/mongo/db/s/global_index/global_index_inserter.cpp
+++ b/src/mongo/db/s/global_index/global_index_inserter.cpp
@@ -77,6 +77,12 @@ void GlobalIndexInserter::processDoc(OperationContext* opCtx,
const auto& skipIdDocResults) {
auto client = service->makeClient("globalIndexInserter");
auto opCtx = service->makeOperationContext(client.get());
+
+ {
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
+
globalIndexInserterPauseAfterReadingSkipCollection.pauseWhileSet(opCtx.get());
if (!skipIdDocResults.empty()) {
diff --git a/src/mongo/db/s/migration_batch_fetcher.h b/src/mongo/db/s/migration_batch_fetcher.h
index f3e732ff2f3..8d9bc13ad13 100644
--- a/src/mongo/db/s/migration_batch_fetcher.h
+++ b/src/mongo/db/s/migration_batch_fetcher.h
@@ -161,6 +161,10 @@ private:
static void onCreateThread(const std::string& threadName) {
Client::initThread(threadName, getGlobalServiceContext(), nullptr);
+ {
+ stdx::lock_guard<Client> lk(cc());
+ cc().setSystemOperationKillableByStepdown(lk);
+ }
}
}; // namespace mongo
diff --git a/src/mongo/db/s/migration_batch_inserter.cpp b/src/mongo/db/s/migration_batch_inserter.cpp
index 66e38650136..37b2c0d07dd 100644
--- a/src/mongo/db/s/migration_batch_inserter.cpp
+++ b/src/mongo/db/s/migration_batch_inserter.cpp
@@ -87,6 +87,10 @@ void runWithoutSession(OperationContext* opCtx, Callable callable) {
void MigrationBatchInserter::onCreateThread(const std::string& threadName) {
Client::initThread(threadName, getGlobalServiceContext(), nullptr);
+ {
+ stdx::lock_guard<Client> lk(cc());
+ cc().setSystemOperationKillableByStepdown(lk);
+ }
}
void MigrationBatchInserter::run(Status status) const try {
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 47558dbb88a..723de44c83b 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -596,6 +596,11 @@ repl::OpTime MigrationDestinationManager::fetchAndApplyBatch(
stdx::thread applicationThread{[&] {
Client::initThread("batchApplier", opCtx->getServiceContext(), nullptr);
+ auto client = Client::getCurrent();
+ {
+ stdx::lock_guard lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
auto executor =
Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor();
auto applicationOpCtx = CancelableOperationContext(
@@ -1094,6 +1099,11 @@ void MigrationDestinationManager::_migrateThread(CancellationToken cancellationT
Client::initThread("migrateThread");
auto client = Client::getCurrent();
+ {
+ stdx::lock_guard lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
+
bool recovering = false;
while (true) {
const auto executor =
@@ -1287,6 +1297,11 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx,
outerOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
{
auto newClient = outerOpCtx->getServiceContext()->makeClient("MigrationCoordinator");
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
+
AlternativeClientRegion acr(newClient);
auto executor =
Grid::get(outerOpCtx->getServiceContext())->getExecutorPool()->getFixedExecutor();
@@ -1347,6 +1362,10 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx,
}
auto newClient = outerOpCtx->getServiceContext()->makeClient("MigrationCoordinator");
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
AlternativeClientRegion acr(newClient);
auto executor =
Grid::get(outerOpCtx->getServiceContext())->getExecutorPool()->getFixedExecutor();
@@ -1616,6 +1635,10 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx,
} else {
outerOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
auto newClient = outerOpCtx->getServiceContext()->makeClient("MigrationCoordinator");
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
AlternativeClientRegion acr(newClient);
auto executor =
Grid::get(outerOpCtx->getServiceContext())->getExecutorPool()->getFixedExecutor();
@@ -1645,6 +1668,10 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx,
outerOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
auto newClient = outerOpCtx->getServiceContext()->makeClient("MigrationCoordinator");
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
AlternativeClientRegion acr(newClient);
auto executor =
Grid::get(outerOpCtx->getServiceContext())->getExecutorPool()->getFixedExecutor();
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 3b98d03285b..066d898b26a 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -743,6 +743,10 @@ void MigrationSourceManager::_cleanup(bool completeMigration) noexcept {
}
auto newClient = _opCtx->getServiceContext()->makeClient("MigrationCoordinator");
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
AlternativeClientRegion acr(newClient);
auto newOpCtxPtr = cc().makeOperationContext();
auto newOpCtx = newOpCtxPtr.get();
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index db69fb0a243..f890664cf4a 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -220,6 +220,12 @@ void retryIdempotentWorkAsPrimaryUntilSuccessOrStepdown(
try {
auto newClient = opCtx->getServiceContext()->makeClient(newClientName);
+
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
+
auto newOpCtx = newClient->makeOperationContext();
AlternativeClientRegion altClient(newClient);
@@ -444,6 +450,10 @@ ExecutorFuture<void> cleanUpRange(ServiceContext* serviceContext,
const RangeDeletionTask& deletionTask) {
return AsyncTry([=]() mutable {
ThreadClient tc(kRangeDeletionThreadName, serviceContext);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto uniqueOpCtx = tc->makeOperationContext();
auto opCtx = uniqueOpCtx.get();
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
@@ -514,6 +524,10 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
return ExecutorFuture<void>(executor)
.then([=] {
ThreadClient tc(kRangeDeletionThreadName, serviceContext);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
uassert(
ErrorCodes::ResumableRangeDeleterDisabled,
@@ -526,6 +540,10 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
return cleanUpRange(serviceContext, executor, deletionTask)
.onError<ErrorCodes::KeyPatternShorterThanBound>([=](Status status) {
ThreadClient tc(kRangeDeletionThreadName, serviceContext);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto uniqueOpCtx = tc->makeOperationContext();
uniqueOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
@@ -547,6 +565,10 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
})
.onError([=](const Status status) {
ThreadClient tc(kRangeDeletionThreadName, serviceContext);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto uniqueOpCtx = tc->makeOperationContext();
auto opCtx = uniqueOpCtx.get();
@@ -586,6 +608,10 @@ void resubmitRangeDeletionsOnStepUp(ServiceContext* serviceContext) {
ExecutorFuture<void>(getMigrationUtilExecutor(serviceContext))
.then([serviceContext] {
ThreadClient tc("ResubmitRangeDeletions", serviceContext);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto opCtx = tc->makeOperationContext();
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
@@ -618,6 +644,10 @@ void resubmitRangeDeletionsOnStepUp(ServiceContext* serviceContext) {
})
.then([serviceContext] {
ThreadClient tc("ResubmitRangeDeletions", serviceContext);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto opCtx = tc->makeOperationContext();
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
@@ -1149,6 +1179,10 @@ ExecutorFuture<void> launchReleaseCriticalSectionOnRecipientFuture(
return ExecutorFuture<void>(executor).then([=] {
ThreadClient tc("releaseRecipientCritSec", serviceContext);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto uniqueOpCtx = tc->makeOperationContext();
auto opCtx = uniqueOpCtx.get();
@@ -1286,6 +1320,10 @@ void asyncRecoverMigrationUntilSuccessOrStepDown(OperationContext* opCtx,
ExecutorFuture<void>{Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()}
.then([svcCtx{opCtx->getServiceContext()}, nss] {
ThreadClient tc{"MigrationRecovery", svcCtx};
+ {
+ stdx::lock_guard<Client> lk{*tc.get()};
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto uniqueOpCtx{tc->makeOperationContext()};
auto opCtx{uniqueOpCtx.get()};
diff --git a/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp b/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp
index 4379554fae3..78431d197df 100644
--- a/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp
+++ b/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp
@@ -195,9 +195,7 @@ void PeriodicShardedIndexConsistencyChecker::_launchShardedIndexConsistencyCheck
"error"_attr = ex.toStatus());
}
},
- Milliseconds(shardedIndexConsistencyCheckIntervalMS),
- // TODO(SERVER-74658): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/);
+ Milliseconds(shardedIndexConsistencyCheckIntervalMS));
_shardedIndexConsistencyChecker = periodicRunner->makeJob(std::move(job));
_shardedIndexConsistencyChecker.start();
}
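
Note: for periodic jobs, the patch expresses the same policy change by dropping the trailing false /*isKillableByStepdown*/ argument instead of adding a setter call. A sketch of the resulting construction, modeled on the DiskSpaceMonitor hunk later in this diff; the job name, body, and svcCtx are illustrative:

    _job = svcCtx->getPeriodicRunner()->makeJob(PeriodicRunner::PeriodicJob{
        "ExampleMonitor",                            // illustrative job name
        [](Client* client) { /* periodic work */ },  // now runs killable by stepdown
        Seconds(1)});                                // interval only; no killability flag
    _job.start();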
diff --git a/src/mongo/db/s/persistent_task_queue_test.cpp b/src/mongo/db/s/persistent_task_queue_test.cpp
index 9bc33346ae9..d9dd6e3562c 100644
--- a/src/mongo/db/s/persistent_task_queue_test.cpp
+++ b/src/mongo/db/s/persistent_task_queue_test.cpp
@@ -288,6 +288,11 @@ TEST_F(PersistentTaskQueueTest, TestKilledOperationContextWhileWaitingOnCV) {
auto result = stdx::async(stdx::launch::async, [this, &q, &barrier] {
ThreadClient tc("TestKilledOperationContextWhileWaitingOnCV", getServiceContext());
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
+
auto opCtx = tc->makeOperationContext();
barrier.countDownAndWait();
diff --git a/src/mongo/db/s/query_analysis_writer.cpp b/src/mongo/db/s/query_analysis_writer.cpp
index 0289bec8053..ce48eef2f14 100644
--- a/src/mongo/db/s/query_analysis_writer.cpp
+++ b/src/mongo/db/s/query_analysis_writer.cpp
@@ -290,9 +290,7 @@ void QueryAnalysisWriter::onStartup(OperationContext* opCtx) {
auto opCtx = client->makeOperationContext();
_flushQueries(opCtx.get());
},
- Seconds(gQueryAnalysisWriterIntervalSecs),
- // TODO(SERVER-74662): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/);
+ Seconds(gQueryAnalysisWriterIntervalSecs));
_periodicQueryWriter = periodicRunner->makeJob(std::move(queryWriterJob));
_periodicQueryWriter.start();
@@ -305,9 +303,7 @@ void QueryAnalysisWriter::onStartup(OperationContext* opCtx) {
auto opCtx = client->makeOperationContext();
_flushDiffs(opCtx.get());
},
- Seconds(gQueryAnalysisWriterIntervalSecs),
- // TODO(SERVER-74662): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/);
+ Seconds(gQueryAnalysisWriterIntervalSecs));
_periodicDiffWriter = periodicRunner->makeJob(std::move(diffWriterJob));
_periodicDiffWriter.start();
@@ -318,10 +314,6 @@ void QueryAnalysisWriter::onStartup(OperationContext* opCtx) {
threadPoolOptions.poolName = "QueryAnalysisWriterThreadPool";
threadPoolOptions.onCreateThread = [](const std::string& threadName) {
Client::initThread(threadName.c_str());
-
- // TODO(SERVER-74662): Please revisit if this thread could be made killable.
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
};
_executor = std::make_shared<executor::ThreadPoolTaskExecutor>(
std::make_unique<ThreadPool>(threadPoolOptions),
diff --git a/src/mongo/db/s/range_deleter_service.cpp b/src/mongo/db/s/range_deleter_service.cpp
index 94539b3127b..f392cd34717 100644
--- a/src/mongo/db/s/range_deleter_service.cpp
+++ b/src/mongo/db/s/range_deleter_service.cpp
@@ -133,6 +133,11 @@ void RangeDeleterService::ReadyRangeDeletionsProcessor::_completedRangeDeletion(
void RangeDeleterService::ReadyRangeDeletionsProcessor::_runRangeDeletions() {
Client::initThread(kRangeDeletionThreadName);
{
+ stdx::lock_guard<Client> lk(cc());
+ cc().setSystemOperationKillableByStepdown(lk);
+ }
+
+ {
stdx::lock_guard<Latch> lock(_mutex);
if (_state != kRunning) {
return;
diff --git a/src/mongo/db/s/range_deletion_util.h b/src/mongo/db/s/range_deletion_util.h
index 870284248b6..1042137a184 100644
--- a/src/mongo/db/s/range_deletion_util.h
+++ b/src/mongo/db/s/range_deletion_util.h
@@ -130,6 +130,10 @@ auto withTemporaryOperationContext(Callable&& callable,
const UUID& collectionUUID,
bool writeToRangeDeletionNamespace = false) {
ThreadClient tc(kRangeDeletionThreadName, getGlobalServiceContext());
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto uniqueOpCtx = Client::getCurrent()->makeOperationContext();
auto opCtx = uniqueOpCtx.get();
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
index 519aea48bf8..4fc4e42fbbc 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
@@ -362,12 +362,6 @@ SemiFuture<void> ReshardingCollectionCloner::run(
auto client =
cc().getServiceContext()->makeClient("ReshardingCollectionClonerCleanupClient");
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client.get());
- client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(client);
auto opCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
index c525f2ec38a..c75396abcdf 100644
--- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
@@ -130,6 +130,11 @@ public:
executor::ThreadPoolMock::Options threadPoolOptions;
threadPoolOptions.onCreateThread = [] {
Client::initThread("TestReshardingDonorOplogIterator");
+ auto& client = cc();
+ {
+ stdx::lock_guard<Client> lk(client);
+ client.setSystemOperationKillableByStepdown(lk);
+ }
};
auto executor = executor::makeThreadPoolTestExecutor(
@@ -170,6 +175,8 @@ public:
ServiceContext::UniqueClient makeKillableClient() {
auto client = getServiceContext()->makeClient("ReshardingDonorOplogIterator");
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
return client;
}
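
A hypothetical call site for the makeKillableClient() helper above, using the AlternativeClientRegion pattern seen elsewhere in this diff:

    auto client = makeKillableClient();
    AlternativeClientRegion acr(client);  // test body now runs under the killable Client
    auto opCtx = cc().makeOperationContext();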
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp
index ff981eb549f..116d975da7c 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp
@@ -380,6 +380,11 @@ void clearFilteringMetadata(OperationContext* opCtx,
AsyncTry([svcCtx = opCtx->getServiceContext(), nss] {
ThreadClient tc("TriggerReshardingRecovery", svcCtx);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
+
auto opCtx = tc->makeOperationContext();
onCollectionPlacementVersionMismatch(
opCtx.get(), nss, boost::none /* chunkVersionReceived */);
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
index c25c379139a..bce5a6f8b1f 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
@@ -68,6 +68,10 @@ void runWithTransaction(OperationContext* opCtx,
unique_function<void(OperationContext*)> func) {
AlternativeSessionRegion asr(opCtx);
auto* const client = asr.opCtx()->getClient();
+ {
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
asr.opCtx()->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
AuthorizationSession::get(client)->grantInternalAuthorization(client);
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
index 51632cb40cc..b81e262d0a3 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
@@ -197,12 +197,6 @@ SemiFuture<void> ReshardingOplogApplier::run(
auto client =
cc().getServiceContext()->makeClient("ReshardingOplogApplierCleanupClient");
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client.get());
- client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(client);
auto opCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index 1fc013463e1..330084774b3 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -375,6 +375,11 @@ protected:
Client::initThread(threadName.c_str());
auto* client = Client::getCurrent();
AuthorizationSession::get(*client)->grantInternalAuthorization(client);
+
+ {
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
};
auto hookList = std::make_unique<rpc::EgressMetadataHookList>();
diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
index 487e8717038..35d9161f7e8 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
@@ -156,6 +156,11 @@ public:
Client::initThread(threadName.c_str());
auto* client = Client::getCurrent();
AuthorizationSession::get(*client)->grantInternalAuthorization(client);
+
+ {
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
};
auto hookList = std::make_unique<rpc::EgressMetadataHookList>();
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp b/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
index b1e91410aab..97a9788ef62 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
@@ -164,13 +164,6 @@ ExecutorFuture<void> ReshardingOplogFetcher::_reschedule(
_reshardingUUID.toString(),
_donorShard.toString()),
_service());
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client.get());
- client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
return iterate(client.get(), factory);
})
.then([executor, cancelToken](bool moreToCome) {
@@ -335,12 +328,6 @@ bool ReshardingOplogFetcher::consume(Client* client,
auto opCtxRaii = factory.makeOperationContext(client.get());
auto opCtx = opCtxRaii.get();
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client.get());
- client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
// Noting some possible optimizations:
//
// * Batch more inserts into larger storage transactions.
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
index 500c4a2f990..8a015e52af5 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
@@ -307,12 +307,6 @@ SemiFuture<void> ReshardingTxnCloner::run(
auto client =
cc().getServiceContext()->makeClient("ReshardingTxnClonerCleanupClient");
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client.get());
- client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(client);
auto opCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
index 418657ec6b3..aae776a4029 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
@@ -341,6 +341,11 @@ protected:
Client::initThread(threadName.c_str());
auto* client = Client::getCurrent();
AuthorizationSession::get(*client)->grantInternalAuthorization(client);
+
+ {
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
};
auto hookList = std::make_unique<rpc::EgressMetadataHookList>();
diff --git a/src/mongo/db/s/resharding_test_commands.cpp b/src/mongo/db/s/resharding_test_commands.cpp
index df291db2f00..74688928784 100644
--- a/src/mongo/db/s/resharding_test_commands.cpp
+++ b/src/mongo/db/s/resharding_test_commands.cpp
@@ -72,6 +72,11 @@ public:
Client::initThread(threadName.c_str());
auto* client = Client::getCurrent();
AuthorizationSession::get(*client)->grantInternalAuthorization(client);
+
+ {
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
};
auto metrics = ReshardingMetrics::makeInstance(
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index 9ea5e089ef4..ae9eec23cc2 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -262,6 +262,12 @@ void SessionCatalogMigrationDestination::join() {
void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(ServiceContext* service) {
Client::initThread(
"sessionCatalogMigrationProducer-" + _migrationSessionId.toString(), service, nullptr);
+ auto client = Client::getCurrent();
+ {
+ stdx::lock_guard lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
+
bool oplogDrainedAfterCommiting = false;
ProcessOplogResult lastResult;
repl::OpTime lastOpTimeWaited;
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index 78e4ceb8f98..ecc4832caa3 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -178,6 +178,11 @@ SharedSemiFuture<void> recoverRefreshDbVersion(OperationContext* opCtx,
serviceCtx = opCtx->getServiceContext(),
forwardableOpMetadata = ForwardableOperationMetadata(opCtx)] {
ThreadClient tc("DbMetadataRefreshThread", serviceCtx);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
+
const auto opCtxHolder =
CancelableOperationContext(tc->makeOperationContext(), cancellationToken, executor);
auto opCtx = opCtxHolder.get();
@@ -362,6 +367,10 @@ SharedSemiFuture<void> recoverRefreshCollectionPlacementVersion(
return ExecutorFuture<void>(executor)
.then([=] {
ThreadClient tc("RecoverRefreshThread", serviceContext);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
if (MONGO_unlikely(hangInRecoverRefreshThread.shouldFail())) {
hangInRecoverRefreshThread.pauseWhileSet();
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index fd654f84dee..30eed2fbf81 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -457,13 +457,6 @@ SemiFuture<CollectionAndChangedChunks> ShardServerCatalogCacheLoader::getChunksS
ThreadClient tc("ShardServerCatalogCacheLoader::getChunksSince",
getGlobalServiceContext());
auto context = _contexts.makeOperationContext(*tc);
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
{
// We may have missed an OperationContextGroup interrupt since this operation
// began but before the OperationContext was added to the group. So we'll check
@@ -505,12 +498,6 @@ SemiFuture<DatabaseType> ShardServerCatalogCacheLoader::getDatabase(StringData d
getGlobalServiceContext());
auto context = _contexts.makeOperationContext(*tc);
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
{
// We may have missed an OperationContextGroup interrupt since this operation began
// but before the OperationContext was added to the group. So we'll check that we're
@@ -1085,12 +1072,6 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString
getGlobalServiceContext());
auto context = _contexts.makeOperationContext(*tc);
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
bool taskFinished = false;
bool inShutdown = false;
try {
@@ -1170,12 +1151,6 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
ThreadClient tc("ShardServerCatalogCacheLoader::runDbTasks", getGlobalServiceContext());
auto context = _contexts.makeOperationContext(*tc);
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
bool taskFinished = false;
bool inShutdown = false;
try {
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index 2bfe11c2055..600b57ecf64 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -897,6 +897,12 @@ void runTransactionOnShardingCatalog(OperationContext* opCtx,
// passed Operation context. We opt for creating a new one to avoid any possible side
// effects.
auto newClient = opCtx->getServiceContext()->makeClient("ShardingCatalogTransaction");
+
+ {
+ stdx::lock_guard<Client> lk(*newClient.get());
+ newClient.get()->setSystemOperationKillableByStepdown(lk);
+ }
+
AuthorizationSession::get(newClient.get())->grantInternalAuthorization(newClient.get());
AlternativeClientRegion acr(newClient);
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 2573113c24b..5b3ff46f4c8 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -230,13 +230,6 @@ private:
ThreadClient tc("updateShardIdentityConfigString", _serviceContext);
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
auto opCtx = tc->makeOperationContext();
ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(), update);
} catch (const ExceptionForCat<ErrorCategory::ShutdownError>& e) {
diff --git a/src/mongo/db/s/shardsvr_move_primary_enter_critical_section_command.cpp b/src/mongo/db/s/shardsvr_move_primary_enter_critical_section_command.cpp
index cf84fd67a6c..0abdf48d19e 100644
--- a/src/mongo/db/s/shardsvr_move_primary_enter_critical_section_command.cpp
+++ b/src/mongo/db/s/shardsvr_move_primary_enter_critical_section_command.cpp
@@ -69,6 +69,10 @@ public:
// cause the failure of the second operation.
auto newClient = getGlobalServiceContext()->makeClient(
"ShardsvrMovePrimaryEnterCriticalSection");
+ {
+ stdx::lock_guard<Client> lk(*newClient);
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
AlternativeClientRegion acr(newClient);
auto newOpCtx = CancelableOperationContext(
cc().makeOperationContext(),
diff --git a/src/mongo/db/s/shardsvr_move_primary_exit_critical_section_command.cpp b/src/mongo/db/s/shardsvr_move_primary_exit_critical_section_command.cpp
index 4a1bf387c6a..81a0f4bd8aa 100644
--- a/src/mongo/db/s/shardsvr_move_primary_exit_critical_section_command.cpp
+++ b/src/mongo/db/s/shardsvr_move_primary_exit_critical_section_command.cpp
@@ -69,6 +69,10 @@ public:
// solution is to use an alternative client as well as a new operation context.
auto newClient =
getGlobalServiceContext()->makeClient("ShardsvrMovePrimaryExitCriticalSection");
+ {
+ stdx::lock_guard<Client> lk(*newClient);
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
AlternativeClientRegion acr(newClient);
auto newOpCtx = CancelableOperationContext(
cc().makeOperationContext(),
diff --git a/src/mongo/db/s/shardsvr_move_range_command.cpp b/src/mongo/db/s/shardsvr_move_range_command.cpp
index d7840b3b8b1..9fdacc8186c 100644
--- a/src/mongo/db/s/shardsvr_move_range_command.cpp
+++ b/src/mongo/db/s/shardsvr_move_range_command.cpp
@@ -109,6 +109,10 @@ public:
// executor thread after setting the shared state as ready.
auto scopedMigrationLocal(std::move(scopedMigration));
ThreadClient tc("MoveChunk", serviceContext);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto uniqueOpCtx = Client::getCurrent()->makeOperationContext();
auto executorOpCtx = uniqueOpCtx.get();
Status status = {ErrorCodes::InternalError, "Uninitialized value"};
diff --git a/src/mongo/db/s/shardsvr_participant_block_command.cpp b/src/mongo/db/s/shardsvr_participant_block_command.cpp
index 88aff1d7444..ad7f003df9b 100644
--- a/src/mongo/db/s/shardsvr_participant_block_command.cpp
+++ b/src/mongo/db/s/shardsvr_participant_block_command.cpp
@@ -128,6 +128,10 @@ public:
if (txnParticipant) {
auto newClient =
getGlobalServiceContext()->makeClient("ShardSvrParticipantBlockCmdClient");
+ {
+ stdx::lock_guard<Client> lk(*newClient);
+ newClient->setSystemOperationKillableByStepdown(lk);
+ }
AlternativeClientRegion acr(newClient);
auto cancelableOperationContext = CancelableOperationContext(
cc().makeOperationContext(),
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index bbd3ea5c61e..39892137e9a 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -84,13 +84,6 @@ ExecutorFuture<void> waitForMajorityWithHangFailpoint(
failpoint.pauseWhileSet();
} else {
ThreadClient tc(failPointName, service);
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
auto opCtx = tc->makeOperationContext();
failpoint.pauseWhileSet(opCtx.get());
}
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.h b/src/mongo/db/s/transaction_coordinator_futures_util.h
index 8ac1ab15aa5..1dd0554ee63 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.h
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.h
@@ -101,12 +101,6 @@ public:
ThreadClient tc("TransactionCoordinator", _serviceContext);
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
auto uniqueOpCtxIter = [&] {
stdx::lock_guard lk(_mutex);
return _activeOpContexts.emplace(_activeOpContexts.begin(),
diff --git a/src/mongo/db/session/kill_sessions_local.cpp b/src/mongo/db/session/kill_sessions_local.cpp
index 71a00432876..b40367cec27 100644
--- a/src/mongo/db/session/kill_sessions_local.cpp
+++ b/src/mongo/db/session/kill_sessions_local.cpp
@@ -192,13 +192,6 @@ void killSessionsAbortAllPreparedTransactions(OperationContext* opCtx) {
void yieldLocksForPreparedTransactions(OperationContext* opCtx) {
// Create a new opCtx because we need an empty locker to refresh the locks.
auto newClient = opCtx->getServiceContext()->makeClient("prepared-txns-yield-locks");
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*newClient.get());
- newClient.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(newClient);
auto newOpCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/session/logical_session_cache_impl.cpp b/src/mongo/db/session/logical_session_cache_impl.cpp
index b9b540222e6..7bc51fd1137 100644
--- a/src/mongo/db/session/logical_session_cache_impl.cpp
+++ b/src/mongo/db/session/logical_session_cache_impl.cpp
@@ -70,19 +70,13 @@ LogicalSessionCacheImpl::LogicalSessionCacheImpl(std::unique_ptr<ServiceLiaison>
_stats.setLastTransactionReaperJobTimestamp(_service->now());
if (!disableLogicalSessionCacheRefresh) {
- _service->scheduleJob(
- {"LogicalSessionCacheRefresh",
- [this](Client* client) { _periodicRefresh(client); },
- Milliseconds(logicalSessionRefreshMillis),
- // TODO(SERVER-74659): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/});
-
- _service->scheduleJob(
- {"LogicalSessionCacheReap",
- [this](Client* client) { _periodicReap(client); },
- Milliseconds(logicalSessionRefreshMillis),
- // TODO(SERVER-74659): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/});
+ _service->scheduleJob({"LogicalSessionCacheRefresh",
+ [this](Client* client) { _periodicRefresh(client); },
+ Milliseconds(logicalSessionRefreshMillis)});
+
+ _service->scheduleJob({"LogicalSessionCacheReap",
+ [this](Client* client) { _periodicReap(client); },
+ Milliseconds(logicalSessionRefreshMillis)});
}
}
diff --git a/src/mongo/db/session/session_catalog_mongod.cpp b/src/mongo/db/session/session_catalog_mongod.cpp
index 633d0dca266..5acb0ce4d2f 100644
--- a/src/mongo/db/session/session_catalog_mongod.cpp
+++ b/src/mongo/db/session/session_catalog_mongod.cpp
@@ -110,13 +110,6 @@ void killSessionTokens(OperationContext* opCtx,
invariant(status);
ThreadClient tc("Kill-Sessions", service);
-
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
auto uniqueOpCtx = tc->makeOperationContext();
const auto opCtx = uniqueOpCtx.get();
const auto catalog = SessionCatalog::get(opCtx);
@@ -521,12 +514,6 @@ void MongoDSessionCatalog::onStepUp(OperationContext* opCtx) {
{
// Create a new opCtx because we need an empty locker to refresh the locks.
auto newClient = opCtx->getServiceContext()->makeClient("restore-prepared-txn");
-
- {
- stdx::lock_guard<Client> lk(*newClient.get());
- newClient.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(newClient);
for (const auto& sessionInfo : sessionsToReacquireLocks) {
auto newOpCtx = cc().makeOperationContext();
diff --git a/src/mongo/db/session/session_killer.cpp b/src/mongo/db/session/session_killer.cpp
index 03882219ddb..fa3c6adda25 100644
--- a/src/mongo/db/session/session_killer.cpp
+++ b/src/mongo/db/session/session_killer.cpp
@@ -51,12 +51,6 @@ SessionKiller::SessionKiller(ServiceContext* sc, KillFunc killer)
ThreadClient tc("SessionKiller", sc);
- // TODO(SERVER-74658): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
stdx::unique_lock<Latch> lk(_mutex);
// While we're not in shutdown
diff --git a/src/mongo/db/storage/checkpointer.cpp b/src/mongo/db/storage/checkpointer.cpp
index 6b633075ae7..2595526973e 100644
--- a/src/mongo/db/storage/checkpointer.cpp
+++ b/src/mongo/db/storage/checkpointer.cpp
@@ -73,11 +73,6 @@ void Checkpointer::run() {
ThreadClient tc(name(), getGlobalServiceContext());
LOGV2_DEBUG(22307, 1, "Starting thread", "threadName"_attr = name());
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
while (true) {
auto opCtx = tc->makeOperationContext();
diff --git a/src/mongo/db/storage/control/journal_flusher.cpp b/src/mongo/db/storage/control/journal_flusher.cpp
index 0ed04a9a747..ebfc1c2718a 100644
--- a/src/mongo/db/storage/control/journal_flusher.cpp
+++ b/src/mongo/db/storage/control/journal_flusher.cpp
@@ -80,12 +80,6 @@ void JournalFlusher::run() {
ThreadClient tc(name(), getGlobalServiceContext());
LOGV2_DEBUG(4584701, 1, "starting {name} thread", "name"_attr = name());
- // TODO(SERVER-74657): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
// The thread must not run and access the service context to create an opCtx while unit test
// infrastructure is still being set up and expects sole access to the service context (there is
no concurrency control on the service context during this phase).
diff --git a/src/mongo/db/storage/disk_space_monitor.cpp b/src/mongo/db/storage/disk_space_monitor.cpp
index eb575d03327..0d55c1256da 100644
--- a/src/mongo/db/storage/disk_space_monitor.cpp
+++ b/src/mongo/db/storage/disk_space_monitor.cpp
@@ -72,11 +72,7 @@ void DiskSpaceMonitor::_start(ServiceContext* svcCtx) {
LOGV2(7333401, "Starting the DiskSpaceMonitor");
invariant(!_job, "DiskSpaceMonitor is already started");
_job = svcCtx->getPeriodicRunner()->makeJob(PeriodicRunner::PeriodicJob{
- "DiskSpaceMonitor",
- [this](Client* client) { _run(client); },
- Seconds(1),
- // TODO(SERVER-74657): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/});
+ "DiskSpaceMonitor", [this](Client* client) { _run(client); }, Seconds(1)});
_job.start();
}
diff --git a/src/mongo/db/storage/flow_control.cpp b/src/mongo/db/storage/flow_control.cpp
index 8f8a0b6cc6b..1866f16af6c 100644
--- a/src/mongo/db/storage/flow_control.cpp
+++ b/src/mongo/db/storage/flow_control.cpp
@@ -150,9 +150,7 @@ FlowControl::FlowControl(ServiceContext* service, repl::ReplicationCoordinator*
[this](Client* client) {
FlowControlTicketholder::get(client->getServiceContext())->refreshTo(getNumTickets());
},
- Seconds(1),
- // TODO(SERVER-74657): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/});
+ Seconds(1)});
_jobAnchor.start();
}
diff --git a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
index 4a9b5a23b35..2388800265d 100644
--- a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
+++ b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp
@@ -113,11 +113,6 @@ void OplogCapMaintainerThread::run() {
LOGV2_DEBUG(5295000, 1, "Oplog cap maintainer thread started", "threadName"_attr = _name);
ThreadClient tc(_name, getGlobalServiceContext());
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
while (!globalInShutdownDeprecated()) {
if (MONGO_unlikely(hangOplogCapMaintainerThread.shouldFail())) {
LOGV2(5095500, "Hanging the oplog cap maintainer thread due to fail point");
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index 3de218ecf49..490990dcbbc 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -1351,9 +1351,7 @@ void StorageEngineImpl::TimestampMonitor::_startup() {
throw;
}
},
- Seconds(1),
- // TODO(SERVER-74657): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/);
+ Seconds(1));
_job = _periodicRunner->makeJob(std::move(job));
_job.start();
diff --git a/src/mongo/db/storage/ticketholder_monitor.cpp b/src/mongo/db/storage/ticketholder_monitor.cpp
index 46ed28310a4..d77c6592ff7 100644
--- a/src/mongo/db/storage/ticketholder_monitor.cpp
+++ b/src/mongo/db/storage/ticketholder_monitor.cpp
@@ -38,11 +38,7 @@ TicketHolderMonitor::TicketHolderMonitor(ServiceContext* svcCtx,
: _readTicketHolder(readTicketHolder),
_writeTicketHolder(writeTicketHolder),
_job(svcCtx->getPeriodicRunner()->makeJob(PeriodicRunner::PeriodicJob{
- "TicketHolderMonitor",
- [this](Client* client) { _run(client); },
- interval,
- // TODO(SERVER-74657): Please revisit if this periodic job could be made killable.
- false /*isKillableByStepdown*/})) {}
+ "TicketHolderMonitor", [this](Client* client) { _run(client); }, interval})) {}
void TicketHolderMonitor::start() {
_job.start();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 2fefa77da8a..9ceb937a61c 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -251,13 +251,6 @@ public:
virtual void run() {
ThreadClient tc(name(), getGlobalServiceContext());
-
- // TODO(SERVER-74657): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*tc.get());
- tc.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
LOGV2_DEBUG(22303, 1, "starting {name} thread", "name"_attr = name());
while (!_shuttingDown.load()) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
index 068e6d15098..320246d2f45 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
@@ -194,12 +194,6 @@ void WiredTigerOplogManager::_updateOplogVisibilityLoop(WiredTigerSessionCache*
WiredTigerRecordStore* oplogRecordStore) {
Client::initThread("OplogVisibilityThread");
- // TODO(SERVER-74657): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(cc());
- cc().setSystemOperationUnkillableByStepdown(lk);
- }
-
// This thread updates the oplog read timestamp, the timestamp used to read from the oplog with
// forward cursors. The timestamp is used to hide oplog entries that might be committed but have
// uncommitted entries behind them. This prevents cursors from seeing 'holes' in the oplog and
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict_test.cpp
index cb94e2a20e3..aabbcbfc369 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict_test.cpp
@@ -69,6 +69,12 @@ public:
kvEngine = makeKVEngine(serviceContext, home.path(), &cs);
opCtx->setRecoveryUnit(std::unique_ptr<RecoveryUnit>(kvEngine->newRecoveryUnit()),
WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork);
+
+    // Set internal state so that the invariants inside 'wiredTigerPrepareConflictRetry()' pass.
+ {
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
+ }
}
unittest::TempDir home{"temp"};
diff --git a/src/mongo/db/transaction/internal_transactions_reap_service.cpp b/src/mongo/db/transaction/internal_transactions_reap_service.cpp
index b547a54b6a7..90dc23baffe 100644
--- a/src/mongo/db/transaction/internal_transactions_reap_service.cpp
+++ b/src/mongo/db/transaction/internal_transactions_reap_service.cpp
@@ -114,6 +114,10 @@ void InternalTransactionsReapService::onShutdown() {
void InternalTransactionsReapService::_reapInternalTransactions(ServiceContext* service) try {
ThreadClient tc("reap-internal-transactions", service);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
auto uniqueOpCtx = tc->makeOperationContext();
auto opCtx = uniqueOpCtx.get();
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
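
Note: this hunk installs the three pieces the patch usually adds together. Condensed, the worker-thread preamble across these files now reads (names illustrative):

    ThreadClient tc("example-worker", service);  // RAII Client bound to this thread
    {
        stdx::lock_guard<Client> lk(*tc.get());
        tc->setSystemOperationKillableByStepdown(lk);
    }
    auto uniqueOpCtx = tc->makeOperationContext();
    auto opCtx = uniqueOpCtx.get();
    opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();  // also interrupt on step-up/down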
diff --git a/src/mongo/db/transaction/transaction_api.cpp b/src/mongo/db/transaction/transaction_api.cpp
index 6c3ed0f9c13..627beebf9ea 100644
--- a/src/mongo/db/transaction/transaction_api.cpp
+++ b/src/mongo/db/transaction/transaction_api.cpp
@@ -417,6 +417,9 @@ void primeInternalClient(Client* client) {
if (as) {
as->grantInternalAuthorization(client);
}
+
+ stdx::lock_guard<Client> lk(*client);
+ client->setSystemOperationKillableByStepdown(lk);
}
Future<DbResponse> DefaultSEPTransactionClientBehaviors::handleRequest(
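
Note: unlike the scoped variant used elsewhere, primeInternalClient() takes the Client lock at function scope, since nothing follows the setter. A hypothetical caller, assuming a freshly made internal client:

    auto client = getGlobalServiceContext()->makeClient("txnApiInternal");  // illustrative name
    primeInternalClient(client.get());  // grants internal auth (if available) and marks killable
    AlternativeClientRegion acr(client);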
diff --git a/src/mongo/db/transaction/transaction_participant.cpp b/src/mongo/db/transaction/transaction_participant.cpp
index beb399820e8..25589761451 100644
--- a/src/mongo/db/transaction/transaction_participant.cpp
+++ b/src/mongo/db/transaction/transaction_participant.cpp
@@ -572,13 +572,6 @@ TransactionParticipant::getOldestActiveTimestamp(Timestamp stableTimestamp) {
// the server, and it both blocks this thread from querying config.transactions and waits for
// this thread to terminate.
auto client = getGlobalServiceContext()->makeClient("OldestActiveTxnTimestamp");
-
- // TODO(SERVER-74656): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*client.get());
- client.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(client);
try {
@@ -2020,14 +2013,8 @@ void TransactionParticipant::Participant::_commitSplitPreparedTxnOnPrimary(
auto splitClientOwned = userOpCtx->getServiceContext()->makeClient("tempSplitClient");
auto splitOpCtx = splitClientOwned->makeOperationContext();
-
- // TODO(SERVER-74656): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*splitClientOwned.get());
- splitClientOwned.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(splitClientOwned);
+
std::unique_ptr<MongoDSessionCatalog::Session> checkedOutSession;
repl::UnreplicatedWritesBlock notReplicated(splitOpCtx.get());
@@ -2249,14 +2236,8 @@ void TransactionParticipant::Participant::_abortSplitPreparedTxnOnPrimary(
auto splitClientOwned = opCtx->getServiceContext()->makeClient("tempSplitClient");
auto splitOpCtx = splitClientOwned->makeOperationContext();
-
- // TODO(SERVER-74656): Please revisit if this thread could be made killable.
- {
- stdx::lock_guard<Client> lk(*splitClientOwned.get());
- splitClientOwned.get()->setSystemOperationUnkillableByStepdown(lk);
- }
-
AlternativeClientRegion acr(splitClientOwned);
+
std::unique_ptr<MongoDSessionCatalog::Session> checkedOutSession;
repl::UnreplicatedWritesBlock notReplicated(splitOpCtx.get());
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index b2a94b47ad1..8557eba416b 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -352,6 +352,11 @@ void TTLMonitor::run() {
ThreadClient tc(name(), getGlobalServiceContext());
AuthorizationSession::get(cc())->grantInternalAuthorization(&cc());
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc.get()->setSystemOperationKillableByStepdown(lk);
+ }
+
while (true) {
{
auto startTime = Date_t::now();
@@ -612,6 +617,11 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx,
ExecutorFuture<void>(executor)
.then([serviceContext = opCtx->getServiceContext(), nss, staleInfo] {
ThreadClient tc("TTLShardVersionRecovery", serviceContext);
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
+
auto uniqueOpCtx = tc->makeOperationContext();
auto opCtx = uniqueOpCtx.get();
diff --git a/src/mongo/db/vector_clock_mongod.cpp b/src/mongo/db/vector_clock_mongod.cpp
index 4ea2f7a2a5f..bee92fd5d2d 100644
--- a/src/mongo/db/vector_clock_mongod.cpp
+++ b/src/mongo/db/vector_clock_mongod.cpp
@@ -373,6 +373,12 @@ Future<void> VectorClockMongoD::_doWhileQueueNotEmptyOrError(ServiceContext* ser
}();
ThreadClient tc("VectorClockStateOperation", service);
+
+ {
+ stdx::lock_guard<Client> lk(*tc.get());
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
+
const auto opCtxHolder = tc->makeOperationContext();
auto* const opCtx = opCtxHolder.get();