-rw-r--r--  Doc/library/concurrent.futures.rst   | 10
-rw-r--r--  Lib/concurrent/futures/thread.py     |  7
-rw-r--r--  Lib/test/test_concurrent_futures.py  |  6
-rw-r--r--  Misc/NEWS                            |  3
4 files changed, 24 insertions(+), 2 deletions(-)
diff --git a/Doc/library/concurrent.futures.rst b/Doc/library/concurrent.futures.rst
index 08c926a6d4..e487817dfb 100644
--- a/Doc/library/concurrent.futures.rst
+++ b/Doc/library/concurrent.futures.rst
@@ -115,11 +115,19 @@ And::
executor.submit(wait_on_future)
-.. class:: ThreadPoolExecutor(max_workers)
+.. class:: ThreadPoolExecutor(max_workers=None)
An :class:`Executor` subclass that uses a pool of at most *max_workers*
threads to execute calls asynchronously.
+ .. versionchanged:: 3.5
+ If *max_workers* is ``None`` or not given, it will default to the number
+ of processors on the machine, multiplied by ``5``, assuming that
+ :class:`ThreadPoolExecutor` is often used to overlap I/O instead of CPU
+ work and that the number of workers should therefore be higher than for
+ :class:`ProcessPoolExecutor`.
+
.. _threadpoolexecutor-example:
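For context, a minimal usage sketch of the behaviour documented above (assuming the semantics added by this patch; ``_max_workers`` is a private attribute, inspected here only for illustration)::

    import concurrent.futures

    # No max_workers argument: the pool defaults to (os.cpu_count() or 1) * 5 threads.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future = executor.submit(pow, 2, 10)
        print(future.result())        # 1024
        print(executor._max_workers)  # e.g. 20 on a 4-core machine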
diff --git a/Lib/concurrent/futures/thread.py b/Lib/concurrent/futures/thread.py
index 8d6081cf15..3ae442d987 100644
--- a/Lib/concurrent/futures/thread.py
+++ b/Lib/concurrent/futures/thread.py
@@ -10,6 +10,7 @@ from concurrent.futures import _base
import queue
import threading
import weakref
+import os
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
@@ -80,13 +81,17 @@ def _worker(executor_reference, work_queue):
_base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
- def __init__(self, max_workers):
+ def __init__(self, max_workers=None):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
+ if max_workers is None:
+ # Use this number because ThreadPoolExecutor is often
+ # used to overlap I/O instead of CPU work.
+ max_workers = (os.cpu_count() or 1) * 5
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
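Note that ``os.cpu_count()`` may return ``None`` when the processor count cannot be determined, which is why the patch uses the ``or 1`` fallback; a small sketch of that expression (values in the comments are illustrative)::

    import os

    cpus = os.cpu_count()          # None if the CPU count is undeterminable
    max_workers = (cpus or 1) * 5  # 5 * CPUs, or 5 as a fallback
    print(max_workers)             # e.g. 20 with 4 CPUs, 5 if cpus is None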
diff --git a/Lib/test/test_concurrent_futures.py b/Lib/test/test_concurrent_futures.py
index 83184c3267..11560e65ac 100644
--- a/Lib/test/test_concurrent_futures.py
+++ b/Lib/test/test_concurrent_futures.py
@@ -11,6 +11,7 @@ test.support.import_module('threading')
from test.script_helper import assert_python_ok
+import os
import sys
import threading
import time
@@ -444,6 +445,11 @@ class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
+ def test_default_workers(self):
+ executor = self.executor_type()
+ self.assertEqual(executor._max_workers,
+ (os.cpu_count() or 1) * 5)
+
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
def test_killed_child(self):
diff --git a/Misc/NEWS b/Misc/NEWS
index 71ab268c73..a02e8a6e65 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -129,6 +129,9 @@ Core and Builtins
Library
-------
+- Issue #21527: Add a default number of workers to ThreadPoolExecutor equal
+ to 5 times the number of CPUs. Patch by Claudiu Popa.
+
- Issue #22216: smtplib now resets its state more completely after a quit. The
most obvious consequence of the previous behavior was a STARTTLS failure
during a connect/starttls/quit/connect/starttls sequence.