summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSam Thursfield <sam.thursfield@codethink.co.uk>2015-11-09 12:43:40 +0000
committerSam Thursfield <sam.thursfield@codethink.co.uk>2015-11-09 12:45:51 +0000
commit6e42b112b1cebaa486c926fda759a1144cd0fe45 (patch)
treed7b296c744ab3f5fcca82a6c2fbd141a0d0f76fd
parent4e7932fa1b59a7e1d372c1a26450ee45cfe4535c (diff)
downloadmorph-6e42b112b1cebaa486c926fda759a1144cd0fe45.tar.gz
Only run as many parallel jobs as there are available CPU cores
YBD has been doing this for a while, and it seems to be faster as a result. Change-Id: I8f95a53195cdbc2c75c06a8abe9eb089a84b1c1b
-rw-r--r--morphlib/util.py10
1 file changed, 6 insertions, 4 deletions
diff --git a/morphlib/util.py b/morphlib/util.py
index e34799df..ec83df25 100644
--- a/morphlib/util.py
+++ b/morphlib/util.py
@@ -93,10 +93,12 @@ def make_concurrency(cores=None):
'''
n = cpu_count() if cores is None else cores
- # Experimental results (ref. Kinnison) says a factor of 1.5
- # gives about the optimal result for build times, since much of
- # builds are I/O bound, not CPU bound.
- return max(int(n * 1.5 + 0.5), 1)
+ # Research shows that max-jobs should be == cores by default, up to max.
+ # 20 cores.
+ #
+ # http://listmaster.pepperfish.net/pipermail/
+ # baserock-dev-baserock.org/2015-September/013264.html
+ return min(n, 20)
def create_cachedir(settings): # pragma: no cover