summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJonathan Maw <jonathan.maw@codethink.co.uk>2018-08-16 18:18:21 +0100
committerTristan Van Berkom <tristan.vanberkom@codethink.co.uk>2018-08-23 14:55:06 +0900
commitf8cfcb7cbfe15e1014d3032d760efc131bae6038 (patch)
tree146d35889986e59ea3492f5e924d1acb1a5ca7fd
parenta53b7e2193fa6b6fc7ea738fe1eb6599e3edfc5e (diff)
downloadbuildstream-f8cfcb7cbfe15e1014d3032d760efc131bae6038.tar.gz
Fix get_cache_size to store it in the ArtifactCache, not the context
-rw-r--r--buildstream/_artifactcache/artifactcache.py67
-rw-r--r--buildstream/_artifactcache/cascache.py4
-rw-r--r--buildstream/_context.py75
-rw-r--r--buildstream/_frontend/app.py10
-rw-r--r--buildstream/_scheduler/queues/buildqueue.py2
-rw-r--r--buildstream/_scheduler/scheduler.py4
-rw-r--r--tests/testutils/artifactshare.py3
7 files changed, 80 insertions, 85 deletions
diff --git a/buildstream/_artifactcache/artifactcache.py b/buildstream/_artifactcache/artifactcache.py
index a7af94719..fad160d1a 100644
--- a/buildstream/_artifactcache/artifactcache.py
+++ b/buildstream/_artifactcache/artifactcache.py
@@ -82,7 +82,6 @@ class ArtifactCache():
self.extractdir = os.path.join(context.artifactdir, 'extract')
self.tmpdir = os.path.join(context.artifactdir, 'tmp')
- self.max_size = context.cache_quota
self.estimated_size = None
self.global_remote_specs = []
@@ -90,6 +89,8 @@ class ArtifactCache():
self._local = False
self.cache_size = None
+ self.cache_quota = None
+ self.cache_lower_threshold = None
os.makedirs(self.extractdir, exist_ok=True)
os.makedirs(self.tmpdir, exist_ok=True)
@@ -227,7 +228,7 @@ class ArtifactCache():
def clean(self):
artifacts = self.list_artifacts()
- while self.calculate_cache_size() >= self.context.cache_quota - self.context.cache_lower_threshold:
+ while self.calculate_cache_size() >= self.cache_quota - self.cache_lower_threshold:
try:
to_remove = artifacts.pop(0)
except IndexError:
@@ -241,7 +242,7 @@ class ArtifactCache():
"Please increase the cache-quota in {}."
.format(self.context.config_origin or default_conf))
- if self.calculate_cache_size() > self.context.cache_quota:
+ if self.calculate_cache_size() > self.cache_quota:
raise ArtifactError("Cache too full. Aborting.",
detail=detail,
reason="cache-too-full")
@@ -551,6 +552,66 @@ class ArtifactCache():
def _set_cache_size(self, cache_size):
self.estimated_size = cache_size
+ def _calculate_cache_quota(self):
+ # Headroom intended to give BuildStream a bit of leeway.
+ # This acts as the minimum size of cache_quota and also
+ # is taken from the user requested cache_quota.
+ #
+ if 'BST_TEST_SUITE' in os.environ:
+ headroom = 0
+ else:
+ headroom = 2e9
+
+ artifactdir_volume = self.context.artifactdir
+ while not os.path.exists(artifactdir_volume):
+ artifactdir_volume = os.path.dirname(artifactdir_volume)
+
+ try:
+ cache_quota = utils._parse_size(self.context.config_cache_quota, artifactdir_volume)
+ except utils.UtilError as e:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}\nPlease specify the value in bytes or as a % of full disk space.\n"
+ "\nValid values are, for example: 800M 10G 1T 50%\n"
+ .format(str(e))) from e
+
+ stat = os.statvfs(artifactdir_volume)
+ available_space = (stat.f_bsize * stat.f_bavail)
+
+ cache_size = self.calculate_cache_size()
+
+ # Ensure system has enough storage for the cache_quota
+ #
+ # If cache_quota is none, set it to the maximum it could possibly be.
+ #
+ # Also check that cache_quota is at least as large as our headroom.
+ #
+ if cache_quota is None: # Infinity, set to max system storage
+ cache_quota = cache_size + available_space
+ if cache_quota < headroom: # Check minimum
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "Invalid cache quota ({}): ".format(utils._pretty_size(cache_quota)) +
+ "BuildStream requires a minimum cache quota of 2G.")
+ elif cache_quota > cache_size + available_space: # Check maximum
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ ("Your system does not have enough available " +
+ "space to support the cache quota specified.\n" +
+ "You currently have:\n" +
+ "- {used} of cache in use at {local_cache_path}\n" +
+ "- {available} of available system storage").format(
+ used=utils._pretty_size(cache_size),
+ local_cache_path=self.context.artifactdir,
+ available=utils._pretty_size(available_space)))
+
+ # Place a slight headroom (2e9 (2GB)) into the
+ # cache_quota to try and avoid exceptions.
+ #
+ # Of course, we might still end up running out during a build
+ # if we end up writing more than 2G, but hey, this stuff is
+ # already really fuzzy.
+ #
+ self.cache_quota = cache_quota - headroom
+ self.cache_lower_threshold = self.cache_quota / 2
+
# _configured_remote_artifact_cache_specs():
#
diff --git a/buildstream/_artifactcache/cascache.py b/buildstream/_artifactcache/cascache.py
index 5c56e93b7..f323b93a4 100644
--- a/buildstream/_artifactcache/cascache.py
+++ b/buildstream/_artifactcache/cascache.py
@@ -60,6 +60,8 @@ class CASCache(ArtifactCache):
os.makedirs(os.path.join(self.casdir, 'refs', 'heads'), exist_ok=True)
os.makedirs(os.path.join(self.casdir, 'objects'), exist_ok=True)
+ self._calculate_cache_quota()
+
self._enable_push = enable_push
# Per-project list of _CASRemote instances.
@@ -326,7 +328,7 @@ class CASCache(ArtifactCache):
request.write_offset = offset
# max. 64 kB chunks
request.data = f.read(chunk_size)
- request.resource_name = resource_name
+ request.resource_name = resource_name # pylint: disable=cell-var-from-loop
request.finish_write = remaining <= 0
yield request
offset += chunk_size
diff --git a/buildstream/_context.py b/buildstream/_context.py
index cb19ddc45..a94d374cf 100644
--- a/buildstream/_context.py
+++ b/buildstream/_context.py
@@ -64,12 +64,6 @@ class Context():
# The locations from which to push and pull prebuilt artifacts
self.artifact_cache_specs = []
- # The artifact cache quota
- self.cache_quota = None
-
- # The lower threshold to which we aim to reduce the cache size
- self.cache_lower_threshold = None
-
# The directory to store build logs
self.logdir = None
@@ -124,8 +118,8 @@ class Context():
self._workspaces = None
self._log_handle = None
self._log_filename = None
- self._config_cache_quota = None
- self._artifactdir_volume = None
+ self.config_cache_quota = 'infinity'
+ self.artifactdir_volume = None
# load()
#
@@ -185,23 +179,7 @@ class Context():
cache = _yaml.node_get(defaults, Mapping, 'cache')
_yaml.node_validate(cache, ['quota'])
- artifactdir_volume = self.artifactdir
- while not os.path.exists(artifactdir_volume):
- artifactdir_volume = os.path.dirname(artifactdir_volume)
-
- self._artifactdir_volume = artifactdir_volume
-
- # We read and parse the cache quota as specified by the user
- cache_quota = _yaml.node_get(cache, str, 'quota', default_value='infinity')
- try:
- cache_quota = utils._parse_size(cache_quota, self._artifactdir_volume)
- except utils.UtilError as e:
- raise LoadError(LoadErrorReason.INVALID_DATA,
- "{}\nPlease specify the value in bytes or as a % of full disk space.\n"
- "\nValid values are, for example: 800M 10G 1T 50%\n"
- .format(str(e))) from e
-
- self._config_cache_quota = cache_quota
+ self.config_cache_quota = _yaml.node_get(cache, str, 'quota', default_value='infinity')
# Load artifact share configuration
self.artifact_cache_specs = ArtifactCache.specs_from_config_node(defaults)
@@ -525,53 +503,6 @@ class Context():
def get_log_filename(self):
return self._log_filename
- def set_cache_quota(self, cache_size):
- # Headroom intended to give BuildStream a bit of leeway.
- # This acts as the minimum size of cache_quota and also
- # is taken from the user requested cache_quota.
- #
- if 'BST_TEST_SUITE' in os.environ:
- headroom = 0
- else:
- headroom = 2e9
-
- stat = os.statvfs(self._artifactdir_volume)
- available_space = (stat.f_bsize * stat.f_bavail)
-
- # Ensure system has enough storage for the cache_quota
- #
- # If cache_quota is none, set it to the maximum it could possibly be.
- #
- # Also check that cache_quota is atleast as large as our headroom.
- #
- cache_quota = self._config_cache_quota
- if cache_quota is None: # Infinity, set to max system storage
- cache_quota = cache_size + available_space
- if cache_quota < headroom: # Check minimum
- raise LoadError(LoadErrorReason.INVALID_DATA,
- "Invalid cache quota ({}): ".format(utils._pretty_size(cache_quota)) +
- "BuildStream requires a minimum cache quota of 2G.")
- elif cache_quota > cache_size + available_space: # Check maximum
- raise LoadError(LoadErrorReason.INVALID_DATA,
- ("Your system does not have enough available " +
- "space to support the cache quota specified.\n" +
- "You currently have:\n" +
- "- {used} of cache in use at {local_cache_path}\n" +
- "- {available} of available system storage").format(
- used=utils._pretty_size(cache_size),
- local_cache_path=self.artifactdir,
- available=utils._pretty_size(available_space)))
-
- # Place a slight headroom (2e9 (2GB) on the cache_quota) into
- # cache_quota to try and avoid exceptions.
- #
- # Of course, we might still end up running out during a build
- # if we end up writing more than 2G, but hey, this stuff is
- # already really fuzzy.
- #
- self.cache_quota = cache_quota - headroom
- self.cache_lower_threshold = self.cache_quota / 2
-
# _record_message()
#
# Records the message if recording is enabled
diff --git a/buildstream/_frontend/app.py b/buildstream/_frontend/app.py
index 2dd22dc9d..be9aae4e5 100644
--- a/buildstream/_frontend/app.py
+++ b/buildstream/_frontend/app.py
@@ -199,12 +199,10 @@ class App():
option_value = self._main_options.get(cli_option)
if option_value is not None:
setattr(self.context, context_attr, option_value)
-
- Platform.create_instance(self.context)
-
- platform = Platform.get_platform()
- cache_size = platform._artifact_cache.calculate_cache_size()
- self.context.set_cache_quota(cache_size)
+ try:
+ Platform.create_instance(self.context)
+ except BstError as e:
+ self._error_exit(e, "Error instantiating platform")
# Create the logger right before setting the message handler
self.logger = LogLine(self.context,
diff --git a/buildstream/_scheduler/queues/buildqueue.py b/buildstream/_scheduler/queues/buildqueue.py
index 376ef5ae2..1f2caa9a7 100644
--- a/buildstream/_scheduler/queues/buildqueue.py
+++ b/buildstream/_scheduler/queues/buildqueue.py
@@ -61,7 +61,7 @@ class BuildQueue(Queue):
cache = element._get_artifact_cache()
cache._add_artifact_size(artifact_size)
- if cache.get_approximate_cache_size() > self._scheduler.context.cache_quota:
+ if cache.get_approximate_cache_size() > cache.cache_quota:
self._scheduler._check_cache_size_real()
def done(self, job, element, result, success):
diff --git a/buildstream/_scheduler/scheduler.py b/buildstream/_scheduler/scheduler.py
index 5783e5a67..f1c4287e6 100644
--- a/buildstream/_scheduler/scheduler.py
+++ b/buildstream/_scheduler/scheduler.py
@@ -29,6 +29,7 @@ from contextlib import contextmanager
# Local imports
from .resources import Resources, ResourceType
from .jobs import CacheSizeJob, CleanupJob
+from .._platform import Platform
# A decent return code for Scheduler.run()
@@ -314,7 +315,8 @@ class Scheduler():
self._sched()
def _run_cleanup(self, cache_size):
- if cache_size and cache_size < self.context.cache_quota:
+ platform = Platform.get_platform()
+ if cache_size and cache_size < platform.artifactcache.cache_quota:
return
job = CleanupJob(self, 'cleanup', 'cleanup',
diff --git a/tests/testutils/artifactshare.py b/tests/testutils/artifactshare.py
index 76b729e33..05e87a499 100644
--- a/tests/testutils/artifactshare.py
+++ b/tests/testutils/artifactshare.py
@@ -140,6 +140,7 @@ class ArtifactShare():
return statvfs_result(f_blocks=self.total_space,
f_bfree=self.free_space - repo_size,
+ f_bavail=self.free_space - repo_size,
f_bsize=1)
@@ -156,4 +157,4 @@ def create_artifact_share(directory, *, total_space=None, free_space=None):
share.close()
-statvfs_result = namedtuple('statvfs_result', 'f_blocks f_bfree f_bsize')
+statvfs_result = namedtuple('statvfs_result', 'f_blocks f_bfree f_bsize f_bavail')