author     Valentin David <valentin.david@codethink.co.uk>  2018-11-15 15:10:14 +0100
committer  Valentin David <valentin.david@codethink.co.uk>  2018-11-28 15:29:52 +0100
commit     ba9afa9888d036b91954b179a57925884dd29483 (patch)
tree       e59227db3b6e80242d6c806fe82d0c7f63e5c9bf
parent     353b90dda760f320ec5b97c0bb56dce2ed7ea68f (diff)
download   buildstream-valentindavid/cache_server_fill_up.tar.gz
Lock cache cleanup in cas server (branch: valentindavid/cache_server_fill_up)

Cleaning up in parallel might slow down the cleaning process.
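
The patch serializes cleanup behind a single class-level lock and re-checks free space after acquiring it, so a thread that blocked on the lock returns immediately when a peer has already freed enough room. A minimal sketch of that check-lock-recheck pattern, with a toy has_space() standing in for the real statvfs-based __has_space():

import threading

class Cleaner:
    # One lock shared by all instances, mirroring __cleanup_cache_lock.
    _cleanup_lock = threading.Lock()

    def __init__(self, capacity):
        self.capacity = capacity  # bytes available (toy stand-in for os.statvfs)
        self.used = 0

    def has_space(self, object_size):
        # Hypothetical stand-in for __has_space(); the real check consults
        # os.statvfs() on the CAS directory.
        return self.used + object_size <= self.capacity

    def clean_up(self, object_size):
        if self.has_space(object_size):       # fast path, no lock taken
            return 0
        with Cleaner._cleanup_lock:           # serialize the expensive eviction
            if self.has_space(object_size):   # re-check: a peer thread may have
                return 0                      # cleaned up while we waited
            freed = self.used                 # toy eviction: drop everything
            self.used = 0
            return freed

Without the second check, every thread that queued on the lock would run its own eviction pass back to back, which is the slowdown the commit message refers to.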
-rw-r--r--  buildstream/_artifactcache/casserver.py | 102
1 file changed, 56 insertions(+), 46 deletions(-)
diff --git a/buildstream/_artifactcache/casserver.py b/buildstream/_artifactcache/casserver.py
index 84d22cc51..ed0266585 100644
--- a/buildstream/_artifactcache/casserver.py
+++ b/buildstream/_artifactcache/casserver.py
@@ -25,6 +25,7 @@ import sys
 import tempfile
 import uuid
 import errno
+import threading
 
 import click
 import grpc
@@ -450,11 +451,25 @@ def _digest_from_upload_resource_name(resource_name):
 
 class _CacheCleaner:
 
+    __cleanup_cache_lock = threading.Lock()
+
     def __init__(self, cas, max_head_size, min_head_size=int(2e9)):
         self.__cas = cas
         self.__max_head_size = max_head_size
         self.__min_head_size = min_head_size
 
+    def __has_space(self, object_size):
+        stats = os.statvfs(self.__cas.casdir)
+        free_disk_space = (stats.f_bavail * stats.f_bsize) - self.__min_head_size
+        total_disk_space = (stats.f_blocks * stats.f_bsize) - self.__min_head_size
+
+        if object_size > total_disk_space:
+            raise ArtifactTooLargeException("Artifact of size: {} is too large for "
+                                            "the filesystem which mounts the remote "
+                                            "cache".format(object_size))
+
+        return object_size <= free_disk_space
+
     # _clean_up_cache()
     #
     # Keep removing Least Recently Pushed (LRP) artifacts in a cache until there
@@ -467,51 +482,46 @@ class _CacheCleaner:
     #     int: The total bytes removed on the filesystem
     #
     def clean_up(self, object_size):
-        stats = os.statvfs(self.__cas.casdir)
-        free_disk_space = (stats.f_bavail * stats.f_bsize) - self.__min_head_size
-        total_disk_space = (stats.f_blocks * stats.f_bsize) - self.__min_head_size
-
-        if object_size > total_disk_space:
-            raise ArtifactTooLargeException("Artifact of size: {} is too large for "
-                                            "the filesystem which mounts the remote "
-                                            "cache".format(object_size))
-
-        if object_size <= free_disk_space:
-            # No need to clean up
+        if self.__has_space(object_size):
             return 0
 
-        stats = os.statvfs(self.__cas.casdir)
-        target_disk_space = (stats.f_bavail * stats.f_bsize) - self.__max_head_size
-
-        # obtain a list of LRP artifacts
-        LRP_objects = self.__cas.list_objects()
-
-        removed_size = 0  # in bytes
-        last_mtime = 0
-
-        while object_size - removed_size > target_disk_space:
-            try:
-                last_mtime, to_remove = LRP_objects.pop(0)  # The first element in the list is the LRP artifact
-            except IndexError:
-                # This exception is caught if there are no more artifacts in the list
-                # LRP_artifacts. This means the the artifact is too large for the filesystem
-                # so we abort the process
-                raise ArtifactTooLargeException("Artifact of size {} is too large for "
-                                                "the filesystem which mounts the remote "
-                                                "cache".format(object_size))
-
-            try:
-                size = os.stat(to_remove).st_size
-                os.unlink(to_remove)
-                removed_size += size
-            except FileNotFoundError:
-                pass
-
-        self.__cas.clean_up_refs_until(last_mtime)
-
-        if removed_size > 0:
-            logging.info("Successfully removed {} bytes from the cache".format(removed_size))
-        else:
-            logging.info("No artifacts were removed from the cache.")
-
-        return removed_size
+        with _CacheCleaner.__cleanup_cache_lock:
+            if self.__has_space(object_size):
+                # Another thread has done the cleanup for us
+                return 0
+
+            stats = os.statvfs(self.__cas.casdir)
+            target_disk_space = (stats.f_bavail * stats.f_bsize) - self.__max_head_size
+
+            # obtain a list of LRP artifacts
+            LRP_objects = self.__cas.list_objects()
+
+            removed_size = 0  # in bytes
+            last_mtime = 0
+
+            while object_size - removed_size > target_disk_space:
+                try:
+                    last_mtime, to_remove = LRP_objects.pop(0)  # The first element in the list is the LRP artifact
+                except IndexError:
+                    # This exception is caught if there are no more artifacts in the list
+                    # LRP_objects. This means the artifact is too large for the filesystem
+                    # so we abort the process
+                    raise ArtifactTooLargeException("Artifact of size {} is too large for "
+                                                    "the filesystem which mounts the remote "
+                                                    "cache".format(object_size))
+
+                try:
+                    size = os.stat(to_remove).st_size
+                    os.unlink(to_remove)
+                    removed_size += size
+                except FileNotFoundError:
+                    pass
+
+            self.__cas.clean_up_refs_until(last_mtime)
+
+            if removed_size > 0:
+                logging.info("Successfully removed {} bytes from the cache".format(removed_size))
+            else:
+                logging.info("No artifacts were removed from the cache.")
+
+            return removed_size
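
The eviction loop depends on self.__cas.list_objects() returning (mtime, path) pairs sorted oldest-first, so pop(0) always yields the least recently pushed object. That method is not part of this diff; a hedged sketch of what such a listing could look like, assuming a flat object directory (an assumption made for illustration, since the real CAS layout is not shown here):

import os

def list_objects(objdir):
    # Collect (mtime, path) pairs and sort ascending, so index 0 is the
    # least recently modified object. The flat-directory layout is an
    # assumption of this sketch, not the actual CAS on-disk format.
    objects = []
    for name in os.listdir(objdir):
        path = os.path.join(objdir, name)
        try:
            objects.append((os.path.getmtime(path), path))
        except FileNotFoundError:
            pass  # object removed concurrently; skip it
    objects.sort()
    return objects

Sorting by mtime makes the eviction order match push recency only because the server touches objects when they are pushed; the clean_up() loop then walks this list from the front until enough space is freed.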