author     Tristan Maat <tristan.maat@codethink.co.uk>    2019-10-15 11:38:29 +0100
committer  Tristan Maat <tristan.maat@codethink.co.uk>    2019-12-03 10:45:08 +0000
commit     e660e0946ca9a6a58e4aae1d856eb14db99f3f68 (patch)
tree       a72c6131fdf92afeab7b271a9a2e4d61d84daad5
parent     cefbcd054f62f7db22361f192efe9555b48518f0 (diff)
download   buildstream-e660e0946ca9a6a58e4aae1d856eb14db99f3f68.tar.gz
casserver.py: Proxy CAS requests to buildbox-casd
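
The heart of this commit is in the casserver.py hunks below: rather than answering ByteStream and ContentAddressableStorage RPCs out of BuildStream's own CASCache, each servicer now holds a gRPC stub connected to a locally spawned buildbox-casd and forwards requests to it unchanged. A minimal sketch of that proxy pattern, assuming BuildStream's in-tree generated proto modules are importable; the socket address is made up (in the real code CASDProcessManager picks the connection string at runtime):

    import grpc

    from buildstream._protos.google.bytestream import bytestream_pb2_grpc


    class ProxyByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
        # A servicer that owns no storage: every RPC is delegated to the
        # stub, leaving blob bookkeeping entirely to buildbox-casd.

        def __init__(self, channel):
            super().__init__()
            self.bytestream = bytestream_pb2_grpc.ByteStreamStub(channel)

        def Read(self, request, context):
            # Server-streaming RPC: the stub call returns an iterator of
            # ReadResponse chunks, which gRPC re-streams to our client.
            return self.bytestream.Read(request)

        def Write(self, request_iterator, context):
            # Client-streaming RPC: hand the request iterator straight
            # to the stub, so upload data is never buffered in the proxy.
            return self.bytestream.Write(request_iterator)


    # Hypothetical wiring; the real socket path is chosen at runtime.
    servicer = ProxyByteStreamServicer(grpc.insecure_channel("unix:/run/casd/casd.sock"))

A side effect of delegating wholesale is that the hand-rolled NOT_FOUND / FAILED_PRECONDITION checks disappear from these servicers; status codes now come back from casd itself.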
-rw-r--r--  src/buildstream/_cas/casdprocessmanager.py |   8
-rw-r--r--  src/buildstream/_cas/casserver.py           | 418
-rw-r--r--  src/buildstream/utils.py                    |  38
3 files changed, 219 insertions, 245 deletions
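
Worth noting before the file-by-file diff: with CASCache gone from the server, the reference index keeps working against plain files. Each ref under cas/refs/heads is simply a serialized REAPI Digest message, written atomically and re-parsed on lookup. A small round-trip sketch of that on-disk format (the import path assumes BuildStream's in-tree protos; the directory layout and names here are illustrative, and the real code writes through save_file_atomic rather than a bare open()):

    import os
    import tempfile

    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    with tempfile.TemporaryDirectory() as refdir:
        digest = remote_execution_pb2.Digest(hash="ab" * 32, size_bytes=123)

        # set_ref(): serialize the digest into refs/heads/<name>
        ref_path = os.path.join(refdir, "refs", "heads", "target")
        os.makedirs(os.path.dirname(ref_path), exist_ok=True)
        with open(ref_path, "wb") as f:
            f.write(digest.SerializeToString())

        # resolve_ref(): bump the mtime (used for expiry) and parse it back
        resolved = remote_execution_pb2.Digest()
        with open(ref_path, "rb") as f:
            os.utime(ref_path)
            resolved.ParseFromString(f.read())

        assert resolved == digest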
diff --git a/src/buildstream/_cas/casdprocessmanager.py b/src/buildstream/_cas/casdprocessmanager.py
index e4a58d7d5..68bb88ef0 100644
--- a/src/buildstream/_cas/casdprocessmanager.py
+++ b/src/buildstream/_cas/casdprocessmanager.py
@@ -28,6 +28,7 @@ import grpc
 
 from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2_grpc
 from .._protos.build.buildgrid import local_cas_pb2_grpc
+from .._protos.google.bytestream import bytestream_pb2_grpc
 
 from .. import _signals, utils
 from .._exceptions import CASCacheError
@@ -177,6 +178,7 @@ class CASDChannel:
         self._connection_string = connection_string
         self._start_time = start_time
         self._casd_channel = None
+        self._bytestream = None
         self._casd_cas = None
         self._local_cas = None
 
@@ -192,6 +194,7 @@ class CASDChannel:
             time.sleep(0.01)
 
         self._casd_channel = grpc.insecure_channel(self._connection_string)
+        self._bytestream = bytestream_pb2_grpc.ByteStreamStub(self._casd_channel)
         self._casd_cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self._casd_channel)
         self._local_cas = local_cas_pb2_grpc.LocalContentAddressableStorageStub(self._casd_channel)
 
@@ -213,6 +216,11 @@
             self._establish_connection()
         return self._local_cas
 
+    def get_bytestream(self):
+        if self._casd_channel is None:
+            self._establish_connection()
+        return self._bytestream
+
     # is_closed():
     #
     # Return whether this connection is closed or not.
diff --git a/src/buildstream/_cas/casserver.py b/src/buildstream/_cas/casserver.py
index 06c14601c..e4acbde55 100644
--- a/src/buildstream/_cas/casserver.py
+++ b/src/buildstream/_cas/casserver.py
@@ -18,23 +18,24 @@ #        Jürg Billeter <juerg.billeter@codethink.co.uk>
 
 from concurrent import futures
-from contextlib import contextmanager
 from enum import Enum
+import contextlib
 import logging
 import os
 import signal
 import sys
-import tempfile
 import uuid
-import errno
 
 import grpc
 from google.protobuf.message import DecodeError
 import click
 
-from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
-from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
-from .._protos.google.rpc import code_pb2
+from .._protos.build.bazel.remote.execution.v2 import (
+    remote_execution_pb2,
+    remote_execution_pb2_grpc,
+)
+from .._protos.google.bytestream import bytestream_pb2_grpc
+from .._protos.build.buildgrid import local_cas_pb2
 from .._protos.buildstream.v2 import (
     buildstream_pb2,
     buildstream_pb2_grpc,
@@ -44,10 +45,16 @@ from .._protos.buildstream.v2 import (
     source_pb2_grpc,
 )
 
-from .. import utils
-from .._exceptions import CASError, CASCacheError
+# Note: We'd ideally like to avoid imports from the core codebase as
+# much as possible, since we're expecting to eventually split this
+# module off into its own project.
+#
+# Not enough that we'd like to duplicate code, but enough that we want
+# to make it very obvious what we're using, so in this case we import
+# the specific methods we'll be using.
+from ..utils import save_file_atomic, _remove_path_with_parents
+from .casdprocessmanager import CASDProcessManager
 
-from .cascache import CASCache
 
 # The default limit for gRPC messages is 4 MiB.
 # Limit payload to 1 MiB to leave sufficient headroom for metadata.
@@ -102,11 +109,14 @@ def create_server(repo, *, enable_push, quota, index_only, log_level=LogLevel.Le
     handler.setFormatter(logging.Formatter(fmt="%(levelname)s: %(funcName)s: %(message)s"))
     logger.addHandler(handler)
 
-    cas = CASCache(os.path.abspath(repo), cache_quota=quota, protect_session_blobs=False)
+    casd_manager = CASDProcessManager(
+        os.path.abspath(repo), os.path.join(os.path.abspath(repo), "logs"), log_level, quota, False
+    )
+    casd_channel = casd_manager.create_channel()
 
     try:
-        artifactdir = os.path.join(os.path.abspath(repo), "artifacts", "refs")
-        sourcedir = os.path.join(os.path.abspath(repo), "source_protos")
+        root = os.path.abspath(repo)
+        sourcedir = os.path.join(root, "source_protos")
 
         # Use max_workers default from Python 3.5+
         max_workers = (os.cpu_count() or 1) * 5
@@ -114,21 +124,21 @@ def create_server(repo, *, enable_push, quota, index_only, log_level=LogLevel.Le
 
         if not index_only:
             bytestream_pb2_grpc.add_ByteStreamServicer_to_server(
-                _ByteStreamServicer(cas, enable_push=enable_push), server
+                _ByteStreamServicer(casd_channel, enable_push=enable_push), server
             )
 
             remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
-                _ContentAddressableStorageServicer(cas, enable_push=enable_push), server
+                _ContentAddressableStorageServicer(casd_channel, enable_push=enable_push), server
             )
 
         remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(_CapabilitiesServicer(), server)
 
         buildstream_pb2_grpc.add_ReferenceStorageServicer_to_server(
-            _ReferenceStorageServicer(cas, enable_push=enable_push), server
+            _ReferenceStorageServicer(casd_channel, root, enable_push=enable_push), server
         )
 
         artifact_pb2_grpc.add_ArtifactServiceServicer_to_server(
-            _ArtifactServicer(cas, artifactdir, update_cas=not index_only), server
+            _ArtifactServicer(casd_channel, root, update_cas=not index_only), server
         )
 
         source_pb2_grpc.add_SourceServiceServicer_to_server(_SourceServicer(sourcedir), server)
@@ -143,7 +153,8 @@ def create_server(repo, *, enable_push, quota, index_only, log_level=LogLevel.Le
         yield server
 
     finally:
-        cas.release_resources()
+        casd_channel.close()
+        casd_manager.release_resources()
 
 
 @click.command(short_help="CAS Artifact Server")
@@ -212,221 +223,41 @@
 
 class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
-    def __init__(self, cas, *, enable_push):
+    def __init__(self, casd, *, enable_push):
         super().__init__()
-        self.cas = cas
+        self.bytestream = casd.get_bytestream()
         self.enable_push = enable_push
         self.logger = logging.getLogger("buildstream._cas.casserver")
 
     def Read(self, request, context):
         self.logger.debug("Reading %s", request.resource_name)
-        resource_name = request.resource_name
-        client_digest = _digest_from_download_resource_name(resource_name)
-        if client_digest is None:
-            context.set_code(grpc.StatusCode.NOT_FOUND)
-            return
-
-        if request.read_offset > client_digest.size_bytes:
-            context.set_code(grpc.StatusCode.OUT_OF_RANGE)
-            return
-
-        try:
-            with open(self.cas.objpath(client_digest), "rb") as f:
-                if os.fstat(f.fileno()).st_size != client_digest.size_bytes:
-                    context.set_code(grpc.StatusCode.NOT_FOUND)
-                    return
-
-                os.utime(f.fileno())
-
-                if request.read_offset > 0:
-                    f.seek(request.read_offset)
-
-                remaining = client_digest.size_bytes - request.read_offset
-                while remaining > 0:
-                    chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
-                    remaining -= chunk_size
-
-                    response = bytestream_pb2.ReadResponse()
-                    # max. 64 kB chunks
-                    response.data = f.read(chunk_size)
-                    yield response
-        except FileNotFoundError:
-            context.set_code(grpc.StatusCode.NOT_FOUND)
+        return self.bytestream.Read(request)
 
     def Write(self, request_iterator, context):
         # Note that we can't easily give more information because the
         # data is stuck in an iterator that will be consumed if read.
         self.logger.debug("Writing data")
-        response = bytestream_pb2.WriteResponse()
-
-        if not self.enable_push:
-            context.set_code(grpc.StatusCode.PERMISSION_DENIED)
-            return response
-
-        offset = 0
-        finished = False
-        resource_name = None
-        with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
-            for request in request_iterator:
-                if finished or request.write_offset != offset:
-                    context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
-                    return response
-
-                if resource_name is None:
-                    # First request
-                    resource_name = request.resource_name
-                    client_digest = _digest_from_upload_resource_name(resource_name)
-                    if client_digest is None:
-                        context.set_code(grpc.StatusCode.NOT_FOUND)
-                        return response
-
-                    while True:
-                        if client_digest.size_bytes == 0:
-                            break
-
-                        try:
-                            os.posix_fallocate(out.fileno(), 0, client_digest.size_bytes)
-                            break
-                        except OSError as e:
-                            # Multiple upload can happen in the same time
-                            if e.errno != errno.ENOSPC:
-                                raise
-
-                elif request.resource_name:
-                    # If it is set on subsequent calls, it **must** match the value of the first request.
-                    if request.resource_name != resource_name:
-                        context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
-                        return response
-
-                if (offset + len(request.data)) > client_digest.size_bytes:
-                    context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
-                    return response
-
-                out.write(request.data)
-                offset += len(request.data)
-
-                if request.finish_write:
-                    if client_digest.size_bytes != offset:
-                        context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
-                        return response
-
-                    out.flush()
-
-                    try:
-                        digest = self.cas.add_object(path=out.name, link_directly=True)
-                    except CASCacheError as e:
-                        if e.reason == "cache-too-full":
-                            context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
-                        else:
-                            context.set_code(grpc.StatusCode.INTERNAL)
-                        return response
-
-                    if digest.hash != client_digest.hash:
-                        context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
-                        return response
-
-                    finished = True
-
-        assert finished
-
-        response.committed_size = offset
-        return response
+        return self.bytestream.Write(request_iterator)
 
 
 class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
-    def __init__(self, cas, *, enable_push):
+    def __init__(self, casd, *, enable_push):
         super().__init__()
-        self.cas = cas
+        self.cas = casd.get_cas()
        self.enable_push = enable_push
         self.logger = logging.getLogger("buildstream._cas.casserver")
 
     def FindMissingBlobs(self, request, context):
         self.logger.info("Finding '%s'", request.blob_digests)
-        response = remote_execution_pb2.FindMissingBlobsResponse()
-        for digest in request.blob_digests:
-            objpath = self.cas.objpath(digest)
-            try:
-                os.utime(objpath)
-            except OSError as e:
-                if e.errno != errno.ENOENT:
-                    raise
-
-                d = response.missing_blob_digests.add()
-                d.hash = digest.hash
-                d.size_bytes = digest.size_bytes
-
-        return response
+        return self.cas.FindMissingBlobs(request)
 
     def BatchReadBlobs(self, request, context):
         self.logger.info("Reading '%s'", request.digests)
-        response = remote_execution_pb2.BatchReadBlobsResponse()
-        batch_size = 0
-
-        for digest in request.digests:
-            batch_size += digest.size_bytes
-            if batch_size > _MAX_PAYLOAD_BYTES:
-                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
-                return response
-
-            blob_response = response.responses.add()
-            blob_response.digest.hash = digest.hash
-            blob_response.digest.size_bytes = digest.size_bytes
-            try:
-                objpath = self.cas.objpath(digest)
-                with open(objpath, "rb") as f:
-                    if os.fstat(f.fileno()).st_size != digest.size_bytes:
-                        blob_response.status.code = code_pb2.NOT_FOUND
-                        continue
-
-                    os.utime(f.fileno())
-
-                    blob_response.data = f.read(digest.size_bytes)
-            except FileNotFoundError:
-                blob_response.status.code = code_pb2.NOT_FOUND
-
-        return response
+        return self.cas.BatchReadBlobs(request)
 
     def BatchUpdateBlobs(self, request, context):
         self.logger.info("Updating: '%s'", [request.digest for request in request.requests])
-        response = remote_execution_pb2.BatchUpdateBlobsResponse()
-
-        if not self.enable_push:
-            context.set_code(grpc.StatusCode.PERMISSION_DENIED)
-            return response
-
-        batch_size = 0
-
-        for blob_request in request.requests:
-            digest = blob_request.digest
-
-            batch_size += digest.size_bytes
-            if batch_size > _MAX_PAYLOAD_BYTES:
-                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
-                return response
-
-            blob_response = response.responses.add()
-            blob_response.digest.hash = digest.hash
-            blob_response.digest.size_bytes = digest.size_bytes
-
-            if len(blob_request.data) != digest.size_bytes:
-                blob_response.status.code = code_pb2.FAILED_PRECONDITION
-                continue
-
-            with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
-                out.write(blob_request.data)
-                out.flush()
-
-                try:
-                    server_digest = self.cas.add_object(path=out.name)
-                except CASCacheError as e:
-                    if e.reason == "cache-too-full":
-                        blob_response.status.code = code_pb2.RESOURCE_EXHAUSTED
-                    else:
-                        blob_response.status.code = code_pb2.INTERNAL
-                    continue
-
-                if server_digest.hash != digest.hash:
-                    blob_response.status.code = code_pb2.FAILED_PRECONDITION
-
-        return response
+        return self.cas.BatchUpdateBlobs(request)
 
 
 class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
@@ -451,29 +282,80 @@ class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
 
 class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
-    def __init__(self, cas, *, enable_push):
+    def __init__(self, casd, cas_root, *, enable_push):
         super().__init__()
-        self.cas = cas
+        self.cas = casd.get_cas()
+        self.root = cas_root
         self.enable_push = enable_push
         self.logger = logging.getLogger("buildstream._cas.casserver")
 
+        self.tmpdir = os.path.join(self.root, "tmp")
+        self.casdir = os.path.join(self.root, "cas")
+        self.refdir = os.path.join(self.casdir, "refs", "heads")
+        os.makedirs(self.tmpdir, exist_ok=True)
+
+    # ref_path():
+    #
+    # Get the path to a digest's file.
+    #
+    # Args:
+    #     ref - The ref of the digest.
+    #
+    # Returns:
+    #     str - The path to the digest's file.
+    #
+    def ref_path(self, ref: str) -> str:
+        return os.path.join(self.refdir, ref)
+
+    # set_ref():
+    #
+    # Create or update a ref with a new digest.
+    #
+    # Args:
+    #     ref - The ref of the digest.
+    #     tree - The digest to write.
+    #
+    def set_ref(self, ref: str, tree):
+        ref_path = self.ref_path(ref)
+
+        os.makedirs(os.path.dirname(ref_path), exist_ok=True)
+        with save_file_atomic(ref_path, "wb", tempdir=self.tmpdir) as f:
+            f.write(tree.SerializeToString())
+
+    # resolve_ref():
+    #
+    # Resolve a ref to a digest.
+    #
+    # Args:
+    #     ref (str): The name of the ref
+    #
+    # Returns:
+    #     (Digest): The digest stored in the ref
+    #
+    def resolve_ref(self, ref):
+        ref_path = self.ref_path(ref)
+
+        with open(ref_path, "rb") as f:
+            os.utime(ref_path)
+
+            digest = remote_execution_pb2.Digest()
+            digest.ParseFromString(f.read())
+            return digest
 
     def GetReference(self, request, context):
         self.logger.debug("'%s'", request.key)
         response = buildstream_pb2.GetReferenceResponse()
 
         try:
-            tree = self.cas.resolve_ref(request.key, update_mtime=True)
-            try:
-                self.cas.update_tree_mtime(tree)
-            except FileNotFoundError:
-                self.cas.remove(request.key)
-                context.set_code(grpc.StatusCode.NOT_FOUND)
-                return response
-
-            response.digest.hash = tree.hash
-            response.digest.size_bytes = tree.size_bytes
-        except CASError:
+            digest = self.resolve_ref(request.key)
+        except FileNotFoundError:
+            with contextlib.suppress(FileNotFoundError):
+                _remove_path_with_parents(self.refdir, request.key)
             context.set_code(grpc.StatusCode.NOT_FOUND)
             return response
+
+        response.digest.hash = digest.hash
+        response.digest.size_bytes = digest.size_bytes
 
         return response
 
@@ -486,7 +368,7 @@ class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
             return response
 
         for key in request.keys:
-            self.cas.set_ref(key, request.digest)
+            self.set_ref(key, request.digest)
 
         return response
 
@@ -500,14 +382,46 @@ class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
 
 class _ArtifactServicer(artifact_pb2_grpc.ArtifactServiceServicer):
-    def __init__(self, cas, artifactdir, *, update_cas=True):
+    def __init__(self, casd, root, *, update_cas=True):
         super().__init__()
-        self.cas = cas
-        self.artifactdir = artifactdir
+        self.cas = casd.get_cas()
+        self.local_cas = casd.get_local_cas()
+        self.root = root
+        self.artifactdir = os.path.join(root, "artifacts", "refs")
         self.update_cas = update_cas
-        os.makedirs(artifactdir, exist_ok=True)
         self.logger = logging.getLogger("buildstream._cas.casserver")
 
+    # object_path():
+    #
+    # Get the path to an object's file.
+    #
+    # Args:
+    #     digest - The digest of the object.
+    #
+    # Returns:
+    #     str - The path to the object's file.
+    #
+    def object_path(self, digest) -> str:
+        return os.path.join(self.root, "cas", "objects", digest.hash[:2], digest.hash[2:])
+
+    # resolve_digest():
+    #
+    # Read the directory corresponding to a digest.
+    #
+    # Args:
+    #     digest - The digest corresponding to a directory.
+    #
+    # Returns:
+    #     remote_execution_pb2.Directory - The directory.
+    #
+    # Raises:
+    #     FileNotFoundError - If the digest object doesn't exist.
+    #
+    def resolve_digest(self, digest):
+        directory = remote_execution_pb2.Directory()
+        with open(self.object_path(digest), "rb") as f:
+            directory.ParseFromString(f.read())
+        return directory
+
     def GetArtifact(self, request, context):
         self.logger.info("'%s'", request.cache_key)
         artifact_path = os.path.join(self.artifactdir, request.cache_key)
@@ -518,6 +432,8 @@ class _ArtifactServicer(artifact_pb2_grpc.ArtifactServiceServicer):
         with open(artifact_path, "rb") as f:
             artifact.ParseFromString(f.read())
 
+        os.utime(artifact_path)
+
         # Artifact-only servers will not have blobs on their system,
         # so we can't reasonably update their mtimes. Instead, we exit
         # early, and let the CAS server deal with its blobs.
@@ -536,26 +452,40 @@ class _ArtifactServicer(artifact_pb2_grpc.ArtifactServiceServicer):
 
         try:
             if str(artifact.files):
-                self.cas.update_tree_mtime(artifact.files)
+                request = local_cas_pb2.FetchTreeRequest()
+                request.root_digest.CopyFrom(artifact.files)
+                request.fetch_file_blobs = True
+                self.local_cas.FetchTree(request)
 
             if str(artifact.buildtree):
-                # buildtrees might not be there
                 try:
-                    self.cas.update_tree_mtime(artifact.buildtree)
-                except FileNotFoundError:
-                    pass
+                    request = local_cas_pb2.FetchTreeRequest()
+                    request.root_digest.CopyFrom(artifact.buildtree)
+                    request.fetch_file_blobs = True
+                    self.local_cas.FetchTree(request)
+                except grpc.RpcError as err:
+                    # buildtrees might not be there
+                    if err.code() != grpc.StatusCode.NOT_FOUND:
+                        raise
 
             if str(artifact.public_data):
-                os.utime(self.cas.objpath(artifact.public_data))
+                request = remote_execution_pb2.FindMissingBlobsRequest()
+                d = request.blob_digests.add()
+                d.CopyFrom(artifact.public_data)
+                self.cas.FindMissingBlobs(request)
 
+            request = remote_execution_pb2.FindMissingBlobsRequest()
             for log_file in artifact.logs:
-                os.utime(self.cas.objpath(log_file.digest))
-
-        except FileNotFoundError:
-            os.unlink(artifact_path)
-            context.abort(grpc.StatusCode.NOT_FOUND, "Artifact files incomplete")
-        except DecodeError:
-            context.abort(grpc.StatusCode.NOT_FOUND, "Artifact files not valid")
+                d = request.blob_digests.add()
+                d.CopyFrom(log_file.digest)
+            self.cas.FindMissingBlobs(request)
+
+        except grpc.RpcError as err:
+            if err.code() == grpc.StatusCode.NOT_FOUND:
+                os.unlink(artifact_path)
+                context.abort(grpc.StatusCode.NOT_FOUND, "Artifact files incomplete")
+            else:
+                context.abort(grpc.StatusCode.NOT_FOUND, "Artifact files not valid")
 
         return artifact
 
@@ -579,7 +509,7 @@ class _ArtifactServicer(artifact_pb2_grpc.ArtifactServiceServicer):
         # Add the artifact proto to the cas
         artifact_path = os.path.join(self.artifactdir, request.cache_key)
         os.makedirs(os.path.dirname(artifact_path), exist_ok=True)
-        with utils.save_file_atomic(artifact_path, mode="wb") as f:
+        with save_file_atomic(artifact_path, mode="wb") as f:
             f.write(artifact.SerializeToString())
 
         return artifact
@@ -590,20 +520,18 @@ class _ArtifactServicer(artifact_pb2_grpc.ArtifactServiceServicer):
 
     def _check_directory(self, name, digest, context):
         try:
-            directory = remote_execution_pb2.Directory()
-            with open(self.cas.objpath(digest), "rb") as f:
-                directory.ParseFromString(f.read())
+            self.resolve_digest(digest)
         except FileNotFoundError:
-            self.logger.warning("Artifact %s specified but no files found (%s)", name, self.cas.objpath(digest))
+            self.logger.warning("Artifact %s specified but no files found", name)
             context.abort(grpc.StatusCode.FAILED_PRECONDITION, "Artifact {} specified but no files found".format(name))
         except DecodeError:
-            self.logger.warning("Artifact %s specified but directory not found (%s)", name, self.cas.objpath(digest))
+            self.logger.warning("Artifact %s specified but directory not found", name)
             context.abort(
                 grpc.StatusCode.FAILED_PRECONDITION, "Artifact {} specified but directory not found".format(name)
             )
 
     def _check_file(self, name, digest, context):
-        if not os.path.exists(self.cas.objpath(digest)):
+        if not os.path.exists(self.object_path(digest)):
             context.abort(grpc.StatusCode.FAILED_PRECONDITION, "Artifact {} specified but not found".format(name))
 
@@ -651,7 +579,7 @@ class _SourceServicer(source_pb2_grpc.SourceServiceServicer):
 
     def _set_source(self, cache_key, source_proto):
         path = os.path.join(self.sourcedir, cache_key)
         os.makedirs(os.path.dirname(path), exist_ok=True)
-        with utils.save_file_atomic(path, "w+b") as f:
+        with save_file_atomic(path, "w+b") as f:
             f.write(source_proto.SerializeToString())
 
diff --git a/src/buildstream/utils.py b/src/buildstream/utils.py
index 0307b8470..5009be338 100644
--- a/src/buildstream/utils.py
+++ b/src/buildstream/utils.py
@@ -779,6 +779,44 @@ def _is_main_process():
     return os.getpid() == _MAIN_PID
 
 
+# Remove a path and any empty directories leading up to it.
+#
+# Args:
+#     basedir - The basedir at which to stop pruning even if
+#               it is empty.
+#     path - A path relative to basedir that should be pruned.
+#
+# Raises:
+#     FileNotFoundError - if the path itself doesn't exist.
+#     OSError - if something else goes wrong
+#
+def _remove_path_with_parents(basedir: Union[Path, str], path: Union[Path, str]):
+    assert not os.path.isabs(path), "The path ({}) should be relative to basedir ({})".format(path, basedir)
+    path = os.path.join(basedir, path)
+
+    # Start by removing the path itself
+    os.unlink(path)
+
+    # Now walk up the directory tree and delete any empty directories
+    path = os.path.dirname(path)
+    while path != basedir:
+        try:
+            os.rmdir(path)
+        except FileNotFoundError:
+            # The parent directory did not exist (race conditions can
+            # cause this), but it's parent directory might still be
+            # ready to prune
+            pass
+        except OSError as e:
+            if e.errno == errno.ENOTEMPTY:
+                # The parent directory was not empty, so we
+                # cannot prune directories beyond this point
+                break
+
+            raise
+
+        path = os.path.dirname(path)
+
+
 # Recursively remove directories, ignoring file permissions as much as
 # possible.
 def _force_rmtree(rootpath, **kwargs):
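
Finally, the new _remove_path_with_parents() helper exists because refs are now plain files: deleting a stale ref (see GetReference above) would otherwise strand empty directories under refs/heads. The helper unlinks the file, then prunes newly-empty parents, stopping at the base directory or at the first non-empty level. A self-contained demonstration of that behaviour (the helper is restated inline so the snippet runs without BuildStream installed; the directory names are made up):

    import errno
    import os
    import tempfile


    def remove_path_with_parents(basedir, path):
        # Same logic as the helper added above, restated for the demo.
        assert not os.path.isabs(path)
        full = os.path.join(basedir, path)
        os.unlink(full)  # Remove the file itself first

        # Walk upwards, removing directories until one is non-empty
        # or we reach basedir.
        full = os.path.dirname(full)
        while full != basedir:
            try:
                os.rmdir(full)
            except FileNotFoundError:
                pass  # Raced with another remover; keep walking up
            except OSError as e:
                if e.errno == errno.ENOTEMPTY:
                    break  # A sibling remains; stop pruning here
                raise
            full = os.path.dirname(full)


    with tempfile.TemporaryDirectory() as base:
        ref = os.path.join("refs", "heads", "project", "target")
        os.makedirs(os.path.join(base, os.path.dirname(ref)))
        open(os.path.join(base, ref), "w").close()

        remove_path_with_parents(base, ref)

        # The ref and its now-empty parents are gone; base itself survives.
        assert os.listdir(base) == []

In GetReference the call is wrapped in contextlib.suppress(FileNotFoundError), so two clients racing to delete the same stale ref do not turn the loser's attempt into a server error.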