author     Jim MacArthur <jim.macarthur@codethink.co.uk>  2018-07-09 13:51:18 +0100
committer  Jim MacArthur <jim.macarthur@codethink.co.uk>  2018-07-09 13:51:18 +0100
commit     bc25fa6400a2e2722d30cfbb5f378182d1a1ae07 (patch)
tree       2ad4f11ca8535e4854553abb267efbf414d8876a
parent     af993bbb5319cc0568695b9f3ea26b738ef6f76f (diff)
parent     9d69068742eadc38084e9745c633ab443372ca66 (diff)
download   buildstream-jmac/googlecas_and_virtual_directories_4.tar.gz

Merge branch 'juerg/googlecas' into jmac/virtual_directories (jmac/googlecas_and_virtual_directories_4)
-rw-r--r--  .coveragerc  2
-rw-r--r--  .gitlab-ci.yml  8
-rw-r--r--  .pylintrc  4
-rw-r--r--  NEWS  4
-rw-r--r--  buildstream/_artifactcache/artifactcache.py  33
-rw-r--r--  buildstream/_artifactcache/cascache.py  708
-rw-r--r--  buildstream/_artifactcache/casserver.py  246
-rw-r--r--  buildstream/_artifactcache/ostreecache.py  377
-rw-r--r--  buildstream/_artifactcache/pushreceive.py  903
-rw-r--r--  buildstream/_artifactcache/tarcache.py  297
-rw-r--r--  buildstream/_ostree.py  241
-rw-r--r--  buildstream/_platform/linux.py  4
-rw-r--r--  buildstream/_platform/unix.py  4
-rw-r--r--  buildstream/_project.py  2
-rw-r--r--  buildstream/_protos/__init__.py  0
-rw-r--r--  buildstream/_protos/build/__init__.py  0
-rw-r--r--  buildstream/_protos/build/bazel/__init__.py  0
-rw-r--r--  buildstream/_protos/build/bazel/remote/__init__.py  0
-rw-r--r--  buildstream/_protos/build/bazel/remote/execution/__init__.py  0
-rw-r--r--  buildstream/_protos/build/bazel/remote/execution/v2/__init__.py  0
-rw-r--r--  buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto  1253
-rw-r--r--  buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py  2466
-rw-r--r--  buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py  556
-rw-r--r--  buildstream/_protos/build/bazel/semver/__init__.py  0
-rw-r--r--  buildstream/_protos/build/bazel/semver/semver.proto  24
-rw-r--r--  buildstream/_protos/build/bazel/semver/semver_pb2.py  90
-rw-r--r--  buildstream/_protos/build/bazel/semver/semver_pb2_grpc.py  3
-rw-r--r--  buildstream/_protos/buildstream/__init__.py  0
-rw-r--r--  buildstream/_protos/buildstream/buildstream.proto  78
-rw-r--r--  buildstream/_protos/buildstream/buildstream_pb2.py  325
-rw-r--r--  buildstream/_protos/buildstream/buildstream_pb2_grpc.py  87
-rw-r--r--  buildstream/_protos/google/__init__.py  0
-rw-r--r--  buildstream/_protos/google/api/__init__.py  0
-rw-r--r--  buildstream/_protos/google/api/annotations.proto  31
-rw-r--r--  buildstream/_protos/google/api/annotations_pb2.py  46
-rw-r--r--  buildstream/_protos/google/api/annotations_pb2_grpc.py  3
-rw-r--r--  buildstream/_protos/google/api/http.proto  313
-rw-r--r--  buildstream/_protos/google/api/http_pb2.py  243
-rw-r--r--  buildstream/_protos/google/api/http_pb2_grpc.py  3
-rw-r--r--  buildstream/_protos/google/bytestream/__init__.py  0
-rw-r--r--  buildstream/_protos/google/bytestream/bytestream.proto  181
-rw-r--r--  buildstream/_protos/google/bytestream/bytestream_pb2.py  353
-rw-r--r--  buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py  160
-rw-r--r--  buildstream/_protos/google/longrunning/__init__.py  0
-rw-r--r--  buildstream/_protos/google/longrunning/operations.proto  160
-rw-r--r--  buildstream/_protos/google/longrunning/operations_pb2.py  391
-rw-r--r--  buildstream/_protos/google/longrunning/operations_pb2_grpc.py  132
-rw-r--r--  buildstream/_protos/google/rpc/__init__.py  0
-rw-r--r--  buildstream/_protos/google/rpc/status.proto  92
-rw-r--r--  buildstream/_protos/google/rpc/status_pb2.py  88
-rw-r--r--  buildstream/_protos/google/rpc/status_pb2_grpc.py  3
-rw-r--r--  buildstream/_signals.py  6
-rw-r--r--  doc/Makefile  2
-rw-r--r--  doc/source/install_artifacts.rst  140
-rw-r--r--  setup.cfg  2
-rwxr-xr-x  setup.py  105
-rw-r--r--  tests/artifactcache/junctions.py  98
-rw-r--r--  tests/artifactcache/tar.py  82
-rw-r--r--  tests/cachekey/project/elements/build1.expected  2
-rw-r--r--  tests/cachekey/project/elements/build2.expected  2
-rw-r--r--  tests/cachekey/project/elements/compose1.expected  2
-rw-r--r--  tests/cachekey/project/elements/compose2.expected  2
-rw-r--r--  tests/cachekey/project/elements/compose3.expected  2
-rw-r--r--  tests/cachekey/project/elements/compose4.expected  2
-rw-r--r--  tests/cachekey/project/elements/compose5.expected  2
-rw-r--r--  tests/cachekey/project/elements/import1.expected  2
-rw-r--r--  tests/cachekey/project/elements/import2.expected  2
-rw-r--r--  tests/cachekey/project/elements/import3.expected  2
-rw-r--r--  tests/cachekey/project/elements/script1.expected  2
-rw-r--r--  tests/cachekey/project/sources/bzr1.expected  2
-rw-r--r--  tests/cachekey/project/sources/git1.expected  2
-rw-r--r--  tests/cachekey/project/sources/git2.expected  2
-rw-r--r--  tests/cachekey/project/sources/local1.expected  2
-rw-r--r--  tests/cachekey/project/sources/local2.expected  2
-rw-r--r--  tests/cachekey/project/sources/ostree1.expected  2
-rw-r--r--  tests/cachekey/project/sources/patch1.expected  2
-rw-r--r--  tests/cachekey/project/sources/patch2.expected  2
-rw-r--r--  tests/cachekey/project/sources/patch3.expected  2
-rw-r--r--  tests/cachekey/project/sources/tar1.expected  2
-rw-r--r--  tests/cachekey/project/sources/tar2.expected  2
-rw-r--r--  tests/cachekey/project/sources/zip1.expected  2
-rw-r--r--  tests/cachekey/project/sources/zip2.expected  2
-rw-r--r--  tests/cachekey/project/target.expected  2
-rw-r--r--  tests/frontend/pull.py  377
-rw-r--r--  tests/frontend/push.py  501
-rw-r--r--  tests/integration/workspace.py  1
-rw-r--r--  tests/testutils/artifactshare.py  98
-rw-r--r--  tests/testutils/runcli.py  7
88 files changed, 8767 insertions, 2616 deletions
diff --git a/.coveragerc b/.coveragerc
index 6014b7fd0..d81aec1a2 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -6,6 +6,8 @@ include =
omit =
# Omit profiling helper module
*/buildstream/_profile.py
+ # Omit generated code
+ */buildstream/_protos/*
*/.eggs/*
[report]
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 3c6554698..f7c93d658 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,4 +1,4 @@
-image: buildstream/testsuite-debian:8-master-57-be5a863
+image: buildstream/testsuite-debian:8-master-88-4d92c106
cache:
key: "$CI_JOB_NAME-"
@@ -92,18 +92,18 @@ tests-debian-8:
<<: *linux-tests
tests-debian-9:
- image: buildstream/buildstream-debian:master-81-caa5241
+ image: buildstream/buildstream-debian:master-88-4d92c106
<<: *linux-tests
tests-fedora-27:
- image: buildstream/buildstream-fedora:master-56-5d7ee17
+ image: buildstream/buildstream-fedora:master-88-4d92c106
<<: *linux-tests
tests-unix:
# Use fedora here, to a) run a test on fedora and b) ensure that we
# can get rid of ostree - this is not possible with debian-8
- image: buildstream/buildstream-fedora:master-56-5d7ee17
+ image: buildstream/buildstream-fedora:master-88-4d92c106
stage: test
variables:
BST_FORCE_BACKEND: "unix"
diff --git a/.pylintrc b/.pylintrc
index c38309372..93f9eeadf 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -11,7 +11,7 @@ ignore=CVS,tests,doc
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
-ignore-patterns=
+ignore-patterns=.*_pb2.py,.*_pb2_grpc.py
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
@@ -190,7 +190,7 @@ ignored-classes=optparse.Values,thread._local,_thread._local,contextlib.closing,
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=pkg_resources,gi.repository
+ignored-modules=pkg_resources,gi.repository,grpc,buildstream._protos.*
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
diff --git a/NEWS b/NEWS
index 0025e3d75..c14e3926f 100644
--- a/NEWS
+++ b/NEWS
@@ -9,6 +9,10 @@ buildstream 1.1.4
o Added new simple `make` element
+ o Switch to Remote Execution CAS-based artifact cache on all platforms.
+
+ Artifact servers need to be migrated.
+
=================
buildstream 1.1.3
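
The migration path for the note above: the new server can be run directly via the create_server() API added in casserver.py further down. A minimal sketch, assuming a hypothetical repository path and port (the setup.py change in this commit also appears to expose this as a console script):

    # Minimal in-process setup of the new CAS artifact server, using
    # create_server() from casserver.py below. Repository path and port
    # are hypothetical examples; server_main() adds TLS and CLI handling.
    import signal

    from buildstream._artifactcache.casserver import create_server

    server = create_server('/srv/artifacts/repo', enable_push=True)
    server.add_insecure_port('[::]:11002')
    server.start()
    signal.pause()    # serve until interrupted, as server_main() does
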
diff --git a/buildstream/_artifactcache/artifactcache.py b/buildstream/_artifactcache/artifactcache.py
index 2d745f8c2..f33b112bc 100644
--- a/buildstream/_artifactcache/artifactcache.py
+++ b/buildstream/_artifactcache/artifactcache.py
@@ -35,22 +35,38 @@ from .. import _yaml
# push (bool): Whether we should attempt to push artifacts to this cache,
# in addition to pulling from it.
#
-class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push')):
+class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert client_key client_cert')):
# _new_from_config_node
#
# Creates an ArtifactCacheSpec() from a YAML loaded node
#
@staticmethod
- def _new_from_config_node(spec_node):
- _yaml.node_validate(spec_node, ['url', 'push'])
+ def _new_from_config_node(spec_node, basedir=None):
+ _yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert'])
url = _yaml.node_get(spec_node, str, 'url')
push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
if not url:
provenance = _yaml.node_get_provenance(spec_node)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: empty artifact cache URL".format(provenance))
- return ArtifactCacheSpec(url, push)
+
+ server_cert = _yaml.node_get(spec_node, str, 'server-cert', default_value=None)
+ if server_cert and basedir:
+ server_cert = os.path.join(basedir, server_cert)
+
+ client_key = _yaml.node_get(spec_node, str, 'client-key', default_value=None)
+ if client_key and basedir:
+ client_key = os.path.join(basedir, client_key)
+
+ client_cert = _yaml.node_get(spec_node, str, 'client-cert', default_value=None)
+ if client_cert and basedir:
+ client_cert = os.path.join(basedir, client_cert)
+
+ return ArtifactCacheSpec(url, push, server_cert, client_key, client_cert)
+
+
+ArtifactCacheSpec.__new__.__defaults__ = (None, None, None)
# An ArtifactCache manages artifacts.
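
The __new__.__defaults__ assignment above is what keeps the widened spec backward compatible: only url and push are required, and the three new TLS fields fall back to None. A standalone illustration (URL is a hypothetical example):

    # Standalone illustration of trailing namedtuple defaults: the last
    # three of the five fields default to None, so existing two-argument
    # call sites such as ArtifactCacheSpec(url, push) keep working.
    from collections import namedtuple

    Spec = namedtuple('Spec', 'url push server_cert client_key client_cert')
    Spec.__new__.__defaults__ = (None, None, None)

    spec = Spec('https://cache.example.com:11002', push=True)
    assert spec.server_cert is None and spec.client_cert is None
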
@@ -138,6 +154,7 @@ class ArtifactCache():
#
# Args:
# config_node (dict): The config block, which may contain the 'artifacts' key
+ # basedir (str): The base directory for relative paths
#
# Returns:
# A list of ArtifactCacheSpec instances.
@@ -146,15 +163,15 @@ class ArtifactCache():
# LoadError, if the config block contains invalid keys.
#
@staticmethod
- def specs_from_config_node(config_node):
+ def specs_from_config_node(config_node, basedir=None):
cache_specs = []
artifacts = config_node.get('artifacts', [])
if isinstance(artifacts, Mapping):
- cache_specs.append(ArtifactCacheSpec._new_from_config_node(artifacts))
+ cache_specs.append(ArtifactCacheSpec._new_from_config_node(artifacts, basedir))
elif isinstance(artifacts, list):
for spec_node in artifacts:
- cache_specs.append(ArtifactCacheSpec._new_from_config_node(spec_node))
+ cache_specs.append(ArtifactCacheSpec._new_from_config_node(spec_node, basedir))
else:
provenance = _yaml.node_get_provenance(config_node, key='artifacts')
raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA,
@@ -356,7 +373,7 @@ class ArtifactCache():
#
def _initialize_remotes(self):
def remote_failed(url, error):
- self._message(MessageType.WARN, "Failed to fetch remote refs from {}: {}".format(url, error))
+ self._message(MessageType.WARN, "Failed to initialize remote {}: {}".format(url, error))
with self.context.timed_activity("Initializing remote caches", silent_nested=True):
self.initialize_remotes(on_failure=remote_failed)
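
Tying the hunks above together: _new_from_config_node() now accepts the three TLS keys, and specs_from_config_node() threads basedir through so relative certificate paths resolve against the configuration's directory. A hypothetical 'artifacts' block, shown as the parsed node (a plain dict for illustration; in practice this is a loaded YAML node):

    # Hypothetical artifact-cache configuration using the new TLS keys.
    # Relative cert/key paths are joined with basedir in
    # _new_from_config_node(); the URL and filenames are examples only.
    config_node = {
        'artifacts': [
            {
                'url': 'https://cache.example.com:11002',
                'push': True,
                'server-cert': 'certs/server.crt',
                'client-key': 'certs/client.key',
                'client-cert': 'certs/client.crt',
            },
        ],
    }
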
diff --git a/buildstream/_artifactcache/cascache.py b/buildstream/_artifactcache/cascache.py
new file mode 100644
index 000000000..51d63a057
--- /dev/null
+++ b/buildstream/_artifactcache/cascache.py
@@ -0,0 +1,708 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+import hashlib
+import itertools
+import multiprocessing
+import os
+import signal
+import stat
+import tempfile
+from urllib.parse import urlparse
+
+import grpc
+
+from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
+from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
+from .._protos.buildstream import buildstream_pb2, buildstream_pb2_grpc
+
+from .. import _signals, utils
+from .._exceptions import ArtifactError
+
+from . import ArtifactCache
+
+
+# A CASCache manages artifacts in a CAS repository as specified in the
+# Remote Execution API.
+#
+# Args:
+# context (Context): The BuildStream context
+# enable_push (bool): Whether pushing is allowed by the platform
+#
+# Pushing is explicitly disabled by the platform in some cases,
+# like when we are falling back to functioning without using
+# user namespaces.
+#
+class CASCache(ArtifactCache):
+
+ def __init__(self, context, *, enable_push=True):
+ super().__init__(context)
+
+ self.casdir = os.path.join(context.artifactdir, 'cas')
+ os.makedirs(os.path.join(self.casdir, 'tmp'), exist_ok=True)
+
+ self._enable_push = enable_push
+
+ # Per-project list of _CASRemote instances.
+ self._remotes = {}
+
+ self._has_fetch_remotes = False
+ self._has_push_remotes = False
+
+ ################################################
+ # Implementation of abstract methods #
+ ################################################
+ def contains(self, element, key):
+ refpath = self._refpath(self.get_artifact_fullname(element, key))
+
+ # This assumes that the repository doesn't have any dangling pointers
+ return os.path.exists(refpath)
+
+ def extract(self, element, key):
+ ref = self.get_artifact_fullname(element, key)
+
+ tree = self.resolve_ref(ref)
+
+ dest = os.path.join(self.extractdir, element._get_project().name, element.normal_name, tree.hash)
+ if os.path.isdir(dest):
+ # artifact has already been extracted
+ return dest
+
+ os.makedirs(self.extractdir, exist_ok=True)
+
+ with tempfile.TemporaryDirectory(prefix='tmp', dir=self.extractdir) as tmpdir:
+ checkoutdir = os.path.join(tmpdir, ref)
+ self._checkout(checkoutdir, tree)
+
+ os.makedirs(os.path.dirname(dest), exist_ok=True)
+ try:
+ os.rename(checkoutdir, dest)
+ except OSError as e:
+ # With rename, it's possible to get either ENOTEMPTY or EEXIST
+ # in the case that the destination path is a non-empty directory.
+ #
+ # If rename fails with these errors, another process beat
+ # us to it so just ignore.
+ if e.errno not in [os.errno.ENOTEMPTY, os.errno.EEXIST]:
+ raise ArtifactError("Failed to extract artifact for ref '{}': {}"
+ .format(ref, e)) from e
+
+ return dest
+
+ def commit(self, element, content, keys):
+ refs = [self.get_artifact_fullname(element, key) for key in keys]
+
+ tree = self._create_tree(content)
+
+ for ref in refs:
+ self.set_ref(ref, tree)
+
+ def can_diff(self):
+ return True
+
+ def diff(self, element, key_a, key_b, *, subdir=None):
+ ref_a = self.get_artifact_fullname(element, key_a)
+ ref_b = self.get_artifact_fullname(element, key_b)
+
+ tree_a = self.resolve_ref(ref_a)
+ tree_b = self.resolve_ref(ref_b)
+
+ if subdir:
+ tree_a = self._get_subdir(tree_a, subdir)
+ tree_b = self._get_subdir(tree_b, subdir)
+
+ added = []
+ removed = []
+ modified = []
+
+ self._diff_trees(tree_a, tree_b, added=added, removed=removed, modified=modified)
+
+ return modified, removed, added
+
+ def initialize_remotes(self, *, on_failure=None):
+ remote_specs = self.global_remote_specs
+
+ for project in self.project_remote_specs:
+ remote_specs += self.project_remote_specs[project]
+
+ remote_specs = list(utils._deduplicate(remote_specs))
+
+ remotes = {}
+ q = multiprocessing.Queue()
+ for remote_spec in remote_specs:
+ # Use subprocess to avoid creation of gRPC threads in main BuildStream process
+ p = multiprocessing.Process(target=self._initialize_remote, args=(remote_spec, q))
+
+ try:
+ # Keep SIGINT blocked in the child process
+ with _signals.blocked([signal.SIGINT], ignore=False):
+ p.start()
+
+ error = q.get()
+ p.join()
+ except KeyboardInterrupt:
+ utils._kill_process_tree(p.pid)
+ raise
+
+ if error and on_failure:
+ on_failure(remote_spec.url, error)
+ elif error:
+ raise ArtifactError(error)
+ else:
+ self._has_fetch_remotes = True
+ if remote_spec.push:
+ self._has_push_remotes = True
+
+ remotes[remote_spec.url] = _CASRemote(remote_spec)
+
+ for project in self.context.get_projects():
+ remote_specs = self.global_remote_specs
+ if project in self.project_remote_specs:
+ remote_specs = list(utils._deduplicate(remote_specs + self.project_remote_specs[project]))
+
+ project_remotes = []
+
+ for remote_spec in remote_specs:
+ # Errors are already handled in the loop above,
+ # skip unreachable remotes here.
+ if remote_spec.url not in remotes:
+ continue
+
+ remote = remotes[remote_spec.url]
+ project_remotes.append(remote)
+
+ self._remotes[project] = project_remotes
+
+ def has_fetch_remotes(self, *, element=None):
+ if not self._has_fetch_remotes:
+ # No project has fetch remotes
+ return False
+ elif element is None:
+ # At least one (sub)project has fetch remotes
+ return True
+ else:
+ # Check whether the specified element's project has fetch remotes
+ remotes_for_project = self._remotes[element._get_project()]
+ return bool(remotes_for_project)
+
+ def has_push_remotes(self, *, element=None):
+ if not self._has_push_remotes or not self._enable_push:
+ # No project has push remotes
+ return False
+ elif element is None:
+ # At least one (sub)project has push remotes
+ return True
+ else:
+ # Check whether the specified element's project has push remotes
+ remotes_for_project = self._remotes[element._get_project()]
+ return any(remote.spec.push for remote in remotes_for_project)
+
+ def pull(self, element, key, *, progress=None):
+ ref = self.get_artifact_fullname(element, key)
+
+ project = element._get_project()
+
+ for remote in self._remotes[project]:
+ try:
+ remote.init()
+
+ request = buildstream_pb2.GetArtifactRequest()
+ request.key = ref
+ response = remote.artifact_cache.GetArtifact(request)
+
+ tree = remote_execution_pb2.Digest()
+ tree.hash = response.artifact.hash
+ tree.size_bytes = response.artifact.size_bytes
+
+ self._fetch_directory(remote, tree)
+
+ self.set_ref(ref, tree)
+
+ # no need to pull from additional remotes
+ return True
+
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ raise
+
+ return False
+
+ def link_key(self, element, oldkey, newkey):
+ oldref = self.get_artifact_fullname(element, oldkey)
+ newref = self.get_artifact_fullname(element, newkey)
+
+ tree = self.resolve_ref(oldref)
+
+ self.set_ref(newref, tree)
+
+ def push(self, element, keys):
+ refs = [self.get_artifact_fullname(element, key) for key in keys]
+
+ project = element._get_project()
+
+ push_remotes = [r for r in self._remotes[project] if r.spec.push]
+
+ pushed = False
+
+ for remote in push_remotes:
+ remote.init()
+
+ for ref in refs:
+ tree = self.resolve_ref(ref)
+
+ # Check whether ref is already on the server in which case
+ # there is no need to push the artifact
+ try:
+ request = buildstream_pb2.GetArtifactRequest()
+ request.key = ref
+ response = remote.artifact_cache.GetArtifact(request)
+
+ if response.artifact.hash == tree.hash and response.artifact.size_bytes == tree.size_bytes:
+ # ref is already on the server with the same tree
+ continue
+
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ raise
+
+ missing_blobs = {}
+ required_blobs = self._required_blobs(tree)
+
+ # Limit size of FindMissingBlobs request
+ for required_blobs_group in _grouper(required_blobs, 512):
+ request = remote_execution_pb2.FindMissingBlobsRequest()
+
+ for required_digest in required_blobs_group:
+ d = request.blob_digests.add()
+ d.hash = required_digest.hash
+ d.size_bytes = required_digest.size_bytes
+
+ response = remote.cas.FindMissingBlobs(request)
+ for digest in response.missing_blob_digests:
+ d = remote_execution_pb2.Digest()
+ d.hash = digest.hash
+ d.size_bytes = digest.size_bytes
+ missing_blobs[d.hash] = d
+
+ # Upload any blobs missing on the server
+ for digest in missing_blobs.values():
+ def request_stream():
+ resource_name = os.path.join(digest.hash, str(digest.size_bytes))
+ with open(self.objpath(digest), 'rb') as f:
+ assert os.fstat(f.fileno()).st_size == digest.size_bytes
+ offset = 0
+ finished = False
+ remaining = digest.size_bytes
+ while not finished:
+ chunk_size = min(remaining, 64 * 1024)
+ remaining -= chunk_size
+
+ request = bytestream_pb2.WriteRequest()
+ request.write_offset = offset
+ # max. 64 kB chunks
+ request.data = f.read(chunk_size)
+ request.resource_name = resource_name
+ request.finish_write = remaining <= 0
+ yield request
+ offset += chunk_size
+ finished = request.finish_write
+ response = remote.bytestream.Write(request_stream())
+
+ request = buildstream_pb2.UpdateArtifactRequest()
+ request.keys.append(ref)
+ request.artifact.hash = tree.hash
+ request.artifact.size_bytes = tree.size_bytes
+ remote.artifact_cache.UpdateArtifact(request)
+
+ pushed = True
+
+ return pushed
+
+ ################################################
+ # API Private Methods #
+ ################################################
+
+ # objpath():
+ #
+ # Return the path of an object based on its digest.
+ #
+ # Args:
+ # digest (Digest): The digest of the object
+ #
+ # Returns:
+ # (str): The path of the object
+ #
+ def objpath(self, digest):
+ return os.path.join(self.casdir, 'objects', digest.hash[:2], digest.hash[2:])
+
+ # add_object():
+ #
+ # Hash and write object to CAS.
+ #
+ # Args:
+ # digest (Digest): An optional Digest object to populate
+ # path (str): Path to file to add
+ # buffer (bytes): Byte buffer to add
+ #
+ # Returns:
+ # (Digest): The digest of the added object
+ #
+ # Either `path` or `buffer` must be passed, but not both.
+ #
+ def add_object(self, *, digest=None, path=None, buffer=None):
+ # Exactly one of the two parameters has to be specified
+ assert (path is None) != (buffer is None)
+
+ if digest is None:
+ digest = remote_execution_pb2.Digest()
+
+ try:
+ h = hashlib.sha256()
+ # Always write out new file to avoid corruption if input file is modified
+ with tempfile.NamedTemporaryFile(dir=os.path.join(self.casdir, 'tmp')) as out:
+ if path:
+ with open(path, 'rb') as f:
+ for chunk in iter(lambda: f.read(4096), b""):
+ h.update(chunk)
+ out.write(chunk)
+ else:
+ h.update(buffer)
+ out.write(buffer)
+
+ out.flush()
+
+ digest.hash = h.hexdigest()
+ digest.size_bytes = os.fstat(out.fileno()).st_size
+
+ # Place file at final location
+ objpath = self.objpath(digest)
+ os.makedirs(os.path.dirname(objpath), exist_ok=True)
+ os.link(out.name, objpath)
+
+ except FileExistsError:
+ # We can ignore the failed link() if the object is already in the repo.
+ pass
+
+ except OSError as e:
+ raise ArtifactError("Failed to hash object: {}".format(e)) from e
+
+ return digest
+
+ # set_ref():
+ #
+ # Create or replace a ref.
+ #
+ # Args:
+ # ref (str): The name of the ref
+ # tree (Digest): The digest to store in the ref
+ #
+ def set_ref(self, ref, tree):
+ refpath = self._refpath(ref)
+ os.makedirs(os.path.dirname(refpath), exist_ok=True)
+ with utils.save_file_atomic(refpath, 'wb') as f:
+ f.write(tree.SerializeToString())
+
+ # resolve_ref():
+ #
+ # Resolve a ref to a digest.
+ #
+ # Args:
+ # ref (str): The name of the ref
+ #
+ # Returns:
+ # (Digest): The digest stored in the ref
+ #
+ def resolve_ref(self, ref):
+ refpath = self._refpath(ref)
+
+ try:
+ with open(refpath, 'rb') as f:
+ digest = remote_execution_pb2.Digest()
+ digest.ParseFromString(f.read())
+ return digest
+
+ except FileNotFoundError as e:
+ raise ArtifactError("Attempt to access unavailable artifact: {}".format(e)) from e
+
+ ################################################
+ # Local Private Methods #
+ ################################################
+ def _checkout(self, dest, tree):
+ os.makedirs(dest, exist_ok=True)
+
+ directory = remote_execution_pb2.Directory()
+
+ with open(self.objpath(tree), 'rb') as f:
+ directory.ParseFromString(f.read())
+
+ for filenode in directory.files:
+ # regular file, create hardlink
+ fullpath = os.path.join(dest, filenode.name)
+ os.link(self.objpath(filenode.digest), fullpath)
+
+ if filenode.is_executable:
+ os.chmod(fullpath, stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP |
+ stat.S_IROTH | stat.S_IXOTH)
+
+ for dirnode in directory.directories:
+ fullpath = os.path.join(dest, dirnode.name)
+ self._checkout(fullpath, dirnode.digest)
+
+ for symlinknode in directory.symlinks:
+ # symlink
+ fullpath = os.path.join(dest, symlinknode.name)
+ os.symlink(symlinknode.target, fullpath)
+
+ def _refpath(self, ref):
+ return os.path.join(self.casdir, 'refs', 'heads', ref)
+
+ def _create_tree(self, path, *, digest=None):
+ directory = remote_execution_pb2.Directory()
+
+ for name in sorted(os.listdir(path)):
+ full_path = os.path.join(path, name)
+ mode = os.lstat(full_path).st_mode
+ if stat.S_ISDIR(mode):
+ dirnode = directory.directories.add()
+ dirnode.name = name
+ self._create_tree(full_path, digest=dirnode.digest)
+ elif stat.S_ISREG(mode):
+ filenode = directory.files.add()
+ filenode.name = name
+ self.add_object(path=full_path, digest=filenode.digest)
+ filenode.is_executable = (mode & stat.S_IXUSR) == stat.S_IXUSR
+ elif stat.S_ISLNK(mode):
+ symlinknode = directory.symlinks.add()
+ symlinknode.name = name
+ symlinknode.target = os.readlink(full_path)
+ else:
+ raise ArtifactError("Unsupported file type for {}".format(full_path))
+
+ return self.add_object(digest=digest, buffer=directory.SerializeToString())
+
+ def _get_subdir(self, tree, subdir):
+ head, name = os.path.split(subdir)
+ if head:
+ tree = self._get_subdir(tree, head)
+
+ directory = remote_execution_pb2.Directory()
+
+ with open(self.objpath(tree), 'rb') as f:
+ directory.ParseFromString(f.read())
+
+ for dirnode in directory.directories:
+ if dirnode.name == name:
+ return dirnode.digest
+
+ raise ArtifactError("Subdirectory {} not found".format(name))
+
+ def _diff_trees(self, tree_a, tree_b, *, added, removed, modified, path=""):
+ dir_a = remote_execution_pb2.Directory()
+ dir_b = remote_execution_pb2.Directory()
+
+ if tree_a:
+ with open(self.objpath(tree_a), 'rb') as f:
+ dir_a.ParseFromString(f.read())
+ if tree_b:
+ with open(self.objpath(tree_b), 'rb') as f:
+ dir_b.ParseFromString(f.read())
+
+ a = 0
+ b = 0
+ while a < len(dir_a.files) or b < len(dir_b.files):
+ if b < len(dir_b.files) and (a >= len(dir_a.files) or
+ dir_a.files[a].name > dir_b.files[b].name):
+ added.append(os.path.join(path, dir_b.files[b].name))
+ b += 1
+ elif a < len(dir_a.files) and (b >= len(dir_b.files) or
+ dir_b.files[b].name > dir_a.files[a].name):
+ removed.append(os.path.join(path, dir_a.files[a].name))
+ a += 1
+ else:
+ # File exists in both directories
+ if dir_a.files[a].digest.hash != dir_b.files[b].digest.hash:
+ modified.append(os.path.join(path, dir_a.files[a].name))
+ a += 1
+ b += 1
+
+ a = 0
+ b = 0
+ while a < len(dir_a.directories) or b < len(dir_b.directories):
+ if b < len(dir_b.directories) and (a >= len(dir_a.directories) or
+ dir_a.directories[a].name > dir_b.directories[b].name):
+ self._diff_trees(None, dir_b.directories[b].digest,
+ added=added, removed=removed, modified=modified,
+ path=os.path.join(path, dir_b.directories[b].name))
+ b += 1
+ elif a < len(dir_a.directories) and (b >= len(dir_b.directories) or
+ dir_b.directories[b].name > dir_a.directories[a].name):
+ self._diff_trees(dir_a.directories[a].digest, None,
+ added=added, removed=removed, modified=modified,
+ path=os.path.join(path, dir_a.directories[a].name))
+ a += 1
+ else:
+ # Subdirectory exists in both directories
+ if dir_a.directories[a].digest.hash != dir_b.directories[b].digest.hash:
+ self._diff_trees(dir_a.directories[a].digest, dir_b.directories[b].digest,
+ added=added, removed=removed, modified=modified,
+ path=os.path.join(path, dir_a.directories[a].name))
+ a += 1
+ b += 1
+
+ def _initialize_remote(self, remote_spec, q):
+ try:
+ remote = _CASRemote(remote_spec)
+ remote.init()
+
+ request = buildstream_pb2.StatusRequest()
+ response = remote.artifact_cache.Status(request)
+
+ if remote_spec.push and not response.allow_updates:
+ q.put('Artifact server does not allow push')
+ else:
+ # No error
+ q.put(None)
+
+ except Exception as e: # pylint: disable=broad-except
+ # Whatever happens, we need to return it to the calling process
+ #
+ q.put(str(e))
+
+ def _required_blobs(self, tree):
+ # parse directory, and recursively add blobs
+ d = remote_execution_pb2.Digest()
+ d.hash = tree.hash
+ d.size_bytes = tree.size_bytes
+ yield d
+
+ directory = remote_execution_pb2.Directory()
+
+ with open(self.objpath(tree), 'rb') as f:
+ directory.ParseFromString(f.read())
+
+ for filenode in directory.files:
+ d = remote_execution_pb2.Digest()
+ d.hash = filenode.digest.hash
+ d.size_bytes = filenode.digest.size_bytes
+ yield d
+
+ for dirnode in directory.directories:
+ yield from self._required_blobs(dirnode.digest)
+
+ def _fetch_blob(self, remote, digest, out):
+ resource_name = os.path.join(digest.hash, str(digest.size_bytes))
+ request = bytestream_pb2.ReadRequest()
+ request.resource_name = resource_name
+ request.read_offset = 0
+ for response in remote.bytestream.Read(request):
+ out.write(response.data)
+
+ out.flush()
+ assert digest.size_bytes == os.fstat(out.fileno()).st_size
+
+ def _fetch_directory(self, remote, tree):
+ objpath = self.objpath(tree)
+ if os.path.exists(objpath):
+ # already in local cache
+ return
+
+ with tempfile.NamedTemporaryFile(dir=os.path.join(self.casdir, 'tmp')) as out:
+ self._fetch_blob(remote, tree, out)
+
+ directory = remote_execution_pb2.Directory()
+
+ with open(out.name, 'rb') as f:
+ directory.ParseFromString(f.read())
+
+ for filenode in directory.files:
+ fileobjpath = self.objpath(filenode.digest)
+ if os.path.exists(fileobjpath):
+ # already in local cache
+ continue
+
+ with tempfile.NamedTemporaryFile(dir=os.path.join(self.casdir, 'tmp')) as f:
+ self._fetch_blob(remote, filenode.digest, f)
+
+ digest = self.add_object(path=f.name)
+ assert digest.hash == filenode.digest.hash
+
+ for dirnode in directory.directories:
+ self._fetch_directory(remote, dirnode.digest)
+
+ # place directory blob only in final location when we've downloaded
+ # all referenced blobs to avoid dangling references in the repository
+ digest = self.add_object(path=out.name)
+ assert digest.hash == tree.hash
+
+
+# Represents a single remote CAS cache.
+#
+class _CASRemote():
+ def __init__(self, spec):
+ self.spec = spec
+ self._initialized = False
+ self.channel = None
+ self.bytestream = None
+ self.cas = None
+ self.artifact_cache = None
+
+ def init(self):
+ if not self._initialized:
+ url = urlparse(self.spec.url)
+ if url.scheme == 'http':
+ port = url.port or 80
+ self.channel = grpc.insecure_channel('{}:{}'.format(url.hostname, port))
+ elif url.scheme == 'https':
+ port = url.port or 443
+
+ if self.spec.server_cert:
+ with open(self.spec.server_cert, 'rb') as f:
+ server_cert_bytes = f.read()
+ else:
+ server_cert_bytes = None
+
+ if self.spec.client_key:
+ with open(self.spec.client_key, 'rb') as f:
+ client_key_bytes = f.read()
+ else:
+ client_key_bytes = None
+
+ if self.spec.client_cert:
+ with open(self.spec.client_cert, 'rb') as f:
+ client_cert_bytes = f.read()
+ else:
+ client_cert_bytes = None
+
+ credentials = grpc.ssl_channel_credentials(root_certificates=server_cert_bytes,
+ private_key=client_key_bytes,
+ certificate_chain=client_cert_bytes)
+ self.channel = grpc.secure_channel('{}:{}'.format(url.hostname, port), credentials)
+ else:
+ raise ArtifactError("Unsupported URL: {}".format(self.spec.url))
+
+ self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
+ self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
+ self.artifact_cache = buildstream_pb2_grpc.ArtifactCacheStub(self.channel)
+
+ self._initialized = True
+
+
+def _grouper(iterable, n):
+ # pylint: disable=stop-iteration-return
+ while True:
+ yield itertools.chain([next(iterable)], itertools.islice(iterable, n - 1))
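
A note on _grouper() above, which push() uses to batch _required_blobs() into FindMissingBlobs requests of 512 digests: each yielded group is an itertools.chain over the same underlying iterator, so a group must be fully consumed before the next one is requested. A standalone sketch, with an explicit return, since under PEP 479 (Python 3.7+) a bare next() inside a generator raises RuntimeError at exhaustion — the pylint pragma in the original only silences the lint warning:

    # Standalone sketch of the _grouper() batching used by push().
    import itertools

    def grouper(iterable, n):
        while True:
            try:
                first = next(iterable)
            except StopIteration:
                return    # explicit end-of-iteration, safe under PEP 479
            yield itertools.chain([first], itertools.islice(iterable, n - 1))

    batches = [list(g) for g in grouper(iter(range(10)), 4)]
    assert batches == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
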
diff --git a/buildstream/_artifactcache/casserver.py b/buildstream/_artifactcache/casserver.py
new file mode 100644
index 000000000..bfda8c1e4
--- /dev/null
+++ b/buildstream/_artifactcache/casserver.py
@@ -0,0 +1,246 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+from concurrent import futures
+import os
+import signal
+import sys
+import tempfile
+
+import click
+import grpc
+
+from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
+from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
+from .._protos.buildstream import buildstream_pb2, buildstream_pb2_grpc
+
+from .._exceptions import ArtifactError
+from .._context import Context
+
+from .cascache import CASCache
+
+
+# create_server():
+#
+# Create gRPC CAS artifact server as specified in the Remote Execution API.
+#
+# Args:
+# repo (str): Path to CAS repository
+# enable_push (bool): Whether to allow blob uploads and artifact updates
+#
+def create_server(repo, *, enable_push):
+ context = Context()
+ context.artifactdir = repo
+
+ artifactcache = CASCache(context)
+
+ # Use max_workers default from Python 3.5+
+ max_workers = (os.cpu_count() or 1) * 5
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers))
+
+ bytestream_pb2_grpc.add_ByteStreamServicer_to_server(
+ _ByteStreamServicer(artifactcache, enable_push=enable_push), server)
+
+ remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
+ _ContentAddressableStorageServicer(artifactcache), server)
+
+ buildstream_pb2_grpc.add_ArtifactCacheServicer_to_server(
+ _ArtifactCacheServicer(artifactcache, enable_push=enable_push), server)
+
+ return server
+
+
+@click.command(short_help="CAS Artifact Server")
+@click.option('--port', '-p', type=click.INT, required=True, help="Port number")
+@click.option('--server-key', help="Private server key for TLS (PEM-encoded)")
+@click.option('--server-cert', help="Public server certificate for TLS (PEM-encoded)")
+@click.option('--client-certs', help="Public client certificates for TLS (PEM-encoded)")
+@click.option('--enable-push', default=False, is_flag=True,
+ help="Allow clients to upload blobs and update artifact cache")
+@click.argument('repo')
+def server_main(repo, port, server_key, server_cert, client_certs, enable_push):
+ server = create_server(repo, enable_push=enable_push)
+
+ use_tls = bool(server_key)
+
+ if bool(server_cert) != use_tls:
+ click.echo("ERROR: --server-key and --server-cert are both required for TLS", err=True)
+ sys.exit(-1)
+
+ if client_certs and not use_tls:
+ click.echo("ERROR: --client-certs can only be used with --server-key", err=True)
+ sys.exit(-1)
+
+ if use_tls:
+ # Read public/private key pair
+ with open(server_key, 'rb') as f:
+ server_key_bytes = f.read()
+ with open(server_cert, 'rb') as f:
+ server_cert_bytes = f.read()
+
+ if client_certs:
+ with open(client_certs, 'rb') as f:
+ client_certs_bytes = f.read()
+ else:
+ client_certs_bytes = None
+
+ credentials = grpc.ssl_server_credentials([(server_key_bytes, server_cert_bytes)],
+ root_certificates=client_certs_bytes,
+ require_client_auth=bool(client_certs))
+ server.add_secure_port('[::]:{}'.format(port), credentials)
+ else:
+ server.add_insecure_port('[::]:{}'.format(port))
+
+ # Run artifact server
+ server.start()
+ try:
+ while True:
+ signal.pause()
+ except KeyboardInterrupt:
+ server.stop(0)
+
+
+class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
+ def __init__(self, cas, *, enable_push):
+ super().__init__()
+ self.cas = cas
+ self.enable_push = enable_push
+
+ def Read(self, request, context):
+ resource_name = request.resource_name
+ client_digest = _digest_from_resource_name(resource_name)
+ assert request.read_offset <= client_digest.size_bytes
+
+ with open(self.cas.objpath(client_digest), 'rb') as f:
+ assert os.fstat(f.fileno()).st_size == client_digest.size_bytes
+ if request.read_offset > 0:
+ f.seek(request.read_offset)
+
+ remaining = client_digest.size_bytes - request.read_offset
+ while remaining > 0:
+ chunk_size = min(remaining, 64 * 1024)
+ remaining -= chunk_size
+
+ response = bytestream_pb2.ReadResponse()
+ # max. 64 kB chunks
+ response.data = f.read(chunk_size)
+ yield response
+
+ def Write(self, request_iterator, context):
+ response = bytestream_pb2.WriteResponse()
+
+ if not self.enable_push:
+ context.set_code(grpc.StatusCode.PERMISSION_DENIED)
+ return response
+
+ offset = 0
+ finished = False
+ resource_name = None
+ with tempfile.NamedTemporaryFile(dir=os.path.join(self.cas.casdir, 'tmp')) as out:
+ for request in request_iterator:
+ assert not finished
+ assert request.write_offset == offset
+ if resource_name is None:
+ # First request
+ resource_name = request.resource_name
+ client_digest = _digest_from_resource_name(resource_name)
+ elif request.resource_name:
+ # If it is set on subsequent calls, it **must** match the value of the first request.
+ assert request.resource_name == resource_name
+ out.write(request.data)
+ offset += len(request.data)
+ if request.finish_write:
+ assert client_digest.size_bytes == offset
+ out.flush()
+ digest = self.cas.add_object(path=out.name)
+ assert digest.hash == client_digest.hash
+ finished = True
+
+ assert finished
+
+ response.committed_size = offset
+ return response
+
+
+class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
+ def __init__(self, cas):
+ super().__init__()
+ self.cas = cas
+
+ def FindMissingBlobs(self, request, context):
+ response = remote_execution_pb2.FindMissingBlobsResponse()
+ for digest in request.blob_digests:
+ if not _has_object(self.cas, digest):
+ d = response.missing_blob_digests.add()
+ d.hash = digest.hash
+ d.size_bytes = digest.size_bytes
+ return response
+
+
+class _ArtifactCacheServicer(buildstream_pb2_grpc.ArtifactCacheServicer):
+ def __init__(self, cas, *, enable_push):
+ super().__init__()
+ self.cas = cas
+ self.enable_push = enable_push
+
+ def GetArtifact(self, request, context):
+ response = buildstream_pb2.GetArtifactResponse()
+
+ try:
+ tree = self.cas.resolve_ref(request.key)
+
+ response.artifact.hash = tree.hash
+ response.artifact.size_bytes = tree.size_bytes
+ except ArtifactError:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+
+ return response
+
+ def UpdateArtifact(self, request, context):
+ response = buildstream_pb2.UpdateArtifactResponse()
+
+ if not self.enable_push:
+ context.set_code(grpc.StatusCode.PERMISSION_DENIED)
+ return response
+
+ for key in request.keys:
+ self.cas.set_ref(key, request.artifact)
+
+ return response
+
+ def Status(self, request, context):
+ response = buildstream_pb2.StatusResponse()
+
+ response.allow_updates = self.enable_push
+
+ return response
+
+
+def _digest_from_resource_name(resource_name):
+ parts = resource_name.split('/')
+ assert len(parts) == 2
+ digest = remote_execution_pb2.Digest()
+ digest.hash = parts[0]
+ digest.size_bytes = int(parts[1])
+ return digest
+
+
+def _has_object(cas, digest):
+ objpath = cas.objpath(digest)
+ return os.path.exists(objpath)
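
The two helpers above pin down the resource-name convention this server shares with _CASRemote: a bare '<hash>/<size_bytes>' pair (the full Remote Execution API nests blob names under a 'blobs/' segment, which this simplified server does not use). A worked round trip from resource name to on-disk object path, using the sha256 of the empty blob (the repository path is a hypothetical example):

    # Worked example combining _digest_from_resource_name(), _has_object()
    # and CASCache.objpath().
    import hashlib
    import os

    empty_hash = hashlib.sha256(b'').hexdigest()
    resource_name = '{}/{}'.format(empty_hash, 0)

    parts = resource_name.split('/')
    assert len(parts) == 2
    digest_hash, size_bytes = parts[0], int(parts[1])

    # objpath() shards the object store by the first two hex digits:
    objpath = os.path.join('/srv/artifacts/repo/cas', 'objects',
                           digest_hash[:2], digest_hash[2:])
    # -> .../objects/e3/b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
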
diff --git a/buildstream/_artifactcache/ostreecache.py b/buildstream/_artifactcache/ostreecache.py
deleted file mode 100644
index 707a974eb..000000000
--- a/buildstream/_artifactcache/ostreecache.py
+++ /dev/null
@@ -1,377 +0,0 @@
-#
-# Copyright (C) 2017-2018 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library. If not, see <http://www.gnu.org/licenses/>.
-#
-# Authors:
-# Jürg Billeter <juerg.billeter@codethink.co.uk>
-
-import multiprocessing
-import os
-import signal
-import tempfile
-
-from .. import _ostree, _signals, utils
-from .._exceptions import ArtifactError
-from .._ostree import OSTreeError
-
-from . import ArtifactCache
-from .pushreceive import initialize_push_connection
-from .pushreceive import push as push_artifact
-from .pushreceive import PushException
-
-
-# An OSTreeCache manages artifacts in an OSTree repository
-#
-# Args:
-# context (Context): The BuildStream context
-# project (Project): The BuildStream project
-# enable_push (bool): Whether pushing is allowed by the platform
-#
-# Pushing is explicitly disabled by the platform in some cases,
-# like when we are falling back to functioning without using
-# user namespaces.
-#
-class OSTreeCache(ArtifactCache):
-
- def __init__(self, context, *, enable_push):
- super().__init__(context)
-
- self.enable_push = enable_push
-
- ostreedir = os.path.join(context.artifactdir, 'ostree')
- self.repo = _ostree.ensure(ostreedir, False)
-
- # Per-project list of OSTreeRemote instances.
- self._remotes = {}
-
- self._has_fetch_remotes = False
- self._has_push_remotes = False
-
- ################################################
- # Implementation of abstract methods #
- ################################################
- def has_fetch_remotes(self, *, element=None):
- if not self._has_fetch_remotes:
- # No project has push remotes
- return False
- elif element is None:
- # At least one (sub)project has fetch remotes
- return True
- else:
- # Check whether the specified element's project has fetch remotes
- remotes_for_project = self._remotes[element._get_project()]
- return bool(remotes_for_project)
-
- def has_push_remotes(self, *, element=None):
- if not self._has_push_remotes:
- # No project has push remotes
- return False
- elif element is None:
- # At least one (sub)project has push remotes
- return True
- else:
- # Check whether the specified element's project has push remotes
- remotes_for_project = self._remotes[element._get_project()]
- return any(remote.spec.push for remote in remotes_for_project)
-
- def contains(self, element, key):
- ref = self.get_artifact_fullname(element, key)
- return _ostree.exists(self.repo, ref)
-
- def extract(self, element, key):
- ref = self.get_artifact_fullname(element, key)
-
- # resolve ref to checksum
- rev = _ostree.checksum(self.repo, ref)
-
- # Extracting a nonexistent artifact is a bug
- assert rev, "Artifact missing for {}".format(ref)
-
- dest = os.path.join(self.extractdir, element._get_project().name, element.normal_name, rev)
- if os.path.isdir(dest):
- # artifact has already been extracted
- return dest
-
- os.makedirs(self.extractdir, exist_ok=True)
- with tempfile.TemporaryDirectory(prefix='tmp', dir=self.extractdir) as tmpdir:
-
- checkoutdir = os.path.join(tmpdir, ref)
-
- _ostree.checkout(self.repo, checkoutdir, rev, user=True)
-
- os.makedirs(os.path.dirname(dest), exist_ok=True)
- try:
- os.rename(checkoutdir, dest)
- except OSError as e:
- # With rename, it's possible to get either ENOTEMPTY or EEXIST
- # in the case that the destination path is a not empty directory.
- #
- # If rename fails with these errors, another process beat
- # us to it so just ignore.
- if e.errno not in [os.errno.ENOTEMPTY, os.errno.EEXIST]:
- raise ArtifactError("Failed to extract artifact for ref '{}': {}"
- .format(ref, e)) from e
-
- return dest
-
- def commit(self, element, content, keys):
- refs = [self.get_artifact_fullname(element, key) for key in keys]
-
- try:
- _ostree.commit(self.repo, content, refs)
- except OSTreeError as e:
- raise ArtifactError("Failed to commit artifact: {}".format(e)) from e
-
- def can_diff(self):
- return True
-
- def diff(self, element, key_a, key_b, *, subdir=None):
- _, a, _ = self.repo.read_commit(self.get_artifact_fullname(element, key_a))
- _, b, _ = self.repo.read_commit(self.get_artifact_fullname(element, key_b))
-
- if subdir:
- a = a.get_child(subdir)
- b = b.get_child(subdir)
-
- subpath = a.get_path()
- else:
- subpath = '/'
-
- modified, removed, added = _ostree.diff_dirs(a, b)
-
- modified = [os.path.relpath(item.target.get_path(), subpath) for item in modified]
- removed = [os.path.relpath(item.get_path(), subpath) for item in removed]
- added = [os.path.relpath(item.get_path(), subpath) for item in added]
-
- return modified, removed, added
-
- def pull(self, element, key, *, progress=None):
- project = element._get_project()
-
- ref = self.get_artifact_fullname(element, key)
-
- for remote in self._remotes[project]:
- try:
- # fetch the artifact from highest priority remote using the specified cache key
- remote_name = self._ensure_remote(self.repo, remote.pull_url)
- _ostree.fetch(self.repo, remote=remote_name, ref=ref, progress=progress)
- return True
- except OSTreeError:
- # Try next remote
- continue
-
- return False
-
- def link_key(self, element, oldkey, newkey):
- oldref = self.get_artifact_fullname(element, oldkey)
- newref = self.get_artifact_fullname(element, newkey)
-
- # resolve ref to checksum
- rev = _ostree.checksum(self.repo, oldref)
-
- # create additional ref for the same checksum
- _ostree.set_ref(self.repo, newref, rev)
-
- def push(self, element, keys):
- any_pushed = False
-
- project = element._get_project()
-
- push_remotes = [r for r in self._remotes[project] if r.spec.push]
-
- if not push_remotes:
- raise ArtifactError("Push is not enabled for any of the configured remote artifact caches.")
-
- refs = [self.get_artifact_fullname(element, key) for key in keys]
-
- for remote in push_remotes:
- any_pushed |= self._push_to_remote(remote, element, refs)
-
- return any_pushed
-
- def initialize_remotes(self, *, on_failure=None):
- remote_specs = self.global_remote_specs.copy()
-
- for project in self.project_remote_specs:
- remote_specs.extend(self.project_remote_specs[project])
-
- remote_specs = list(utils._deduplicate(remote_specs))
-
- remote_results = {}
-
- # Callback to initialize one remote in a 'multiprocessing' subprocess.
- #
- # We cannot do this in the main process because of the way the tasks
- # run by the main scheduler calls into libostree using
- # fork()-without-exec() subprocesses. OSTree fetch operations in
- # subprocesses hang if fetch operations were previously done in the
- # main process.
- #
- def child_action(url, q):
- try:
- push_url, pull_url = self._initialize_remote(url)
- q.put((None, push_url, pull_url))
- except Exception as e: # pylint: disable=broad-except
- # Whatever happens, we need to return it to the calling process
- #
- q.put((str(e), None, None))
-
- # Kick off all the initialization jobs one by one.
- #
- # Note that we cannot use multiprocessing.Pool here because it's not
- # possible to pickle local functions such as child_action().
- #
- q = multiprocessing.Queue()
- for remote_spec in remote_specs:
- p = multiprocessing.Process(target=child_action, args=(remote_spec.url, q))
-
- try:
-
- # Keep SIGINT blocked in the child process
- with _signals.blocked([signal.SIGINT], ignore=False):
- p.start()
-
- error, push_url, pull_url = q.get()
- p.join()
- except KeyboardInterrupt:
- utils._kill_process_tree(p.pid)
- raise
-
- if error and on_failure:
- on_failure(remote_spec.url, error)
- elif error:
- raise ArtifactError(error)
- else:
- if remote_spec.push and push_url:
- self._has_push_remotes = True
- if pull_url:
- self._has_fetch_remotes = True
-
- remote_results[remote_spec.url] = (push_url, pull_url)
-
- # Prepare push_urls and pull_urls for each project
- for project in self.context.get_projects():
- remote_specs = self.global_remote_specs
- if project in self.project_remote_specs:
- remote_specs = list(utils._deduplicate(remote_specs + self.project_remote_specs[project]))
-
- remotes = []
-
- for remote_spec in remote_specs:
- # Errors are already handled in the loop above,
- # skip unreachable remotes here.
- if remote_spec.url not in remote_results:
- continue
-
- push_url, pull_url = remote_results[remote_spec.url]
-
- if remote_spec.push and not push_url:
- raise ArtifactError("Push enabled but not supported by repo at: {}".format(remote_spec.url))
-
- remote = _OSTreeRemote(remote_spec, pull_url, push_url)
- remotes.append(remote)
-
- self._remotes[project] = remotes
-
- ################################################
- # Local Private Methods #
- ################################################
-
- # _initialize_remote():
- #
- # Do protocol-specific initialization necessary to use a given OSTree
- # remote.
- #
- # The SSH protocol that we use only supports pushing so initializing these
- # involves contacting the remote to find out the corresponding pull URL.
- #
- # Args:
- # url (str): URL of the remote
- #
- # Returns:
- # (str, str): the pull URL and push URL for the remote
- #
- # Raises:
- # ArtifactError: if there was an error
- def _initialize_remote(self, url):
- if url.startswith('ssh://'):
- try:
- push_url = url
- pull_url = initialize_push_connection(url)
- except PushException as e:
- raise ArtifactError(e) from e
- elif url.startswith('/'):
- push_url = pull_url = 'file://' + url
- elif url.startswith('file://'):
- push_url = pull_url = url
- elif url.startswith('http://') or url.startswith('https://'):
- push_url = None
- pull_url = url
- else:
- raise ArtifactError("Unsupported URL: {}".format(url))
-
- return push_url, pull_url
-
- # _ensure_remote():
- #
- # Ensure that our OSTree repo has a remote configured for the given URL.
- # Note that SSH access to remotes is not handled by libostree itself.
- #
- # Args:
- # repo (OSTree.Repo): an OSTree repository
- # pull_url (str): the URL where libostree can pull from the remote
- #
- # Returns:
- # (str): the name of the remote, which can be passed to various other
- # operations implemented by the _ostree module.
- #
- # Raises:
- # OSTreeError: if there was a problem reported by libostree
- def _ensure_remote(self, repo, pull_url):
- remote_name = utils.url_directory_name(pull_url)
- _ostree.configure_remote(repo, remote_name, pull_url)
- return remote_name
-
- def _push_to_remote(self, remote, element, refs):
- with utils._tempdir(dir=self.context.artifactdir, prefix='push-repo-') as temp_repo_dir:
-
- with element.timed_activity("Preparing compressed archive"):
- # First create a temporary archive-z2 repository, we can
- # only use ostree-push with archive-z2 local repo.
- temp_repo = _ostree.ensure(temp_repo_dir, True)
-
- # Now push the ref we want to push into our temporary archive-z2 repo
- for ref in refs:
- _ostree.fetch(temp_repo, remote=self.repo.get_path().get_uri(), ref=ref)
-
- with element.timed_activity("Sending artifact"), \
- element._output_file() as output_file:
- try:
- pushed = push_artifact(temp_repo.get_path().get_path(),
- remote.push_url,
- refs, output_file)
- except PushException as e:
- raise ArtifactError("Failed to push artifact {}: {}".format(refs, e)) from e
-
- return pushed
-
-
-# Represents a single remote OSTree cache.
-#
-class _OSTreeRemote():
- def __init__(self, spec, pull_url, push_url):
- self.spec = spec
- self.pull_url = pull_url
- self.push_url = push_url
diff --git a/buildstream/_artifactcache/pushreceive.py b/buildstream/_artifactcache/pushreceive.py
deleted file mode 100644
index 41dacf33f..000000000
--- a/buildstream/_artifactcache/pushreceive.py
+++ /dev/null
@@ -1,903 +0,0 @@
-#!/usr/bin/python3
-
-# Push OSTree commits to a remote repo, based on Dan Nicholson's ostree-push
-#
-# Copyright (C) 2015 Dan Nicholson <nicholson@endlessm.com>
-# Copyright (C) 2017 Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-import logging
-import multiprocessing
-import os
-import re
-import subprocess
-import sys
-import shutil
-import tarfile
-import tempfile
-from enum import Enum
-from urllib.parse import urlparse
-
-import click
-import gi
-
-from .. import _ostree
-from .. import _signals # nopep8
-from .._profile import Topics, profile_start, profile_end
-
-gi.require_version('OSTree', '1.0')
-# pylint: disable=wrong-import-position,wrong-import-order
-from gi.repository import GLib, Gio, OSTree # nopep8
-
-
-PROTO_VERSION = 1
-HEADER_SIZE = 5
-
-
-# An error occurred
-class PushException(Exception):
- pass
-
-
-# Trying to commit a ref which already exists in remote
-class PushExistsException(Exception):
- pass
-
-
-# Trying to push an artifact that is too large
-class ArtifactTooLargeException(Exception):
- pass
-
-
-class PushCommandType(Enum):
- info = 0
- update = 1
- putobjects = 2
- status = 3
- done = 4
-
-
-def python_to_msg_byteorder(python_byteorder=sys.byteorder):
- if python_byteorder == 'little':
- return 'l'
- elif python_byteorder == 'big':
- return 'B'
- else:
- raise PushException('Unrecognized system byteorder {}'
- .format(python_byteorder))
-
-
-def msg_to_python_byteorder(msg_byteorder):
- if msg_byteorder == 'l':
- return 'little'
- elif msg_byteorder == 'B':
- return 'big'
- else:
- raise PushException('Unrecognized message byteorder {}'
- .format(msg_byteorder))
-
-
-def ostree_object_path(repo, obj):
- repodir = repo.get_path().get_path()
- return os.path.join(repodir, 'objects', obj[0:2], obj[2:])
-
-
-class PushCommand(object):
- def __init__(self, cmdtype, args):
- self.cmdtype = cmdtype
- self.args = args
- self.validate(self.cmdtype, self.args)
- self.variant = GLib.Variant('a{sv}', self.args)
-
- @staticmethod
- def validate(command, args):
- if not isinstance(command, PushCommandType):
- raise PushException('Message command must be PushCommandType')
- if not isinstance(args, dict):
- raise PushException('Message args must be dict')
- # Ensure all values are variants for a{sv} vardict
- for val in args.values():
- if not isinstance(val, GLib.Variant):
- raise PushException('Message args values must be '
- 'GLib.Variant')
-
-
-class PushMessageWriter(object):
- def __init__(self, file, byteorder=sys.byteorder):
- self.file = file
- self.byteorder = byteorder
- self.msg_byteorder = python_to_msg_byteorder(self.byteorder) # 'l' or 'B'
-
- def encode_header(self, cmdtype, size):
- header = self.msg_byteorder.encode() + \
- PROTO_VERSION.to_bytes(1, self.byteorder) + \
- cmdtype.value.to_bytes(1, self.byteorder) + \
- size.to_bytes(2, self.byteorder)
- return header
-
- def encode_message(self, command):
- if not isinstance(command, PushCommand):
- raise PushException('Command must be PushCommand')
- data = command.variant.get_data_as_bytes()
- size = data.get_size()
-
- # Build the header
- header = self.encode_header(command.cmdtype, size)
-
- return header + data.get_data()
-
- def write(self, command):
- msg = self.encode_message(command)
- self.file.write(msg)
- self.file.flush()
-
- def send_hello(self):
- # The 'hello' message is used to check connectivity and discover the
- # cache's pull URL. It's actually transmitted as an empty info request.
- args = {
- 'mode': GLib.Variant('i', 0),
- 'refs': GLib.Variant('a{ss}', {})
- }
- command = PushCommand(PushCommandType.info, args)
- self.write(command)
-
- def send_info(self, repo, refs, pull_url=None):
- cmdtype = PushCommandType.info
- mode = repo.get_mode()
-
- ref_map = {}
- for ref in refs:
- _, checksum = repo.resolve_rev(ref, True)
- if checksum:
- _, has_object = repo.has_object(OSTree.ObjectType.COMMIT, checksum, None)
- if has_object:
- ref_map[ref] = checksum
-
- args = {
- 'mode': GLib.Variant('i', mode),
- 'refs': GLib.Variant('a{ss}', ref_map)
- }
-
- # The server sends this so clients can discover the correct pull URL
- # for this cache without requiring end-users to specify it.
- if pull_url:
- args['pull_url'] = GLib.Variant('s', pull_url)
-
- command = PushCommand(cmdtype, args)
- self.write(command)
-
- def send_update(self, refs):
- cmdtype = PushCommandType.update
- args = {}
- for branch, revs in refs.items():
- args[branch] = GLib.Variant('(ss)', revs)
- command = PushCommand(cmdtype, args)
- self.write(command)
-
- def send_putobjects(self, repo, objects):
-
- logging.info('Sending {} objects'.format(len(objects)))
-
- # Send command saying we're going to send a stream of objects
- cmdtype = PushCommandType.putobjects
- command = PushCommand(cmdtype, {})
- self.write(command)
-
- # Open a TarFile for writing uncompressed tar to a stream
- tar = tarfile.TarFile.open(mode='w|', fileobj=self.file)
- for obj in objects:
-
- logging.info('Sending object {}'.format(obj))
- objpath = ostree_object_path(repo, obj)
- stat = os.stat(objpath)
-
- tar_info = tarfile.TarInfo(obj)
- tar_info.mtime = stat.st_mtime
- tar_info.size = stat.st_size
- with open(objpath, 'rb') as obj_fp:
- tar.addfile(tar_info, obj_fp)
-
- # We're done, close the tarfile
- tar.close()
-
- def send_status(self, result, message=''):
- cmdtype = PushCommandType.status
- args = {
- 'result': GLib.Variant('b', result),
- 'message': GLib.Variant('s', message)
- }
- command = PushCommand(cmdtype, args)
- self.write(command)
-
- def send_done(self):
- command = PushCommand(PushCommandType.done, {})
- self.write(command)
-
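-# The full push conversation, as driven by OSTreePusher and OSTreeReceiver
-# below, is roughly (a sketch):
-#
-#   pusher: info       receiver: info (with pull_url)
-#   pusher: update     receiver: status
-#   pusher: putobjects (tar stream), then done
-#   receiver: done once the refs have been updated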
-
-class PushMessageReader(object):
- def __init__(self, file, byteorder=sys.byteorder, tmpdir=None):
- self.file = file
- self.byteorder = byteorder
- self.tmpdir = tmpdir
-
- def decode_header(self, header):
- if len(header) != HEADER_SIZE:
- raise Exception('Header is {:d} bytes, not {:d}'.format(len(header), HEADER_SIZE))
- order = msg_to_python_byteorder(chr(header[0]))
- version = int(header[1])
- if version != PROTO_VERSION:
- raise Exception('Unsupported protocol version {:d}'.format(version))
- cmdtype = PushCommandType(int(header[2]))
- vlen = int.from_bytes(header[3:], order)
- return order, version, cmdtype, vlen
-
- def decode_message(self, message, size, order):
- if len(message) != size:
- raise Exception('Expected {:d} bytes, but got {:d}'.format(size, len(message)))
- data = GLib.Bytes.new(message)
- variant = GLib.Variant.new_from_bytes(GLib.VariantType.new('a{sv}'),
- data, False)
- if order != self.byteorder:
- variant = GLib.Variant.byteswap(variant)
-
- return variant
-
- def read(self):
- header = self.file.read(HEADER_SIZE)
- if not header:
- # Remote end quit
- return None, None
- order, _, cmdtype, size = self.decode_header(header)
- msg = self.file.read(size)
- if len(msg) != size:
- raise PushException('Did not receive full message')
- args = self.decode_message(msg, size, order)
-
- return cmdtype, args
-
- def receive(self, allowed):
- cmdtype, args = self.read()
- if cmdtype is None:
- raise PushException('Expected reply, got none')
- if cmdtype not in allowed:
- raise PushException('Unexpected reply type', cmdtype.name)
- return cmdtype, args.unpack()
-
- def receive_info(self):
- _, args = self.receive([PushCommandType.info])
- return args
-
- def receive_update(self):
- _, args = self.receive([PushCommandType.update])
- return args
-
- def receive_putobjects(self, repo, repopath):
- received_objects = []
-
- # Determine the available disk space, in bytes, of the file system
- # which mounts the repo
- stats = os.statvfs(repopath)
- buffer_ = int(2e9) # Add a 2 GB buffer
- free_disk_space = (stats.f_bfree * stats.f_bsize) - buffer_
- total_disk_space = (stats.f_blocks * stats.f_bsize) - buffer_
-
- # Open a TarFile for reading uncompressed tar from a stream
- tar = tarfile.TarFile.open(mode='r|', fileobj=self.file)
-
- # Extract every tarinfo into the temp location
- #
- # This should block while tar.next() reads the next
- # tar object from the stream.
- while True:
- filepos = tar.fileobj.tell()
- tar_info = tar.next()
- if not tar_info:
-                # The end-of-stream marker consists of two 512-byte blocks.
-                # Python's tarfile currently stops reading after the first block.
- # Read the second block as well to ensure the stream is at
- # the right position for following messages.
- if tar.fileobj.tell() - filepos < 1024:
- tar.fileobj.read(512)
- break
-
- # obtain size of tar object in bytes
- artifact_size = tar_info.size
-
- if artifact_size > total_disk_space:
- raise ArtifactTooLargeException("Artifact of size: {} is too large for "
- "the filesystem which mounts the remote "
- "cache".format(artifact_size))
-
- if artifact_size > free_disk_space:
- # Clean up the cache with a buffer of 2GB
- removed_size = clean_up_cache(repo, artifact_size, free_disk_space)
- free_disk_space += removed_size
-
- tar.extract(tar_info, self.tmpdir)
- free_disk_space -= artifact_size
- received_objects.append(tar_info.name)
-
- # Finished with this stream
- tar.close()
-
- return received_objects
-
- def receive_status(self):
- _, args = self.receive([PushCommandType.status])
- return args
-
- def receive_done(self):
- _, args = self.receive([PushCommandType.done])
- return args
-
-
-def parse_remote_location(remotepath):
- """Parse remote artifact cache URL that's been specified in our config."""
- remote_host = remote_user = remote_repo = None
-
- url = urlparse(remotepath, scheme='file')
- if url.scheme:
- if url.scheme not in ['file', 'ssh']:
- raise PushException('Only URL schemes file and ssh are allowed, '
- 'not "{}"'.format(url.scheme))
- remote_host = url.hostname
- remote_user = url.username
- remote_repo = url.path
- remote_port = url.port or 22
- else:
- # Scp/git style remote (user@hostname:path)
- parts = remotepath.split('@', 1)
- if len(parts) > 1:
- remote_user = parts[0]
- remainder = parts[1]
- else:
- remote_user = None
- remainder = parts[0]
- parts = remainder.split(':', 1)
- if len(parts) != 2:
- raise PushException('Remote repository "{}" does not '
- 'contain a hostname and path separated '
- 'by ":"'.format(remotepath))
- remote_host, remote_repo = parts
- # This form doesn't make it possible to specify a non-standard port.
- remote_port = 22
-
- return remote_host, remote_user, remote_repo, remote_port
-
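-# A usage sketch with hypothetical remotes:
-#
-#   parse_remote_location('ssh://artifacts@cache.example.com:2200/repo')
-#   # -> ('cache.example.com', 'artifacts', '/repo', 2200)
-#
-#   parse_remote_location('/srv/artifact-repo')
-#   # -> (None, None, '/srv/artifact-repo', 22)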
-
-def ssh_commandline(remote_host, remote_user=None, remote_port=22):
- if remote_host is None:
- return []
-
- ssh_cmd = ['ssh']
- if remote_user:
- ssh_cmd += ['-l', remote_user]
- if remote_port != 22:
- ssh_cmd += ['-p', str(remote_port)]
- ssh_cmd += [remote_host]
- return ssh_cmd
-
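-# e.g. (hypothetical remote):
-#
-#   ssh_commandline('cache.example.com', 'artifacts', 2200)
-#   # -> ['ssh', '-l', 'artifacts', '-p', '2200', 'cache.example.com']
-#
-#   ssh_commandline(None)
-#   # -> []  (local repository, nothing to wrap in ssh)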
-
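-# Child-process entry point: rebind the standard streams to the given
-# pipe file descriptors, then run func(args) in this process.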
-def foo_run(func, args, stdin_fd, stdout_fd, stderr_fd):
- sys.stdin = open(stdin_fd, 'r')
- sys.stdout = open(stdout_fd, 'w')
- sys.stderr = open(stderr_fd, 'w')
- func(args)
-
-
-class ProcessWithPipes(object):
- def __init__(self, func, args, *, stderr=None):
- # Create a pipe and return a pair of file descriptors (r, w)
- r0, w0 = os.pipe()
- r1, w1 = os.pipe()
- if stderr is None:
- r2, w2 = os.pipe()
- else:
- w2 = stderr.fileno()
- self.proc = multiprocessing.Process(target=foo_run, args=(func, args, r0, w1, w2))
- self.proc.start()
- self.stdin = open(w0, 'wb')
- os.close(r0)
- self.stdout = open(r1, 'rb')
- os.close(w1)
- if stderr is None:
- self.stderr = open(r2, 'rb')
- os.close(w2)
-
- # The eventual return code
- self.returncode = -1
-
- def wait(self):
- self.proc.join()
- self.returncode = self.proc.exitcode
-
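-# A hedged usage sketch, mirroring what OSTreePusher does for local
-# (non-ssh) remotes below:
-#
-#   proc = ProcessWithPipes(receive_main, ['--pull-url', '/srv/repo', '/srv/repo'])
-#   writer = PushMessageWriter(proc.stdin)
-#   reader = PushMessageReader(proc.stdout)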
-
-class OSTreePusher(object):
- def __init__(self, repopath, remotepath, branches=None, verbose=False,
- debug=False, output=None):
- self.repopath = repopath
- self.remotepath = remotepath
- self.verbose = verbose
- self.debug = debug
- self.output = output
-
- self.remote_host, self.remote_user, self.remote_repo, self.remote_port = \
- parse_remote_location(remotepath)
-
- if self.repopath is None:
- self.repo = OSTree.Repo.new_default()
- else:
- self.repo = OSTree.Repo.new(Gio.File.new_for_path(self.repopath))
- self.repo.open(None)
-
- # Enumerate branches to push
- if branches is None:
- # obtain a dict of 'refs': 'checksums'
- _, self.refs = self.repo.list_refs(None, None)
- else:
- self.refs = {}
- for branch in branches:
- # branch is a ref, now find its checksum (i.e. rev)
- _, rev = self.repo.resolve_rev(branch, False)
- self.refs[branch] = rev
-
- # Start ssh
- ssh_cmd = ssh_commandline(self.remote_host, self.remote_user, self.remote_port)
-
- ssh_cmd += ['bst-artifact-receive']
- if self.verbose:
- ssh_cmd += ['--verbose']
- if self.debug:
- ssh_cmd += ['--debug']
- if not self.remote_host:
- ssh_cmd += ['--pull-url', self.remote_repo]
- ssh_cmd += [self.remote_repo]
-
- logging.info('Executing {}'.format(' '.join(ssh_cmd)))
-
- if self.remote_host:
-            # subprocess.Popen() runs the receiver in a child process and
-            # gives us open file objects connected to its stdin and stdout
-            # pipes.
- self.ssh = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=self.output,
- start_new_session=True)
- else:
- self.ssh = ProcessWithPipes(receive_main, ssh_cmd[1:], stderr=self.output)
-
- self.writer = PushMessageWriter(self.ssh.stdin)
- self.reader = PushMessageReader(self.ssh.stdout)
-
- def needed_commits(self, remote, local, needed):
- parent = local
- if remote == '0' * 64:
- # Nonexistent remote branch, use None for convenience
- remote = None
- while parent != remote:
- needed.add(parent)
- _, commit = self.repo.load_variant_if_exists(OSTree.ObjectType.COMMIT,
- parent)
- if commit is None:
- raise PushException('Shallow history from commit {} does '
- 'not contain remote commit {}'.format(local, remote))
- parent = OSTree.commit_get_parent(commit)
- if parent is None:
- break
- if remote is not None and parent != remote:
- self.writer.send_done()
-            raise PushExistsException('Remote commit {} not descendant of '
- 'commit {}'.format(remote, local))
-
- def needed_objects(self, commits):
- objects = set()
- for rev in commits:
- _, reachable = self.repo.traverse_commit(rev, 0, None)
- for obj in reachable:
- objname = OSTree.object_to_string(obj[0], obj[1])
- if obj[1] == OSTree.ObjectType.FILE:
- # Make this a filez since we're archive-z2
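-                    # e.g. (hypothetical checksum) 'ab12.file' -> 'ab12.filez'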
- objname += 'z'
- elif obj[1] == OSTree.ObjectType.COMMIT:
- # Add in detached metadata
- metaobj = objname + 'meta'
- metapath = ostree_object_path(self.repo, metaobj)
- if os.path.exists(metapath):
- objects.add(metaobj)
-
- # Add in Endless compat files
- for suffix in ['sig', 'sizes2']:
- metaobj = obj[0] + '.' + suffix
- metapath = ostree_object_path(self.repo, metaobj)
- if os.path.exists(metapath):
- objects.add(metaobj)
- objects.add(objname)
- return objects
-
- def close(self):
- self.ssh.stdin.close()
- return self.ssh.wait()
-
- def run(self):
- remote_refs = {}
- update_refs = {}
-
- # Send info immediately
- self.writer.send_info(self.repo, list(self.refs.keys()))
-
- # Receive remote info
- logging.info('Receiving repository information')
- args = self.reader.receive_info()
- remote_mode = args['mode']
- if remote_mode != OSTree.RepoMode.ARCHIVE_Z2:
- raise PushException('Can only push to archive-z2 repos')
- remote_refs = args['refs']
- for branch, rev in self.refs.items():
- remote_rev = remote_refs.get(branch, '0' * 64)
- if rev != remote_rev:
-                # If the checksums for a branch differ, record a tuple of
-                # (remote_rev, local_rev) for it in update_refs.
- update_refs[branch] = remote_rev, rev
- if not update_refs:
- logging.info('Nothing to update')
- self.writer.send_done()
- raise PushExistsException('Nothing to update')
-
- # Send update command
- logging.info('Sending update request')
- self.writer.send_update(update_refs)
-
- # Receive status for update request
- args = self.reader.receive_status()
- if not args['result']:
- self.writer.send_done()
- raise PushException(args['message'])
-
- # Collect commits and objects to push
- commits = set()
- exc_info = None
- ref_count = 0
-
-        # For each ref to be updated, collect the commits which need pushing
- for branch, revs in update_refs.items():
- logging.info('Updating {} {} to {}'.format(branch, revs[0], revs[1]))
- try:
- # obtain a set of the commits needed to be pushed
- self.needed_commits(revs[0], revs[1], commits)
- ref_count += 1
- except PushExistsException:
- if exc_info is None:
- exc_info = sys.exc_info()
-
- # Re-raise PushExistsException if all refs exist already
- if ref_count == 0 and exc_info:
- raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
-
- logging.info('Enumerating objects to send')
- # obtain a set of the objects which need to be pushed to the server
- objects = self.needed_objects(commits)
-
- # Send all the objects to receiver, checking status after each
- try:
- self.writer.send_putobjects(self.repo, objects)
- except BrokenPipeError:
- # If the remote closes, we receive a BrokenPipeError
- # Return 1 to notify the frontend that something went
- # wrong on the server.
- return 1
-
- # Inform receiver that all objects have been sent
- self.writer.send_done()
-
- # Wait until receiver has completed
- self.reader.receive_done()
-
- return self.close()
-
-
-# OSTreeReceiver is on the receiving end of an OSTree push.
-#
-# Args:
-# repopath (str): On-disk location of the receiving repository.
-# pull_url (str): Redirection for clients who want to pull, not push.
-#
-class OSTreeReceiver(object):
- def __init__(self, repopath, pull_url):
- self.repopath = repopath
- self.pull_url = pull_url
-
- if self.repopath is None:
- self.repo = OSTree.Repo.new_default()
- self.repopath = self.repo.get_path().get_path()
-            # NOTE: OSTree.Repo.get_path() returns a Gio.File, and
-            # Gio.File.get_path() returns the path as a string
- else:
- self.repo = OSTree.Repo.new(Gio.File.new_for_path(self.repopath))
- self.repo.open(None)
-
- repo_tmp = os.path.join(self.repopath, 'tmp')
- self.tmpdir = tempfile.mkdtemp(dir=repo_tmp, prefix='bst-push-')
- self.writer = PushMessageWriter(sys.stdout.buffer)
- self.reader = PushMessageReader(sys.stdin.buffer, tmpdir=self.tmpdir)
-
- # Set a sane umask before writing any objects
- os.umask(0o0022)
-
- def close(self):
- shutil.rmtree(self.tmpdir)
- sys.stdout.flush()
- return 0
-
- def run(self):
- try:
- exit_code = self.do_run()
- except ArtifactTooLargeException as e:
- logging.warning(str(e))
- exit_code = 0
-
- except:
- # BLIND EXCEPT - Just abort if we receive any exception, this
- # can be a broken pipe, a tarfile read error when the remote
- # connection is closed, a bug; whatever happens we want to cleanup.
- self.close()
- raise
-
- self.close()
- return exit_code
-
- def do_run(self):
- # Receive remote info
- args = self.reader.receive_info()
- remote_refs = args['refs']
-
- # Send info back
- self.writer.send_info(self.repo, list(remote_refs.keys()),
- pull_url=self.pull_url)
-
- # Wait for update or done command
- cmdtype, args = self.reader.receive([PushCommandType.update,
- PushCommandType.done])
- if cmdtype == PushCommandType.done:
- return 0
- update_refs = args
-
- profile_names = set()
- for update_ref in update_refs:
- # Strip off the SHA256 sum on the right of the reference,
- # leaving the project and element name
- project_and_element_name = re.sub(r"/[a-z0-9]+$", '', update_ref)
- profile_names.add(project_and_element_name)
-
- profile_name = '_'.join(profile_names)
- profile_start(Topics.ARTIFACT_RECEIVE, profile_name)
-
- self.writer.send_status(True)
-
- # Wait for putobjects or done
- cmdtype, args = self.reader.receive([PushCommandType.putobjects,
- PushCommandType.done])
-
- if cmdtype == PushCommandType.done:
- logging.debug('Received done before any objects, exiting')
- return 0
-
- # Receive the actual objects
- received_objects = self.reader.receive_putobjects(self.repo, self.repopath)
-
- # Ensure that pusher has sent all objects
- self.reader.receive_done()
-
- # If we didn't get any objects, we're done
- if not received_objects:
- return 0
-
- # Got all objects, move them to the object store
- for obj in received_objects:
- tmp_path = os.path.join(self.tmpdir, obj)
- obj_path = ostree_object_path(self.repo, obj)
- os.makedirs(os.path.dirname(obj_path), exist_ok=True)
- logging.debug('Renaming {} to {}'.format(tmp_path, obj_path))
- os.rename(tmp_path, obj_path)
-
- # Verify that we have the specified commit objects
- for branch, revs in update_refs.items():
- _, has_object = self.repo.has_object(OSTree.ObjectType.COMMIT, revs[1], None)
- if not has_object:
- raise PushException('Missing commit {} for ref {}'.format(revs[1], branch))
-
- # Finally, update the refs
- for branch, revs in update_refs.items():
- logging.debug('Setting ref {} to {}'.format(branch, revs[1]))
- self.repo.set_ref_immediate(None, branch, revs[1], None)
-
- # Inform pusher that everything is in place
- self.writer.send_done()
-
- profile_end(Topics.ARTIFACT_RECEIVE, profile_name)
-
- return 0
-
-
-# initialize_push_connection()
-#
-# Test that we can connect to the remote bst-artifact-receive program, and
-# receive the pull URL for this artifact cache.
-#
-# We don't want to make the user wait until the first artifact has been built
-# to discover that they actually cannot push, so this should be called early.
-#
-# The SSH push protocol doesn't allow pulling artifacts. We don't want to
-# require users to specify two URLs for a single cache, so we have the push
-# protocol return the corresponding pull URL as part of the 'hello' response.
-#
-# Args:
-# remote: The ssh remote url to push to
-#
-# Returns:
-# (str): The URL that should be used for pushing to this cache.
-#
-# Raises:
-# PushException if there was an issue connecting to the remote.
-def initialize_push_connection(remote):
- remote_host, remote_user, remote_repo, remote_port = parse_remote_location(remote)
- ssh_cmd = ssh_commandline(remote_host, remote_user, remote_port)
-
- if remote_host:
- # We need a short timeout here because if 'remote' isn't reachable at
- # all, the process will hang until the connection times out.
- ssh_cmd += ['-oConnectTimeout=3']
-
- ssh_cmd += ['bst-artifact-receive', remote_repo]
-
- if remote_host:
- ssh = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- else:
- ssh_cmd += ['--pull-url', remote_repo]
- ssh = ProcessWithPipes(receive_main, ssh_cmd[1:])
-
- writer = PushMessageWriter(ssh.stdin)
- reader = PushMessageReader(ssh.stdout)
-
- try:
- writer.send_hello()
- args = reader.receive_info()
- writer.send_done()
-
- if 'pull_url' in args:
- pull_url = args['pull_url']
- else:
- raise PushException(
- "Remote cache did not tell us its pull URL. This cache probably "
- "requires updating to a newer version of `bst-artifact-receive`.")
- except PushException as protocol_error:
- # If we get a read error on the wire, let's first see if SSH reported
- # an error such as 'Permission denied'. If so this will be much more
- # useful to the user than the "Expected reply, got none" sort of
- # message that reader.receive_info() will have raised.
- ssh.wait()
- if ssh.returncode != 0:
- ssh_error = ssh.stderr.read().decode('unicode-escape').strip()
- raise PushException("{}".format(ssh_error))
- else:
- raise protocol_error
-
- return pull_url
-
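-# A usage sketch (hypothetical remote):
-#
-#   pull_url = initialize_push_connection('ssh://artifacts@cache.example.com/repo')
-#   # pull_url can now be handed to clients which only need to pull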
-
-# push()
-#
-# Run the pusher in process, with logging going to the output file
-#
-# Args:
-# repo: The local repository path
-# remote: The ssh remote url to push to
-# branches: The refs to push
-# output: The output where logging should go
-#
-# Returns:
-# (bool): True if the remote was updated, False if it already existed
-#           and no update was required
-#
-# Raises:
-# PushException if there was an error
-#
-def push(repo, remote, branches, output):
-
- logging.basicConfig(format='%(module)s: %(levelname)s: %(message)s',
- level=logging.INFO, stream=output)
-
- pusher = OSTreePusher(repo, remote, branches, True, False, output=output)
-
- def terminate_push():
- pusher.close()
-
- with _signals.terminator(terminate_push):
- try:
- pusher.run()
- return True
- except ConnectionError as e:
- # Connection attempt failed or connection was terminated unexpectedly
- terminate_push()
- raise PushException("Connection failed") from e
- except PushException:
- terminate_push()
- raise
- except PushExistsException:
- # If the commit already existed, just bail out
-            # on the push and don't bother re-raising the error
- logging.info("Ref {} was already present in remote {}".format(branches, remote))
- terminate_push()
- return False
-
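-# A usage sketch (values hypothetical):
-#
-#   with open('push.log', 'w') as output:
-#       updated = push('/path/to/repo',
-#                      'ssh://artifacts@cache.example.com/repo',
-#                      ['test/element/d3b0'], output)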
-
-# clean_up_cache()
-#
-# Keep removing Least Recently Pushed (LRP) artifacts in a cache until there
-# is enough space for the incoming artifact
-#
-# Args:
-# repo: OSTree.Repo object
-#    artifact_size: The size of the incoming artifact in bytes
-#    free_disk_space: The available disk space on the file system in bytes
-#
-# Returns:
-# int: The total bytes removed on the filesystem
-#
-def clean_up_cache(repo, artifact_size, free_disk_space):
- # obtain a list of LRP artifacts
- LRP_artifacts = _ostree.list_artifacts(repo)
-
- removed_size = 0 # in bytes
- while artifact_size - removed_size > free_disk_space:
- try:
- to_remove = LRP_artifacts.pop(0) # The first element in the list is the LRP artifact
- except IndexError:
-            # This exception is raised when there are no more artifacts left in
-            # LRP_artifacts. This means the artifact is too large for the
-            # filesystem, so we abort the process.
- raise ArtifactTooLargeException("Artifact of size {} is too large for "
- "the filesystem which mounts the remote "
- "cache".format(artifact_size))
-
- removed_size += _ostree.remove(repo, to_remove, defer_prune=False)
-
- if removed_size > 0:
- logging.info("Successfully removed {} bytes from the cache".format(removed_size))
- else:
- logging.info("No artifacts were removed from the cache.")
-
- return removed_size
-
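-# A worked sketch with hypothetical sizes: with 1 GB of free space and a
-# 3 GB incoming artifact,
-#
-#   removed = clean_up_cache(repo, int(3e9), int(1e9))
-#
-# keeps evicting LRP artifacts until at least 2 GB have been reclaimed,
-# so the artifact fits (or raises ArtifactTooLargeException if the list
-# runs out first).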
-
-@click.command(short_help="Receive pushed artifacts over ssh")
-@click.option('--verbose', '-v', is_flag=True, default=False, help="Verbose mode")
-@click.option('--debug', '-d', is_flag=True, default=False, help="Debug mode")
-@click.option('--pull-url', type=str, required=True,
- help="Clients who try to pull over SSH will be redirected here")
-@click.argument('repo', type=click.Path(file_okay=False, dir_okay=True, writable=True, exists=True))
-def receive_main(verbose, debug, pull_url, repo):
- """A BuildStream sister program for receiving artifacts send to a shared artifact cache
- """
- loglevel = logging.WARNING
- if verbose:
- loglevel = logging.INFO
- if debug:
- loglevel = logging.DEBUG
- logging.basicConfig(format='%(module)s: %(levelname)s: %(message)s',
- level=loglevel, stream=sys.stderr)
-
- receiver = OSTreeReceiver(repo, pull_url)
- return receiver.run()
diff --git a/buildstream/_artifactcache/tarcache.py b/buildstream/_artifactcache/tarcache.py
deleted file mode 100644
index ab814abb0..000000000
--- a/buildstream/_artifactcache/tarcache.py
+++ /dev/null
@@ -1,297 +0,0 @@
-#
-# Copyright (C) 2017 Codethink Limited
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library. If not, see <http://www.gnu.org/licenses/>.
-#
-# Authors:
-# Tristan Maat <tristan.maat@codethink.co.uk>
-
-import os
-import shutil
-import tarfile
-import subprocess
-
-from .. import utils, ProgramNotFoundError
-from .._exceptions import ArtifactError
-
-from . import ArtifactCache
-
-
-class TarCache(ArtifactCache):
-
- def __init__(self, context):
-
- super().__init__(context)
-
- self.tardir = os.path.join(context.artifactdir, 'tar')
- os.makedirs(self.tardir, exist_ok=True)
-
- ################################################
- # Implementation of abstract methods #
- ################################################
- def contains(self, element, key):
- path = os.path.join(self.tardir, _tarpath(element, key))
- return os.path.isfile(path)
-
- def commit(self, element, content, keys):
- os.makedirs(os.path.join(self.tardir, element._get_project().name, element.normal_name), exist_ok=True)
-
- with utils._tempdir() as temp:
- for key in keys:
- ref = _tarpath(element, key)
-
- refdir = os.path.join(temp, key)
- shutil.copytree(content, refdir, symlinks=True)
-
- _Tar.archive(os.path.join(self.tardir, ref), key, temp)
-
- def extract(self, element, key):
-
- fullname = self.get_artifact_fullname(element, key)
- path = _tarpath(element, key)
-
- # Extracting a nonexistent artifact is a bug
- assert os.path.isfile(os.path.join(self.tardir, path)), "Artifact missing for {}".format(fullname)
-
- # If the destination already exists, the artifact has been extracted
- dest = os.path.join(self.extractdir, fullname)
- if os.path.isdir(dest):
- return dest
-
- os.makedirs(self.extractdir, exist_ok=True)
-
- with utils._tempdir(dir=self.extractdir) as tmpdir:
- _Tar.extract(os.path.join(self.tardir, path), tmpdir)
-
- os.makedirs(os.path.join(self.extractdir, element._get_project().name, element.normal_name),
- exist_ok=True)
- try:
- os.rename(os.path.join(tmpdir, key), dest)
- except OSError as e:
- # With rename, it's possible to get either ENOTEMPTY or EEXIST
-                # in the case that the destination path is a non-empty directory.
- #
- # If rename fails with these errors, another process beat
- # us to it so just ignore.
- if e.errno not in [os.errno.ENOTEMPTY, os.errno.EEXIST]:
- raise ArtifactError("Failed to extract artifact '{}': {}"
- .format(fullname, e)) from e
-
- return dest
-
-
-# _tarpath()
-#
-# Generate a relative tarball path for a given element and its cache key
-#
-# Args:
-# element (Element): The Element object
-# key (str): The element's cache key
-#
-# Returns:
-# (str): The relative path to use for storing tarballs
-#
-def _tarpath(element, key):
- project = element._get_project()
- return os.path.join(project.name, element.normal_name, key + '.tar.bz2')
-
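-# e.g. for a project named 'demo', an element whose normal_name is 'alpine'
-# and a hypothetical abbreviated cache key 'f2ca':
-#
-#   _tarpath(element, 'f2ca')   # -> 'demo/alpine/f2ca.tar.bz2'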
-
-# A helper class that contains tar archive/extract functions
-class _Tar():
-
- # archive()
- #
- # Attempt to archive the given tarfile with the `tar` command,
- # falling back to python's `tarfile` if this fails.
- #
- # Args:
- # location (str): The path to the tar to create
-    #     content (str): The path to the content to archive
- # cwd (str): The cwd
- #
- # This is done since AIX tar does not support 2G+ files.
- #
- @classmethod
-    def archive(cls, location, content, cwd=None):
-
-        # Evaluate the working directory at call time; a default of
-        # os.getcwd() in the signature would be frozen at import time.
-        if cwd is None:
-            cwd = os.getcwd()
-
- try:
- cls._archive_with_tar(location, content, cwd)
- return
- except tarfile.TarError:
- pass
- except ProgramNotFoundError:
- pass
-
- # If the former did not complete successfully, we try with
- # python's tar implementation (since it's hard to detect
- # specific issues with specific tar implementations - a
- # fallback).
-
- try:
- cls._archive_with_python(location, content, cwd)
- except tarfile.TarError as e:
- raise ArtifactError("Failed to archive {}: {}"
- .format(location, e)) from e
-
- # extract()
- #
- # Attempt to extract the given tarfile with the `tar` command,
- # falling back to python's `tarfile` if this fails.
- #
- # Args:
- # location (str): The path to the tar to extract
- # dest (str): The destination path to extract to
- #
- # This is done since python tarfile extraction is horrendously
- # slow (2 hrs+ for base images).
- #
- @classmethod
- def extract(cls, location, dest):
-
- try:
- cls._extract_with_tar(location, dest)
- return
- except tarfile.TarError:
- pass
- except ProgramNotFoundError:
- pass
-
- try:
- cls._extract_with_python(location, dest)
- except tarfile.TarError as e:
- raise ArtifactError("Failed to extract {}: {}"
- .format(location, e)) from e
-
- # _get_host_tar()
- #
- # Get the host tar command.
- #
- # Raises:
- # ProgramNotFoundError: If the tar executable cannot be
- # located
- #
- @classmethod
- def _get_host_tar(cls):
- tar_cmd = None
-
- for potential_tar_cmd in ['gtar', 'tar']:
- try:
- tar_cmd = utils.get_host_tool(potential_tar_cmd)
- break
- except ProgramNotFoundError:
- continue
-
-        # If we still couldn't find tar, raise a ProgramNotFoundError
- if tar_cmd is None:
- raise ProgramNotFoundError("Did not find tar in PATH: {}"
- .format(os.environ.get('PATH')))
-
- return tar_cmd
-
- # _archive_with_tar()
- #
- # Archive with an implementation of the `tar` command
- #
- # Args:
- # location (str): The path to the tar to create
-    #     content (str): The path to the content to archive
- # cwd (str): The cwd
- #
- # Raises:
-    #    TarError: If an error occurs during archiving
- # ProgramNotFoundError: If the tar executable cannot be
- # located
- #
- @classmethod
- def _archive_with_tar(cls, location, content, cwd):
- tar_cmd = cls._get_host_tar()
-
- process = subprocess.Popen(
- [tar_cmd, 'jcaf', location, content],
- cwd=cwd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE
- )
-
- _, err = process.communicate()
- if process.poll() != 0:
- # Clean up in case the command failed in a broken state
- try:
- os.remove(location)
- except FileNotFoundError:
- pass
-
- raise tarfile.TarError("Failed to archive '{}': {}"
- .format(content, err.decode('utf8')))
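-
-    # The subprocess call above is roughly equivalent to running, in a
-    # shell (hypothetical paths):
-    #
-    #   cd "$tmpdir" && gtar jcaf /path/to/artifact.tar.bz2 "$key"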
-
- # _archive_with_python()
- #
- # Archive with the python `tarfile` module
- #
- # Args:
- # location (str): The path to the tar to create
-    #     content (str): The path to the content to archive
- # cwd (str): The cwd
- #
- # Raises:
-    #    TarError: If an error occurs during archiving
- #
- @classmethod
- def _archive_with_python(cls, location, content, cwd):
- with tarfile.open(location, mode='w:bz2') as tar:
- tar.add(os.path.join(cwd, content), arcname=content)
-
- # _extract_with_tar()
- #
- # Extract with an implementation of the `tar` command
- #
- # Args:
- # location (str): The path to the tar to extract
- # dest (str): The destination path to extract to
- #
- # Raises:
- # TarError: If an error occurs during extraction
- #
- @classmethod
- def _extract_with_tar(cls, location, dest):
- tar_cmd = cls._get_host_tar()
-
- # Some tar implementations do not support '-C'
- process = subprocess.Popen(
- [tar_cmd, 'jxf', location],
- cwd=dest,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE
- )
-
- _, err = process.communicate()
- if process.poll() != 0:
- raise tarfile.TarError("Failed to extract '{}': {}"
- .format(location, err.decode('utf8')))
-
- # _extract_with_python()
- #
- # Extract with the python `tarfile` module
- #
- # Args:
- # location (str): The path to the tar to extract
- # dest (str): The destination path to extract to
- #
- # Raises:
- # TarError: If an error occurs during extraction
- #
- @classmethod
- def _extract_with_python(cls, location, dest):
- with tarfile.open(location) as tar:
- tar.extractall(path=dest)
diff --git a/buildstream/_ostree.py b/buildstream/_ostree.py
index 238c6b4db..217790d84 100644
--- a/buildstream/_ostree.py
+++ b/buildstream/_ostree.py
@@ -26,7 +26,6 @@
# pylint: disable=bad-exception-context,catching-non-exception
import os
-from collections import namedtuple
import gi
from gi.repository.GLib import Variant, VariantDict
@@ -116,80 +115,6 @@ def checkout(repo, path, commit_, user=False):
raise OSTreeError("Failed to checkout commit '{}': {}".format(commit_, e.message)) from e
-# commit():
-#
-# Commit built artifact to cache.
-#
-# Files are all recorded with uid/gid 0
-#
-# Args:
-# repo (OSTree.Repo): The repo
-# dir_ (str): The source directory to commit to the repo
-# refs (list): A list of symbolic references (tag) for the commit
-#
-def commit(repo, dir_, refs):
-
- def commit_filter(repo, path, file_info):
-
- # For now, just set everything in the repo as uid/gid 0
- #
- # In the future we'll want to extract virtualized file
- # attributes from a fuse layer and use that.
- #
- file_info.set_attribute_uint32('unix::uid', 0)
- file_info.set_attribute_uint32('unix::gid', 0)
-
- return OSTree.RepoCommitFilterResult.ALLOW
-
- commit_modifier = OSTree.RepoCommitModifier.new(
- OSTree.RepoCommitModifierFlags.NONE, commit_filter)
-
- repo.prepare_transaction()
- try:
- # add tree to repository
- mtree = OSTree.MutableTree.new()
- repo.write_directory_to_mtree(Gio.File.new_for_path(dir_),
- mtree, commit_modifier)
- _, root = repo.write_mtree(mtree)
-
- # create root commit object, no parent, no branch
- _, rev = repo.write_commit(None, None, None, None, root)
-
- # create refs
- for ref in refs:
- repo.transaction_set_ref(None, ref, rev)
-
- # complete repo transaction
- repo.commit_transaction(None)
- except GLib.GError as e:
-
- # Reraise any error as a buildstream error
- repo.abort_transaction()
- raise OSTreeError(e.message) from e
-
-
-# set_ref():
-#
-# Set symbolic reference to specified revision.
-#
-# Args:
-# repo (OSTree.Repo): The repo
-# ref (str): A symbolic reference (tag) for the commit
-# rev (str): Commit checksum
-#
-def set_ref(repo, ref, rev):
-
- repo.prepare_transaction()
- try:
- repo.transaction_set_ref(None, ref, rev)
-
- # complete repo transaction
- repo.commit_transaction(None)
- except:
- repo.abort_transaction()
- raise
-
-
# exists():
#
# Checks whether a given commit or symbolic ref exists and
@@ -279,172 +204,6 @@ def checksum(repo, ref):
return checksum_
-OSTREE_GIO_FAST_QUERYINFO = ("standard::name,standard::type,standard::size,"
- "standard::is-symlink,standard::symlink-target,"
- "unix::device,unix::inode,unix::mode,unix::uid,"
- "unix::gid,unix::rdev")
-
-
-DiffItem = namedtuple('DiffItem', ['src', 'src_info',
- 'target', 'target_info',
- 'src_checksum', 'target_checksum'])
-
-
-# diff_dirs():
-#
-# Compute the difference between directories a and b as 3 separate lists
-# of DiffItem tuples.
-#
-# This is more-or-less a direct port of OSTree.diff_dirs (which cannot
-# be used via PyGObject), but does not support options.
-#
-# Args:
-# a (Gio.File): The first directory for the comparison.
-# b (Gio.File): The second directory for the comparison.
-#
-# Returns:
-# (modified, removed, added)
-#
-def diff_dirs(a, b):
- # get_file_checksum():
- #
- # Helper to compute the checksum of an arbitrary file (different
- # objects have different methods to compute these).
- #
- def get_file_checksum(f, f_info):
- if isinstance(f, OSTree.RepoFile):
- return f.get_checksum()
- else:
- contents = None
- if f_info.get_file_type() == Gio.FileType.REGULAR:
- contents = f.read()
-
- csum = OSTree.checksum_file_from_input(f_info, None, contents,
- OSTree.ObjectType.FILE)
- return OSTree.checksum_from_bytes(csum)
-
- # diff_files():
- #
- # Helper to compute a diff between two files.
- #
- def diff_files(a, a_info, b, b_info):
- checksum_a = get_file_checksum(a, a_info)
- checksum_b = get_file_checksum(b, b_info)
-
- if checksum_a != checksum_b:
- return DiffItem(a, a_info, b, b_info, checksum_a, checksum_b)
-
- return None
-
- # diff_add_dir_recurse():
- #
- # Helper to collect all files in a directory recursively.
- #
- def diff_add_dir_recurse(d):
- added = []
-
- dir_enum = d.enumerate_children(OSTREE_GIO_FAST_QUERYINFO,
- Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
-
- for child_info in dir_enum:
- name = child_info.get_name()
- child = d.get_child(name)
- added.append(child)
-
- if child_info.get_file_type() == Gio.FileType.DIRECTORY:
- added.extend(diff_add_dir_recurse(child))
-
- return added
-
- modified = []
- removed = []
- added = []
-
- child_a_info = a.query_info(OSTREE_GIO_FAST_QUERYINFO,
- Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
- child_b_info = b.query_info(OSTREE_GIO_FAST_QUERYINFO,
- Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
-
- # If both are directories and have the same checksum, we know that
- # none of the underlying files changed, so we can save time.
- if (child_a_info.get_file_type() == Gio.FileType.DIRECTORY and
- child_b_info.get_file_type() == Gio.FileType.DIRECTORY and
- isinstance(a, OSTree.RepoFileClass) and
- isinstance(b, OSTree.RepoFileClass)):
- if a.tree_get_contents_checksum() == b.tree_get_contents_checksum():
- return modified, removed, added
-
- # We walk through 'a' first
- dir_enum = a.enumerate_children(OSTREE_GIO_FAST_QUERYINFO,
- Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
- for child_a_info in dir_enum:
- name = child_a_info.get_name()
-
- child_a = a.get_child(name)
- child_a_type = child_a_info.get_file_type()
-
- try:
- child_b = b.get_child(name)
- child_b_info = child_b.query_info(OSTREE_GIO_FAST_QUERYINFO,
- Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
- except GLib.Error as e:
- # If the file does not exist in b, it has been removed
- if e.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_FOUND):
- removed.append(child_a)
- continue
- else:
- raise
-
- # If the files differ but are of different types, we report a
- # modification, saving a bit of time because we won't need a
- # checksum
- child_b_type = child_b_info.get_file_type()
- if child_a_type != child_b_type:
- diff_item = DiffItem(child_a, child_a_info,
- child_b, child_b_info,
- None, None)
- modified.append(diff_item)
- # Finally, we compute checksums and compare the file contents directly
- else:
- diff_item = diff_files(child_a, child_a_info, child_b, child_b_info)
-
- if diff_item:
- modified.append(diff_item)
-
- # If the files are both directories, we recursively use
- # this function to find differences - saving time if they
- # are equal.
- if child_a_type == Gio.FileType.DIRECTORY:
- subdir = diff_dirs(child_a, child_b)
- modified.extend(subdir[0])
- removed.extend(subdir[1])
- added.extend(subdir[2])
-
- # Now we walk through 'b' to find any files that were added
- dir_enum = b.enumerate_children(OSTREE_GIO_FAST_QUERYINFO,
- Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
- for child_b_info in dir_enum:
- name = child_b_info.get_name()
-
- child_b = b.get_child(name)
-
- try:
- child_a = a.get_child(name)
- child_a_info = child_a.query_info(OSTREE_GIO_FAST_QUERYINFO,
- Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS)
- except GLib.Error as e:
- # If the file does not exist in 'a', it was added.
- if e.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_FOUND):
- added.append(child_b)
- if child_b_info.get_file_type() == Gio.FileType.DIRECTORY:
- added.extend(diff_add_dir_recurse(child_b))
- continue
- else:
- raise
-
- return modified, removed, added
-
-
# fetch()
#
# Fetch new objects from a remote, if configured
diff --git a/buildstream/_platform/linux.py b/buildstream/_platform/linux.py
index fec512b0a..56ebb410e 100644
--- a/buildstream/_platform/linux.py
+++ b/buildstream/_platform/linux.py
@@ -21,7 +21,7 @@ import subprocess
from .. import _site
from .. import utils
-from .._artifactcache.ostreecache import OSTreeCache
+from .._artifactcache.cascache import CASCache
from .._message import Message, MessageType
from ..sandbox import SandboxBwrap
@@ -36,7 +36,7 @@ class Linux(Platform):
self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
self._user_ns_available = self._check_user_ns_available(context)
- self._artifact_cache = OSTreeCache(context, enable_push=self._user_ns_available)
+ self._artifact_cache = CASCache(context, enable_push=self._user_ns_available)
@property
def artifactcache(self):
diff --git a/buildstream/_platform/unix.py b/buildstream/_platform/unix.py
index 8b1d2ece7..be4c129d3 100644
--- a/buildstream/_platform/unix.py
+++ b/buildstream/_platform/unix.py
@@ -19,7 +19,7 @@
import os
-from .._artifactcache.tarcache import TarCache
+from .._artifactcache.cascache import CASCache
from .._exceptions import PlatformError
from ..sandbox import SandboxChroot
@@ -31,7 +31,7 @@ class Unix(Platform):
def __init__(self, context, project):
super().__init__(context, project)
- self._artifact_cache = TarCache(context)
+ self._artifact_cache = CASCache(context)
# Not necessarily 100% reliable, but we want to fail early.
if os.geteuid() != 0:
diff --git a/buildstream/_project.py b/buildstream/_project.py
index b568cf852..54ec9ee34 100644
--- a/buildstream/_project.py
+++ b/buildstream/_project.py
@@ -296,7 +296,7 @@ class Project():
#
# Load artifacts pull/push configuration for this project
- self.artifact_cache_specs = ArtifactCache.specs_from_config_node(config)
+ self.artifact_cache_specs = ArtifactCache.specs_from_config_node(config, self.directory)
# Plugin origins and versions
origins = _yaml.node_get(config, list, 'plugins', default_value=[])
diff --git a/buildstream/_protos/__init__.py b/buildstream/_protos/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/__init__.py
diff --git a/buildstream/_protos/build/__init__.py b/buildstream/_protos/build/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/build/__init__.py
diff --git a/buildstream/_protos/build/bazel/__init__.py b/buildstream/_protos/build/bazel/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/build/bazel/__init__.py
diff --git a/buildstream/_protos/build/bazel/remote/__init__.py b/buildstream/_protos/build/bazel/remote/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/build/bazel/remote/__init__.py
diff --git a/buildstream/_protos/build/bazel/remote/execution/__init__.py b/buildstream/_protos/build/bazel/remote/execution/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/build/bazel/remote/execution/__init__.py
diff --git a/buildstream/_protos/build/bazel/remote/execution/v2/__init__.py b/buildstream/_protos/build/bazel/remote/execution/v2/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/build/bazel/remote/execution/v2/__init__.py
diff --git a/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto b/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
new file mode 100644
index 000000000..2aa99c93c
--- /dev/null
+++ b/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
@@ -0,0 +1,1253 @@
+// Copyright 2018 The Bazel Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package build.bazel.remote.execution.v2;
+
+import "build/bazel/semver/semver.proto";
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Build.Bazel.Remote.Execution.V2";
+option go_package = "remoteexecution";
+option java_multiple_files = true;
+option java_outer_classname = "RemoteExecutionProto";
+option java_package = "build.bazel.remote.execution.v2";
+option objc_class_prefix = "REX";
+
+
+// The Remote Execution API is used to execute an
+// [Action][build.bazel.remote.execution.v2.Action] on the remote
+// workers.
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service Execution {
+ // Execute an action remotely.
+ //
+ // In order to execute an action, the client must first upload all of the
+ // inputs, the
+ // [Command][build.bazel.remote.execution.v2.Command] to run, and the
+ // [Action][build.bazel.remote.execution.v2.Action] into the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ // It then calls `Execute` with an `action_digest` referring to them. The
+ // server will run the action and eventually return the result.
+ //
+ // The input `Action`'s fields MUST meet the various canonicalization
+ // requirements specified in the documentation for their types so that it has
+ // the same digest as other logically equivalent `Action`s. The server MAY
+ // enforce the requirements and return errors if a non-canonical input is
+ // received. It MAY also proceed without verifying some or all of the
+ // requirements, such as for performance reasons. If the server does not
+ // verify the requirement, then it will treat the `Action` as distinct from
+ // another logically equivalent action if they hash differently.
+ //
+ // Returns a stream of
+ // [google.longrunning.Operation][google.longrunning.Operation] messages
+ // describing the resulting execution, with eventual `response`
+ // [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The
+ // `metadata` on the operation is of type
+ // [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata].
+ //
+  // If the client remains connected after the first response is returned by
+ // the server, then updates are streamed as if the client had called
+ // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]
+ // until the execution completes or the request reaches an error. The
+ // operation can also be queried using [Operations
+ // API][google.longrunning.Operations.GetOperation].
+ //
+ // The server NEED NOT implement other methods or functionality of the
+ // Operations API.
+ //
+ // Errors discovered during creation of the `Operation` will be reported
+ // as gRPC Status errors, while errors that occurred while running the
+ // action will be reported in the `status` field of the `ExecuteResponse`. The
+ // server MUST NOT set the `error` field of the `Operation` proto.
+ // The possible errors include:
+ // * `INVALID_ARGUMENT`: One or more arguments are invalid.
+ // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
+ // action requested, such as a missing input or command or no worker being
+ // available. The client may be able to fix the errors and retry.
+ // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
+ // the action.
+ // * `UNAVAILABLE`: Due to a transient condition, such as all workers being
+ // occupied (and the server does not support a queue), the action could not
+ // be started. The client should retry.
+ // * `INTERNAL`: An internal error occurred in the execution engine or the
+ // worker.
+ // * `DEADLINE_EXCEEDED`: The execution timed out.
+ //
+ // In the case of a missing input or command, the server SHOULD additionally
+ // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
+ // where, for each requested blob not present in the CAS, there is a
+ // `Violation` with a `type` of `MISSING` and a `subject` of
+ // `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+ rpc Execute(ExecuteRequest) returns (stream google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v2/{instance_name=**}/actions:execute" body: "*" };
+ }
+
+ // Wait for an execution operation to complete. When the client initially
+ // makes the request, the server immediately responds with the current status
+ // of the execution. The server will leave the request stream open until the
+ // operation completes, and then respond with the completed operation. The
+ // server MAY choose to stream additional updates as execution progresses,
+ // such as to provide an update as to the state of the execution.
+ rpc WaitExecution(WaitExecutionRequest) returns (stream google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v2/{name=operations/**}:waitExecution" body: "*" };
+ }
+}
+
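+// For example (hypothetical digest, abbreviated), a missing-input failure
+// for a 512-byte blob would carry a `Violation` with a `type` of `MISSING`
+// and a `subject` of `"blobs/0a1b...e8f9/512"`.
+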
+// The action cache API is used to query whether a given action has already been
+// performed and, if so, retrieve its result. Unlike the
+// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
+// which addresses blobs by their own content, the action cache addresses the
+// [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
+// digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
+// which produced them.
+//
+// The lifetime of entries in the action cache is implementation-specific, but
+// the server SHOULD assume that more recently used entries are more likely to
+// be used again. Additionally, action cache implementations SHOULD ensure that
+// any blobs referenced in the
+// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+// are still valid when returning a result.
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service ActionCache {
+ // Retrieve a cached execution result.
+ //
+ // Errors:
+ // * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+ rpc GetActionResult(GetActionResultRequest) returns (ActionResult) {
+ option (google.api.http) = { get: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" };
+ }
+
+ // Upload a new execution result.
+ //
+ // This method is intended for servers which implement the distributed cache
+ // independently of the
+ // [Execution][build.bazel.remote.execution.v2.Execution] API. As a
+ // result, it is OPTIONAL for servers to implement.
+ //
+ // In order to allow the server to perform access control based on the type of
+ // action, and to assist with client debugging, the client MUST first upload
+ // the [Action][build.bazel.remote.execution.v2.Execution] that produced the
+ // result, along with its
+ // [Command][build.bazel.remote.execution.v2.Command], into the
+ // `ContentAddressableStorage`.
+ //
+ // Errors:
+ // * `NOT_IMPLEMENTED`: This method is not supported by the server.
+ // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+ // entry to the cache.
+ rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) {
+ option (google.api.http) = { put: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" body: "action_result" };
+ }
+}
+
+// The CAS (content-addressable storage) is used to store the inputs to and
+// outputs from the execution service. Each piece of content is addressed by the
+// digest of its binary data.
+//
+// Most of the binary data stored in the CAS is opaque to the execution engine,
+// and is only used as a communication medium. In order to build an
+// [Action][build.bazel.remote.execution.v2.Action],
+// however, the client will need to also upload the
+// [Command][build.bazel.remote.execution.v2.Command] and input root
+// [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
+// The Command and Directory messages must be marshalled to wire format and then
+// uploaded under the hash as with any other piece of content. In practice, the
+// input root directory is likely to refer to other Directories in its
+// hierarchy, which must also each be uploaded on their own.
+//
+// For small file uploads the client should group them together and call
+// [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
+// on chunks of no more than 10 MiB. For large uploads, the client must use the
+// [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+// `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+// where `instance_name` is as described in the next paragraph, `uuid` is a
+// version 4 UUID generated by the client, and `hash` and `size` are the
+// [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
+// `uuid` is used only to avoid collisions when multiple clients try to upload
+// the same file (or the same client tries to upload the file multiple times at
+// once on different threads), so the client MAY reuse the `uuid` for uploading
+// different blobs. The `resource_name` may optionally have a trailing filename
+// (or other metadata) for a client to use if it is storing URLs, as in
+// `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+// after the `size` is ignored.
+//
+// A single server MAY support multiple instances of the execution system, each
+// with their own workers, storage, cache, etc. The exact relationship between
+// instances is up to the server. If the server does, then the `instance_name`
+// is an identifier, possibly containing multiple path segments, used to
+// distinguish between the various instances on the server, in a manner defined
+// by the server. For servers which do not support multiple instances, then the
+// `instance_name` is the empty path and the leading slash is omitted, so that
+// the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+//
+// When attempting an upload, if another client has already completed the upload
+// (which may occur in the middle of a single upload if another client uploads
+// the same blob concurrently), the request will terminate immediately with
+// a response whose `committed_size` is the full size of the uploaded file
+// (regardless of how much data was transmitted by the client). If the client
+// completes the upload but the
+// [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
+// `INVALID_ARGUMENT` error will be returned. In either case, the client should
+// not attempt to retry the upload.
+//
+// For downloading blobs, the client must use the
+// [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+// a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+// `instance_name` is the instance name (see above), and `hash` and `size` are
+// the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+//
+// The lifetime of entries in the CAS is implementation specific, but it SHOULD
+// be long enough to allow for newly-added and recently looked-up entries to be
+// used in subsequent calls (e.g. to
+// [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service ContentAddressableStorage {
+ // Determine if blobs are present in the CAS.
+ //
+ // Clients can use this API before uploading blobs to determine which ones are
+ // already present in the CAS and do not need to be uploaded again.
+ //
+ // There are no method-specific errors.
+ rpc FindMissingBlobs(FindMissingBlobsRequest) returns (FindMissingBlobsResponse) {
+ option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:findMissing" body: "*" };
+ }
+
+ // Upload many blobs at once.
+ //
+ // The client MUST NOT upload blobs with a combined total size of more than 10
+ // MiB using this API. Such requests should either be split into smaller
+ // chunks or uploaded using the
+ // [ByteStream API][google.bytestream.ByteStream], as appropriate.
+ //
+ // This request is equivalent to calling a hypothetical `UpdateBlob` request
+ // on each individual blob, in parallel. The requests may succeed or fail
+ // independently.
+ //
+ // Errors:
+ // * `INVALID_ARGUMENT`: The client attempted to upload more than 10 MiB of
+ // data.
+ //
+ // Individual requests may return the following errors, additionally:
+ // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
+ // * `INVALID_ARGUMENT`: The
+ // [Digest][build.bazel.remote.execution.v2.Digest] does not match the
+ // provided data.
+ rpc BatchUpdateBlobs(BatchUpdateBlobsRequest) returns (BatchUpdateBlobsResponse) {
+ option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchUpdate" body: "*" };
+ }
+
+ // Fetch the entire directory tree rooted at a node.
+ //
+ // This request must be targeted at a
+ // [Directory][build.bazel.remote.execution.v2.Directory] stored in the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+ // (CAS). The server will enumerate the `Directory` tree recursively and
+ // return every node descended from the root.
+ //
+ // The GetTreeRequest.page_token parameter can be used to skip ahead in
+ // the stream (e.g. when retrying a partially completed and aborted request),
+ // by setting it to a value taken from GetTreeResponse.next_page_token of the
+// last successfully processed GetTreeResponse.
+ //
+ // The exact traversal order is unspecified and, unless retrieving subsequent
+ // pages from an earlier request, is not guaranteed to be stable across
+ // multiple invocations of `GetTree`.
+ //
+ // If part of the tree is missing from the CAS, the server will return the
+ // portion present and omit the rest.
+ //
+ // * `NOT_FOUND`: The requested tree root is not present in the CAS.
+ rpc GetTree(GetTreeRequest) returns (stream GetTreeResponse) {
+ option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" };
+ }
+}
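+
+// A minimal sketch of the small-blob upload flow described above, using the
+// generated Python stubs added in this tree. The channel address and the shape
+// of the `blobs` argument are assumptions of the sketch, not part of the API.
+//
+// ```python
+// import grpc
+// from buildstream._protos.build.bazel.remote.execution.v2 import (
+//     remote_execution_pb2, remote_execution_pb2_grpc)
+//
+// def upload_small_blobs(cas, instance_name, blobs):
+//     # blobs: list of (Digest, bytes) pairs, together under the 10 MiB limit.
+//     request = remote_execution_pb2.FindMissingBlobsRequest(
+//         instance_name=instance_name,
+//         blob_digests=[digest for digest, _ in blobs])
+//     missing = {(d.hash, d.size_bytes)
+//                for d in cas.FindMissingBlobs(request).missing_blob_digests}
+//     updates = [remote_execution_pb2.UpdateBlobRequest(content_digest=d, data=data)
+//                for d, data in blobs if (d.hash, d.size_bytes) in missing]
+//     if updates:
+//         response = cas.BatchUpdateBlobs(
+//             remote_execution_pb2.BatchUpdateBlobsRequest(
+//                 instance_name=instance_name, requests=updates))
+//         # Each blob succeeds or fails independently.
+//         for r in response.responses:
+//             assert r.status.code == 0, r.status  # 0 is google.rpc OK
+//
+// channel = grpc.insecure_channel('localhost:50051')  # address is illustrative
+// cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(channel)
+// ```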
+
+// The Capabilities service may be used by remote execution clients to query
+// various server properties, in order to self-configure or return meaningful
+// error messages.
+//
+// The query may include a particular `instance_name`, in which case the values
+// returned will pertain to that instance.
+service Capabilities {
+ // GetCapabilities returns the server capabilities configuration.
+ rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) {
+ option (google.api.http) = {
+ get: "/v2/{instance_name=**}/capabilities"
+ };
+ }
+}
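+
+// For example, a client might self-configure against the digest function
+// advertised by the server (a sketch; `channel` is assumed to be an existing
+// gRPC channel):
+//
+// ```python
+// from buildstream._protos.build.bazel.remote.execution.v2 import (
+//     remote_execution_pb2, remote_execution_pb2_grpc)
+//
+// caps = remote_execution_pb2_grpc.CapabilitiesStub(channel).GetCapabilities(
+//     remote_execution_pb2.GetCapabilitiesRequest(instance_name=''))
+// if remote_execution_pb2.SHA256 not in caps.cache_capabilities.digest_function:
+//     raise RuntimeError('remote cache does not support SHA-256')
+// ```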
+
+// An `Action` captures all the information about an execution which is required
+// to reproduce it.
+//
+// `Action`s are the core component of the
+// [Execution][build.bazel.remote.execution.v2.Execution] service. A single
+// `Action` represents a repeatable action that can be performed by the
+// execution service. `Action`s can be succinctly identified by the digest of
+// their wire format encoding and, once an `Action` has been executed, will be
+// cached in the action cache. Future requests can then use the cached result
+// rather than needing to run afresh.
+//
+// When a server completes execution of an
+// [Action][build.bazel.remote.execution.v2.Action], it MAY choose to
+// cache the [result][build.bazel.remote.execution.v2.ActionResult] in
+// the [ActionCache][build.bazel.remote.execution.v2.ActionCache] unless
+// `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
+// default, future calls to
+// [Execute][build.bazel.remote.execution.v2.Execution.Execute] the same
+// `Action` will also serve their results from the cache. Clients must take care
+// to understand the caching behaviour. Ideally, all `Action`s will be
+// reproducible so that serving a result from cache is always desirable and
+// correct.
+message Action {
+ // The digest of the [Command][build.bazel.remote.execution.v2.Command]
+ // to run, which MUST be present in the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ Digest command_digest = 1;
+
+ // The digest of the root
+ // [Directory][build.bazel.remote.execution.v2.Directory] for the input
+ // files. The files in the directory tree are available in the correct
+ // location on the build machine before the command is executed. The root
+ // directory, as well as every subdirectory and content blob referred to, MUST
+ // be in the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ Digest input_root_digest = 2;
+
+ reserved 3 to 5; // Used for fields moved to [Command][build.bazel.remote.execution.v2.Command].
+
+ // A timeout after which the execution should be killed. If the timeout is
+ // absent, then the client is specifying that the execution should continue
+ // as long as the server will let it. The server SHOULD impose a timeout if
+  // the client does not specify one. However, if the client does specify a
+ // timeout that is longer than the server's maximum timeout, the server MUST
+ // reject the request.
+ //
+ // The timeout is a part of the
+ // [Action][build.bazel.remote.execution.v2.Action] message, and
+ // therefore two `Actions` with different timeouts are different, even if they
+ // are otherwise identical. This is because, if they were not, running an
+ // `Action` with a lower timeout than is required might result in a cache hit
+ // from an execution run with a longer timeout, hiding the fact that the
+ // timeout is too short. By encoding it directly in the `Action`, a lower
+ // timeout will result in a cache miss and the execution timeout will fail
+ // immediately, rather than whenever the cache entry gets evicted.
+ google.protobuf.Duration timeout = 6;
+
+ // If true, then the `Action`'s result cannot be cached.
+ bool do_not_cache = 7;
+}
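+
+// Since the timeout is serialized as part of the `Action`, changing it changes
+// the digest and therefore the cache key. A sketch (the digests embedded in
+// the Action are illustrative placeholders):
+//
+// ```python
+// import hashlib
+// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+//
+// base = remote_execution_pb2.Action(
+//     command_digest=remote_execution_pb2.Digest(hash='aa...', size_bytes=142),
+//     input_root_digest=remote_execution_pb2.Digest(hash='bb...', size_bytes=330))
+// timed = remote_execution_pb2.Action()
+// timed.CopyFrom(base)
+// timed.timeout.seconds = 60
+//
+// # Identical except for `timeout`, yet the two hash differently, so a
+// # 60-second run can never be served from a cache entry without a timeout.
+// assert (hashlib.sha256(base.SerializeToString()).hexdigest() !=
+//         hashlib.sha256(timed.SerializeToString()).hexdigest())
+// ```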
+
+// A `Command` is the actual command executed by a worker running an
+// [Action][build.bazel.remote.execution.v2.Action], together with a
+// specification of its environment.
+//
+// Except as otherwise required, the environment (such as which system
+// libraries or binaries are available, and what filesystems are mounted where)
+// is defined by and specific to the implementation of the remote execution API.
+message Command {
+ // An `EnvironmentVariable` is one variable to set in the running program's
+ // environment.
+ message EnvironmentVariable {
+ // The variable name.
+ string name = 1;
+
+ // The variable value.
+ string value = 2;
+ }
+
+ // The arguments to the command. The first argument must be the path to the
+ // executable, which must be either a relative path, in which case it is
+ // evaluated with respect to the input root, or an absolute path.
+ repeated string arguments = 1;
+
+ // The environment variables to set when running the program. The worker may
+ // provide its own default environment variables; these defaults can be
+ // overridden using this field. Additional variables can also be specified.
+ //
+ // In order to ensure that equivalent `Command`s always hash to the same
+ // value, the environment variables MUST be lexicographically sorted by name.
+  // Sorting of strings is done by code point or, equivalently, by the UTF-8 bytes.
+ repeated EnvironmentVariable environment_variables = 2;
+
+ // A list of the output files that the client expects to retrieve from the
+ // action. Only the listed files, as well as directories listed in
+ // `output_directories`, will be returned to the client as output.
+ // Other files that may be created during command execution are discarded.
+ //
+ // The paths are relative to the working directory of the action execution.
+ // The paths are specified using a single forward slash (`/`) as a path
+ // separator, even if the execution platform natively uses a different
+ // separator. The path MUST NOT include a trailing slash, nor a leading slash,
+  // since it is a relative path.
+ //
+ // In order to ensure consistent hashing of the same Action, the output paths
+ // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
+ // bytes).
+ //
+ // An output file cannot be duplicated, be a parent of another output file, be
+ // a child of a listed output directory, or have the same path as any of the
+ // listed output directories.
+ repeated string output_files = 3;
+
+ // A list of the output directories that the client expects to retrieve from
+ // the action. Only the contents of the indicated directories (recursively
+ // including the contents of their subdirectories) will be
+ // returned, as well as files listed in `output_files`. Other files that may
+ // be created during command execution are discarded.
+ //
+ // The paths are relative to the working directory of the action execution.
+ // The paths are specified using a single forward slash (`/`) as a path
+ // separator, even if the execution platform natively uses a different
+ // separator. The path MUST NOT include a trailing slash, nor a leading slash,
+  // since it is a relative path. The special value of empty string is allowed,
+ // although not recommended, and can be used to capture the entire working
+ // directory tree, including inputs.
+ //
+ // In order to ensure consistent hashing of the same Action, the output paths
+ // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
+ // bytes).
+ //
+ // An output directory cannot be duplicated, be a parent of another output
+ // directory, be a parent of a listed output file, or have the same path as
+ // any of the listed output files.
+ repeated string output_directories = 4;
+
+ // The platform requirements for the execution environment. The server MAY
+ // choose to execute the action on any worker satisfying the requirements, so
+ // the client SHOULD ensure that running the action on any such worker will
+ // have the same result.
+ Platform platform = 5;
+
+ // The working directory, relative to the input root, for the command to run
+ // in. It must be a directory which exists in the input tree. If it is left
+ // empty, then the action is run in the input root.
+ string working_directory = 6;
+}
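+
+// A sketch of constructing a `Command` in canonical form; Python's `sorted`
+// orders strings by code point, which is exactly the ordering required here
+// (the concrete arguments and paths are illustrative):
+//
+// ```python
+// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+//
+// env = {'PATH': '/usr/bin:/bin', 'LANG': 'C'}
+// command = remote_execution_pb2.Command(
+//     arguments=['sh', '-c', 'make install'],
+//     environment_variables=[
+//         remote_execution_pb2.Command.EnvironmentVariable(name=k, value=v)
+//         for k, v in sorted(env.items())],          # MUST be sorted by name
+//     output_files=sorted(['install/bin/app', 'install/share/app.conf']),
+//     output_directories=sorted(['install/lib']),    # likewise sorted
+//     working_directory='')
+// ```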
+
+// A `Platform` is a set of requirements, such as hardware, operating system, or
+// compiler toolchain, for an
+// [Action][build.bazel.remote.execution.v2.Action]'s execution
+// environment. A `Platform` is represented as a series of key-value pairs
+// representing the properties that are required of the platform.
+message Platform {
+ // A single property for the environment. The server is responsible for
+ // specifying the property `name`s that it accepts. If an unknown `name` is
+ // provided in the requirements for an
+ // [Action][build.bazel.remote.execution.v2.Action], the server SHOULD
+ // reject the execution request. If permitted by the server, the same `name`
+ // may occur multiple times.
+ //
+ // The server is also responsible for specifying the interpretation of
+ // property `value`s. For instance, a property describing how much RAM must be
+ // available may be interpreted as allowing a worker with 16GB to fulfill a
+ // request for 8GB, while a property describing the OS environment on which
+ // the action must be performed may require an exact match with the worker's
+ // OS.
+ //
+ // The server MAY use the `value` of one or more properties to determine how
+ // it sets up the execution environment, such as by making specific system
+ // files available to the worker.
+ message Property {
+ // The property name.
+ string name = 1;
+
+ // The property value.
+ string value = 2;
+ }
+
+ // The properties that make up this platform. In order to ensure that
+ // equivalent `Platform`s always hash to the same value, the properties MUST
+ // be lexicographically sorted by name, and then by value. Sorting of strings
+  // is done by code point or, equivalently, by the UTF-8 bytes.
+ repeated Property properties = 1;
+}
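+
+// The same canonicalization applies here; a brief sketch with illustrative
+// property names (the server defines which names it actually accepts):
+//
+// ```python
+// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+//
+// props = [('ISA', 'x86-64'), ('OSFamily', 'Linux')]
+// platform = remote_execution_pb2.Platform(properties=[
+//     remote_execution_pb2.Platform.Property(name=k, value=v)
+//     for k, v in sorted(props)])  # sorted by name, then by value
+// ```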
+
+// A `Directory` represents a directory node in a file tree, containing zero or
+// more children [FileNodes][build.bazel.remote.execution.v2.FileNode],
+// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode] and
+// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode].
+// Each `Node` contains its name in the directory, either the digest of its
+// content (a file blob or a `Directory` proto) or a symlink target, as
+// well as possibly some metadata about the file or directory.
+//
+// In order to ensure that two equivalent directory trees hash to the same
+// value, the following restrictions MUST be obeyed when constructing
+// a `Directory`:
+// - Every child in the directory must have a path of exactly one segment.
+// Multiple levels of directory hierarchy may not be collapsed.
+// - Each child in the directory must have a unique path segment (file name).
+// - The files, directories and symlinks in the directory must each be sorted
+// in lexicographical order by path. The path strings must be sorted by code
+//   point or, equivalently, by UTF-8 bytes.
+//
+// A `Directory` that obeys the restrictions is said to be in canonical form.
+//
+// As an example, the following could be used for a file named `bar` and a
+// directory named `foo` with an executable file named `baz` (hashes shortened
+// for readability):
+//
+// ```json
+// // (Directory proto)
+// {
+// files: [
+// {
+// name: "bar",
+// digest: {
+// hash: "4a73bc9d03...",
+// size: 65534
+// }
+// }
+// ],
+// directories: [
+// {
+// name: "foo",
+// digest: {
+// hash: "4cf2eda940...",
+// size: 43
+// }
+// }
+// ]
+// }
+//
+// // (Directory proto with hash "4cf2eda940..." and size 43)
+// {
+// files: [
+// {
+// name: "baz",
+// digest: {
+// hash: "b2c941073e...",
+// size: 1294,
+// },
+// is_executable: true
+// }
+// ]
+// }
+// ```
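+//
+// The same tree, built with the generated Python messages (a sketch; the
+// shortened hashes above stand in for full 64-character values, and
+// `message_digest` is an illustrative helper, not part of this API):
+//
+// ```python
+// import hashlib
+// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+//
+// def message_digest(msg):
+//     blob = msg.SerializeToString()
+//     return remote_execution_pb2.Digest(
+//         hash=hashlib.sha256(blob).hexdigest(), size_bytes=len(blob))
+//
+// foo = remote_execution_pb2.Directory(files=[
+//     remote_execution_pb2.FileNode(
+//         name='baz',
+//         digest=remote_execution_pb2.Digest(hash='b2c941073e...', size_bytes=1294),
+//         is_executable=True)])
+//
+// root = remote_execution_pb2.Directory(
+//     files=[remote_execution_pb2.FileNode(
+//         name='bar',
+//         digest=remote_execution_pb2.Digest(hash='4a73bc9d03...', size_bytes=65534))],
+//     directories=[remote_execution_pb2.DirectoryNode(
+//         name='foo', digest=message_digest(foo))])
+// ```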
+message Directory {
+ // The files in the directory.
+ repeated FileNode files = 1;
+
+ // The subdirectories in the directory.
+ repeated DirectoryNode directories = 2;
+
+ // The symlinks in the directory.
+ repeated SymlinkNode symlinks = 3;
+}
+
+// A `FileNode` represents a single file and associated metadata.
+message FileNode {
+ // The name of the file.
+ string name = 1;
+
+ // The digest of the file's content.
+ Digest digest = 2;
+
+ reserved 3; // Reserved to ensure wire-compatibility with `OutputFile`.
+
+ // True if file is executable, false otherwise.
+ bool is_executable = 4;
+}
+
+// A `DirectoryNode` represents a child of a
+// [Directory][build.bazel.remote.execution.v2.Directory] which is itself
+// a `Directory` and its associated metadata.
+message DirectoryNode {
+ // The name of the directory.
+ string name = 1;
+
+ // The digest of the
+ // [Directory][build.bazel.remote.execution.v2.Directory] object
+ // represented. See [Digest][build.bazel.remote.execution.v2.Digest]
+ // for information about how to take the digest of a proto message.
+ Digest digest = 2;
+}
+
+// A `SymlinkNode` represents a symbolic link.
+message SymlinkNode {
+ // The name of the symlink.
+ string name = 1;
+
+ // The target path of the symlink. The path separator is a forward slash `/`.
+ // The target path can be relative to the parent directory of the symlink or
+ // it can be an absolute path starting with `/`. Absolute paths are
+ // interpreted relative to the input root directory, i.e., they are not
+ // allowed to escape the input space. The canonical form forbids the
+ // substrings `/./` and `//` in the target path. `..` components are allowed
+ // anywhere in the target path except for the leading component of an
+ // absolute path.
+ string target = 2;
+}
+
+// A content digest. A digest for a given blob consists of the size of the blob
+// and its hash. The hash algorithm to use is defined by the server, but servers
+// SHOULD use SHA-256.
+//
+// The size is considered to be an integral part of the digest and cannot be
+// separated. That is, even if the `hash` field is correctly specified but
+// `size_bytes` is not, the server MUST reject the request.
+//
+// The reason for including the size in the digest is as follows: in a great
+// many cases, the server needs to know the size of the blob it is about to work
+// with prior to starting an operation with it, such as flattening Merkle tree
+// structures or streaming it to a worker. Technically, the server could
+// implement a separate metadata store, but this results in a significantly more
+// complicated implementation as opposed to having the client specify the size
+// up-front (or storing the size along with the digest in every message where
+// digests are embedded). This does mean that the API leaks some implementation
+// details of (what we consider to be) a reasonable server implementation, but
+// we consider this to be a worthwhile tradeoff.
+//
+// When a `Digest` is used to refer to a proto message, it always refers to the
+// message in binary encoded form. To ensure consistent hashing, clients and
+// servers MUST ensure that they serialize messages according to the following
+// rules, even if there are alternate valid encodings for the same message.
+// - Fields are serialized in tag order.
+// - There are no unknown fields.
+// - There are no duplicate fields.
+// - Fields are serialized according to the default semantics for their type.
+//
+// Most protocol buffer implementations will always follow these rules when
+// serializing, but care should be taken to avoid shortcuts. For instance,
+// concatenating two messages to merge them may produce duplicate fields.
+message Digest {
+ // The hash. In the case of SHA-256, it will always be a lowercase hex string
+ // exactly 64 characters long.
+ string hash = 1;
+
+ // The size of the blob, in bytes.
+ int64 size_bytes = 2;
+}
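+
+// A sketch of computing digests with the generated Python messages, assuming
+// the SHA-256 function recommended above (the helper names are illustrative):
+//
+// ```python
+// import hashlib
+// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+//
+// def blob_digest(data):
+//     # Both fields travel together; a digest without its size is invalid.
+//     return remote_execution_pb2.Digest(
+//         hash=hashlib.sha256(data).hexdigest(), size_bytes=len(data))
+//
+// def message_digest(msg):
+//     # Messages are digested in binary encoded form; stock protobuf
+//     # serialization satisfies the determinism rules listed above.
+//     return blob_digest(msg.SerializeToString())
+// ```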
+
+// ExecutedActionMetadata contains details about a completed execution.
+message ExecutedActionMetadata {
+ // The name of the worker which ran the execution.
+ string worker = 1;
+
+  // When the action was added to the queue.
+ google.protobuf.Timestamp queued_timestamp = 2;
+
+ // When the worker received the action.
+ google.protobuf.Timestamp worker_start_timestamp = 3;
+
+ // When the worker completed the action, including all stages.
+ google.protobuf.Timestamp worker_completed_timestamp = 4;
+
+ // When the worker started fetching action inputs.
+ google.protobuf.Timestamp input_fetch_start_timestamp = 5;
+
+ // When the worker finished fetching action inputs.
+ google.protobuf.Timestamp input_fetch_completed_timestamp = 6;
+
+ // When the worker started executing the action command.
+ google.protobuf.Timestamp execution_start_timestamp = 7;
+
+ // When the worker completed executing the action command.
+ google.protobuf.Timestamp execution_completed_timestamp = 8;
+
+ // When the worker started uploading action outputs.
+ google.protobuf.Timestamp output_upload_start_timestamp = 9;
+
+ // When the worker finished uploading action outputs.
+ google.protobuf.Timestamp output_upload_completed_timestamp = 10;
+}
+
+// An ActionResult represents the result of an
+// [Action][build.bazel.remote.execution.v2.Action] being run.
+message ActionResult {
+ reserved 1; // Reserved for use as the resource name.
+
+ // The output files of the action. For each output file requested in the
+ // `output_files` field of the Action, if the corresponding file existed after
+ // the action completed, a single entry will be present in the output list.
+ //
+ // If the action does not produce the requested output, or produces a
+ // directory where a regular file is expected or vice versa, then that output
+ // will be omitted from the list. The server is free to arrange the output
+ // list as desired; clients MUST NOT assume that the output list is sorted.
+ repeated OutputFile output_files = 2;
+
+ // The output directories of the action. For each output directory requested
+ // in the `output_directories` field of the Action, if the corresponding
+ // directory existed after the action completed, a single entry will be
+ // present in the output list, which will contain the digest of a
+ // [Tree][build.bazel.remote.execution.v2.Tree] message containing the
+ // directory tree, and the path equal exactly to the corresponding Action
+ // output_directories member.
+ //
+ // As an example, suppose the Action had an output directory `a/b/dir` and the
+ // execution produced the following contents in `a/b/dir`: a file named `bar`
+ // and a directory named `foo` with an executable file named `baz`. Then,
+  // the corresponding `OutputDirectory` will contain (hashes shortened for readability):
+ //
+ // ```json
+ // // OutputDirectory proto:
+ // {
+ // path: "a/b/dir"
+ // tree_digest: {
+ // hash: "4a73bc9d03...",
+ // size: 55
+ // }
+ // }
+ // // Tree proto with hash "4a73bc9d03..." and size 55:
+ // {
+ // root: {
+ // files: [
+ // {
+ // name: "bar",
+ // digest: {
+ // hash: "4a73bc9d03...",
+ // size: 65534
+ // }
+ // }
+ // ],
+ // directories: [
+ // {
+ // name: "foo",
+ // digest: {
+ // hash: "4cf2eda940...",
+ // size: 43
+ // }
+ // }
+ // ]
+ // }
+  //   children: {
+ // // (Directory proto with hash "4cf2eda940..." and size 43)
+ // files: [
+ // {
+ // name: "baz",
+ // digest: {
+ // hash: "b2c941073e...",
+ // size: 1294,
+ // },
+ // is_executable: true
+ // }
+ // ]
+ // }
+ // }
+ // ```
+ repeated OutputDirectory output_directories = 3;
+
+ // The exit code of the command.
+ int32 exit_code = 4;
+
+ // The standard output buffer of the action. The server will determine, based
+ // on the size of the buffer, whether to return it in raw form or to return
+ // a digest in `stdout_digest` that points to the buffer. If neither is set,
+  // then the buffer is empty. The client SHOULD NOT assume which of the raw
+  // buffer or the digest it will get on any given request and should be
+  // prepared to handle either.
+ bytes stdout_raw = 5;
+
+ // The digest for a blob containing the standard output of the action, which
+ // can be retrieved from the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ // See `stdout_raw` for when this will be set.
+ Digest stdout_digest = 6;
+
+ // The standard error buffer of the action. The server will determine, based
+ // on the size of the buffer, whether to return it in raw form or to return
+ // a digest in `stderr_digest` that points to the buffer. If neither is set,
+  // then the buffer is empty. The client SHOULD NOT assume which of the raw
+  // buffer or the digest it will get on any given request and should be
+  // prepared to handle either.
+ bytes stderr_raw = 7;
+
+ // The digest for a blob containing the standard error of the action, which
+ // can be retrieved from the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ // See `stderr_raw` for when this will be set.
+ Digest stderr_digest = 8;
+
+ // The details of the execution that originally produced this result.
+ ExecutedActionMetadata execution_metadata = 9;
+}
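+
+// A sketch of the stdout handling a client therefore needs (`fetch_blob`, a
+// callable mapping a Digest to bytes via e.g. a ByteStream Read wrapper, is an
+// assumption of this sketch):
+//
+// ```python
+// def get_stdout(result, fetch_blob):
+//     if result.stdout_raw:
+//         return result.stdout_raw
+//     if result.stdout_digest.hash:
+//         return fetch_blob(result.stdout_digest)
+//     return b''  # neither field set: the buffer is empty
+// ```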
+
+// An `OutputFile` is similar to a
+// [FileNode][build.bazel.remote.execution.v2.FileNode], but it is used as an
+// output in an `ActionResult`. It allows a full file path rather than
+// only a name.
+//
+// `OutputFile` is binary-compatible with `FileNode`.
+message OutputFile {
+ // The full path of the file relative to the input root, including the
+ // filename. The path separator is a forward slash `/`. Since this is a
+ // relative path, it MUST NOT begin with a leading forward slash.
+ string path = 1;
+
+ // The digest of the file's content.
+ Digest digest = 2;
+
+ reserved 3; // Used for a removed field in an earlier version of the API.
+
+ // True if file is executable, false otherwise.
+ bool is_executable = 4;
+}
+
+// A `Tree` contains all the
+// [Directory][build.bazel.remote.execution.v2.Directory] protos in a
+// single directory Merkle tree, compressed into one message.
+message Tree {
+ // The root directory in the tree.
+ Directory root = 1;
+
+ // All the child directories: the directories referred to by the root and,
+ // recursively, all its children. In order to reconstruct the directory tree,
+ // the client must take the digests of each of the child directories and then
+ // build up a tree starting from the `root`.
+ repeated Directory children = 2;
+}
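+
+// A sketch of reconstructing the tree, assuming SHA-256 digests (listing the
+// paths via `print` is purely illustrative):
+//
+// ```python
+// import hashlib
+//
+// def walk(tree):
+//     # Children are stored flat; index them by the hash of their encoded
+//     # form so DirectoryNode digests can be resolved from the root down.
+//     index = {hashlib.sha256(c.SerializeToString()).hexdigest(): c
+//              for c in tree.children}
+//
+//     def visit(directory, prefix=''):
+//         for f in directory.files:
+//             print(prefix + f.name)
+//         for node in directory.directories:
+//             visit(index[node.digest.hash], prefix + node.name + '/')
+//
+//     visit(tree.root)
+// ```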
+
+// An `OutputDirectory` is the output in an `ActionResult` corresponding to a
+// directory's full contents rather than a single file.
+message OutputDirectory {
+ // The full path of the directory relative to the working directory. The path
+ // separator is a forward slash `/`. Since this is a relative path, it MUST
+ // NOT begin with a leading forward slash. The empty string value is allowed,
+ // and it denotes the entire working directory.
+ string path = 1;
+
+ reserved 2; // Used for a removed field in an earlier version of the API.
+
+ // The digest of the encoded
+ // [Tree][build.bazel.remote.execution.v2.Tree] proto containing the
+ // directory's contents.
+ Digest tree_digest = 3;
+}
+
+// An `ExecutionPolicy` can be used to control the scheduling of the action.
+message ExecutionPolicy {
+  // The priority (relative importance) of this action. Generally, a lower value
+  // means that the action should be run sooner than actions having a greater
+  // priority value, but the interpretation of a given value is
+  // server-dependent. A priority of 0 means the *default* priority. Priorities
+  // may be positive or negative; such actions should run later or sooner than
+  // actions having the default priority, respectively. The particular semantics
+  // of this field are up to the server. In particular, every server will have
+  // its own supported range of priorities, and will decide how these map onto
+  // scheduling policy.
+ int32 priority = 1;
+}
+
+// A `ResultsCachePolicy` is used for fine-grained control over how action
+// outputs are stored in the CAS and Action Cache.
+message ResultsCachePolicy {
+ // The priority (relative importance) of this content in the overall cache.
+ // Generally, a lower value means a longer retention time or other advantage,
+ // but the interpretation of a given value is server-dependent. A priority of
+ // 0 means a *default* value, decided by the server.
+ //
+  // The particular semantics of this field are up to the server. In particular,
+  // every server will have its own supported range of priorities, and will
+ // decide how these map into retention/eviction policy.
+ int32 priority = 1;
+}
+
+// A request message for
+// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute].
+message ExecuteRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion; otherwise, it can be
+ // omitted.
+ string instance_name = 1;
+
+ // If true, the action will be executed anew even if its result was already
+ // present in the cache. If false, the result may be served from the
+ // [ActionCache][build.bazel.remote.execution.v2.ActionCache].
+ bool skip_cache_lookup = 3;
+
+ reserved 2, 4, 5; // Used for removed fields in an earlier version of the API.
+
+ // The digest of the [Action][build.bazel.remote.execution.v2.Action] to
+ // execute.
+ Digest action_digest = 6;
+
+ // An optional policy for execution of the action.
+ // The server will have a default policy if this is not provided.
+ ExecutionPolicy execution_policy = 7;
+
+ // An optional policy for the results of this execution in the remote cache.
+ // The server will have a default policy if this is not provided.
+ // This may be applied to both the ActionResult and the associated blobs.
+ ResultsCachePolicy results_cache_policy = 8;
+}
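+
+// A sketch of issuing the request with the generated stubs (`channel` and
+// `action_digest` are assumed to exist already; Execute returns a stream of
+// [Operation][google.longrunning.Operation] updates):
+//
+// ```python
+// from buildstream._protos.build.bazel.remote.execution.v2 import (
+//     remote_execution_pb2, remote_execution_pb2_grpc)
+//
+// execution = remote_execution_pb2_grpc.ExecutionStub(channel)
+// request = remote_execution_pb2.ExecuteRequest(
+//     instance_name='',
+//     action_digest=action_digest,
+//     skip_cache_lookup=False)
+// for operation in execution.Execute(request):
+//     if operation.done:
+//         break  # operation.response now holds the packed ExecuteResponse
+// ```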
+
+// A `LogFile` is a log stored in the CAS.
+message LogFile {
+ // The digest of the log contents.
+ Digest digest = 1;
+
+ // This is a hint as to the purpose of the log, and is set to true if the log
+ // is human-readable text that can be usefully displayed to a user, and false
+ // otherwise. For instance, if a command-line client wishes to print the
+ // server logs to the terminal for a failed action, this allows it to avoid
+ // displaying a binary file.
+ bool human_readable = 2;
+}
+
+// The response message for
+// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute],
+// which will be contained in the [response
+// field][google.longrunning.Operation.response] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteResponse {
+ // The result of the action.
+ ActionResult result = 1;
+
+ // True if the result was served from cache, false if it was executed.
+ bool cached_result = 2;
+
+ // If the status has a code other than `OK`, it indicates that the action did
+ // not finish execution. For example, if the operation times out during
+ // execution, the status will have a `DEADLINE_EXCEEDED` code. Servers MUST
+ // use this field for errors in execution, rather than the error field on the
+ // `Operation` object.
+ //
+ // If the status code is other than `OK`, then the result MUST NOT be cached.
+ // For an error status, the `result` field is optional; the server may
+ // populate the output-, stdout-, and stderr-related fields if it has any
+ // information available, such as the stdout and stderr of a timed-out action.
+ google.rpc.Status status = 3;
+
+ // An optional list of additional log outputs the server wishes to provide. A
+ // server can use this to return execution-specific logs however it wishes.
+ // This is intended primarily to make it easier for users to debug issues that
+ // may be outside of the actual job execution, such as by identifying the
+ // worker executing the action or by providing logs from the worker's setup
+ // phase. The keys SHOULD be human readable so that a client can display them
+ // to a user.
+ map<string, LogFile> server_logs = 4;
+}
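+
+// A sketch of unwrapping the finished Operation according to these rules (the
+// helper name is illustrative):
+//
+// ```python
+// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+//
+// def unwrap_execute_response(operation):
+//     response = remote_execution_pb2.ExecuteResponse()
+//     operation.response.Unpack(response)
+//     # Execution errors live in response.status, not on the Operation.
+//     if response.status.code != 0:  # 0 is google.rpc OK
+//         raise RuntimeError('action failed: ' + response.status.message)
+//     return response.result, response.cached_result
+// ```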
+
+// Metadata about an ongoing
+// [execution][build.bazel.remote.execution.v2.Execution.Execute], which
+// will be contained in the [metadata
+// field][google.longrunning.Operation.metadata] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteOperationMetadata {
+ // The current stage of execution.
+ enum Stage {
+ UNKNOWN = 0;
+
+ // Checking the result against the cache.
+ CACHE_CHECK = 1;
+
+ // Currently idle, awaiting a free machine to execute.
+ QUEUED = 2;
+
+ // Currently being executed by a worker.
+ EXECUTING = 3;
+
+ // Finished execution.
+ COMPLETED = 4;
+ }
+
+ Stage stage = 1;
+
+ // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+ // being executed.
+ Digest action_digest = 2;
+
+ // If set, the client can use this name with
+ // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
+ // standard output.
+ string stdout_stream_name = 3;
+
+ // If set, the client can use this name with
+ // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
+ // standard error.
+ string stderr_stream_name = 4;
+}
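+
+// A sketch of live-streaming stdout with the ByteStream stubs from this tree
+// (`channel` is assumed; `metadata` is an ExecuteOperationMetadata unpacked
+// from an in-flight Operation):
+//
+// ```python
+// import sys
+// from buildstream._protos.google.bytestream import (
+//     bytestream_pb2, bytestream_pb2_grpc)
+//
+// def stream_stdout(channel, metadata):
+//     if not metadata.stdout_stream_name:
+//         return  # the server chose not to expose a stream
+//     bytestream = bytestream_pb2_grpc.ByteStreamStub(channel)
+//     read = bytestream_pb2.ReadRequest(resource_name=metadata.stdout_stream_name)
+//     for response in bytestream.Read(read):
+//         sys.stdout.buffer.write(response.data)
+// ```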
+
+// A request message for
+// [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution].
+message WaitExecutionRequest {
+  // The name of the [Operation][google.longrunning.Operation]
+ // returned by [Execute][build.bazel.remote.execution.v2.Execution.Execute].
+ string name = 1;
+}
+
+// A request message for
+// [ActionCache.GetActionResult][build.bazel.remote.execution.v2.ActionCache.GetActionResult].
+message GetActionResultRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion; otherwise, it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+ // whose result is requested.
+ Digest action_digest = 2;
+}
+
+// A request message for
+// [ActionCache.UpdateActionResult][build.bazel.remote.execution.v2.ActionCache.UpdateActionResult].
+message UpdateActionResultRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion; otherwise, it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+ // whose result is being uploaded.
+ Digest action_digest = 2;
+
+ // The [ActionResult][build.bazel.remote.execution.v2.ActionResult]
+ // to store in the cache.
+ ActionResult action_result = 3;
+
+ // An optional policy for the results of this execution in the remote cache.
+ // The server will have a default policy if this is not provided.
+ // This may be applied to both the ActionResult and the associated blobs.
+ ResultsCachePolicy results_cache_policy = 4;
+}
+
+// A request message for
+// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
+message FindMissingBlobsRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion; otherwise, it can be
+ // omitted.
+ string instance_name = 1;
+
+ // A list of the blobs to check.
+ repeated Digest blob_digests = 2;
+}
+
+// A response message for
+// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
+message FindMissingBlobsResponse {
+  // A list of the requested blobs that are *not* present in the storage.
+ repeated Digest missing_blob_digests = 2;
+}
+
+// A single request message for
+// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+message UpdateBlobRequest {
+ // The digest of the blob. This MUST be the digest of `data`.
+ Digest content_digest = 1;
+
+ // The raw binary data.
+ bytes data = 2;
+}
+
+// A request message for
+// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+message BatchUpdateBlobsRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion; otherwise, it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The individual upload requests.
+ repeated UpdateBlobRequest requests = 2;
+}
+
+// A response message for
+// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+message BatchUpdateBlobsResponse {
+ // A response corresponding to a single blob that the client tried to upload.
+ message Response {
+ // The digest to which this response corresponds.
+ Digest blob_digest = 1;
+
+ // The result of attempting to upload that blob.
+ google.rpc.Status status = 2;
+ }
+
+ // The responses to the requests.
+ repeated Response responses = 1;
+}
+
+// A request message for
+// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
+message GetTreeRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion; otherwise, it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The digest of the root, which must be an encoded
+ // [Directory][build.bazel.remote.execution.v2.Directory] message
+ // stored in the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ Digest root_digest = 2;
+
+  // A maximum page size to request. If present, the server will return no more
+ // than this many items. Regardless of whether a page size is specified, the
+ // server may place its own limit on the number of items to be returned and
+ // require the client to retrieve more items using a subsequent request.
+ int32 page_size = 3;
+
+ // A page token, which must be a value received in a previous
+ // [GetTreeResponse][build.bazel.remote.execution.v2.GetTreeResponse].
+ // If present, the server will use it to return the following page of results.
+ string page_token = 4;
+}
+
+// A response message for
+// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
+message GetTreeResponse {
+ // The directories descended from the requested root.
+ repeated Directory directories = 1;
+
+ // If present, signifies that there are more results which the client can
+ // retrieve by passing this as the page_token in a subsequent
+ // [request][build.bazel.remote.execution.v2.GetTreeRequest].
+ // If empty, signifies that this is the last page of results.
+ string next_page_token = 2;
+}
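+
+// A sketch of consuming the stream and resuming after an interruption,
+// following the page_token rules above (`cas` is an assumed
+// ContentAddressableStorage stub; retrying forever is illustrative only):
+//
+// ```python
+// import grpc
+// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+//
+// def get_full_tree(cas, instance_name, root_digest):
+//     directories, page_token = [], ''
+//     while True:
+//         try:
+//             for response in cas.GetTree(remote_execution_pb2.GetTreeRequest(
+//                     instance_name=instance_name,
+//                     root_digest=root_digest,
+//                     page_token=page_token)):
+//                 directories.extend(response.directories)
+//                 page_token = response.next_page_token
+//             return directories
+//         except grpc.RpcError:
+//             continue  # resume from the last successfully processed page
+// ```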
+
+// A request message for
+// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
+message GetCapabilitiesRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion; otherwise, it can be
+ // omitted.
+ string instance_name = 1;
+}
+
+// A response message for
+// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
+message ServerCapabilities {
+ // Capabilities of the remote cache system.
+ CacheCapabilities cache_capabilities = 1;
+
+ // Capabilities of the remote execution system.
+ ExecutionCapabilities execution_capabilities = 2;
+
+ // Earliest RE API version supported, including deprecated versions.
+ build.bazel.semver.SemVer deprecated_api_version = 3;
+
+ // Earliest non-deprecated RE API version supported.
+ build.bazel.semver.SemVer low_api_version = 4;
+
+ // Latest RE API version supported.
+ build.bazel.semver.SemVer high_api_version = 5;
+}
+
+// The digest function used for converting values into keys for CAS and Action
+// Cache.
+enum DigestFunction {
+ UNKNOWN = 0;
+ SHA256 = 1;
+ SHA1 = 2;
+ MD5 = 3;
+}
+
+// Describes the server/instance capabilities for updating the action cache.
+message ActionCacheUpdateCapabilities {
+ bool update_enabled = 1;
+}
+
+// Allowed values for priority in
+// [ResultsCachePolicy][build.bazel.remote.execution.v2.ResultsCachePolicy].
+// Used for querying the valid priority ranges of both caching and execution.
+message PriorityCapabilities {
+ // Supported range of priorities, including boundaries.
+ message PriorityRange {
+ int32 min_priority = 1;
+ int32 max_priority = 2;
+ }
+ repeated PriorityRange priorities = 1;
+}
+
+// Capabilities of the remote cache system.
+message CacheCapabilities {
+ // All the digest functions supported by the remote cache.
+  // A remote cache may support multiple digest functions simultaneously.
+ repeated DigestFunction digest_function = 1;
+
+ // Capabilities for updating the action cache.
+ ActionCacheUpdateCapabilities action_cache_update_capabilities = 2;
+
+ // Supported cache priority range for both CAS and ActionCache.
+ PriorityCapabilities cache_priority_capabilities = 3;
+}
+
+// Capabilities of the remote execution system.
+message ExecutionCapabilities {
+ // Remote execution may only support a single digest function.
+ DigestFunction digest_function = 1;
+
+ // Whether remote execution is enabled for the particular server/instance.
+ bool exec_enabled = 2;
+
+ // Supported execution priority range.
+ PriorityCapabilities execution_priority_capabilities = 3;
+}
+
+// Details for the tool used to call the API.
+message ToolDetails {
+ // Name of the tool, e.g. bazel.
+ string tool_name = 1;
+
+ // Version of the tool used for the request, e.g. 5.0.3.
+ string tool_version = 2;
+}
+
+// Optional metadata to attach to any RPC request to tell the server about an
+// external context of the request. The server may use this for logging or other
+// purposes. To use it, the client attaches the header to the call using the
+// canonical proto serialization:
+// name: build.bazel.remote.execution.v2.requestmetadata-bin
+// contents: the base64 encoded binary RequestMetadata message.
+message RequestMetadata {
+ // The details for the tool invoking the requests.
+ ToolDetails tool_details = 1;
+
+ // An identifier that ties multiple requests to the same action.
+ // For example, multiple requests to the CAS, Action Cache, and Execution
+ // API are used in order to compile foo.cc.
+ string action_id = 2;
+
+ // An identifier that ties multiple actions together to a final result.
+ // For example, multiple actions are required to build and run foo_test.
+ string tool_invocation_id = 3;
+
+ // An identifier to tie multiple tool invocations together. For example,
+ // runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
+ string correlated_invocations_id = 4;
+}
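+
+// A sketch of attaching the header with the Python gRPC API; for keys with the
+// "-bin" suffix, grpc itself base64-encodes the raw bytes, so the serialized
+// message is passed directly (the tool name, version and ids are illustrative):
+//
+// ```python
+// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+//
+// request_metadata = remote_execution_pb2.RequestMetadata(
+//     tool_details=remote_execution_pb2.ToolDetails(
+//         tool_name='buildstream', tool_version='1.1'),
+//     action_id='c4f2...', tool_invocation_id='build-20180709')
+// header = [('build.bazel.remote.execution.v2.requestmetadata-bin',
+//            request_metadata.SerializeToString())]
+//
+// # e.g. attach to any call made with a generated stub:
+// # capabilities_stub.GetCapabilities(request, metadata=header)
+// ```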
diff --git a/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py b/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
new file mode 100644
index 000000000..ea2c90787
--- /dev/null
+++ b/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
@@ -0,0 +1,2466 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: build/bazel/remote/execution/v2/remote_execution.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.build.bazel.semver import semver_pb2 as build_dot_bazel_dot_semver_dot_semver__pb2
+from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
+from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='build/bazel/remote/execution/v2/remote_execution.proto',
+ package='build.bazel.remote.execution.v2',
+ syntax='proto3',
+ serialized_pb=_b('\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xd5\x01\n\x06\x41\x63tion\x12?\n\x0e\x63ommand_digest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x42\n\x11input_root_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12*\n\x07timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x14\n\x0c\x64o_not_cache\x18\x07 \x01(\x08J\x04\x08\x03\x10\x06\"\xb7\x02\n\x07\x43ommand\x12\x11\n\targuments\x18\x01 \x03(\t\x12[\n\x15\x65nvironment_variables\x18\x02 \x03(\x0b\x32<.build.bazel.remote.execution.v2.Command.EnvironmentVariable\x12\x14\n\x0coutput_files\x18\x03 \x03(\t\x12\x1a\n\x12output_directories\x18\x04 \x03(\t\x12;\n\x08platform\x18\x05 \x01(\x0b\x32).build.bazel.remote.execution.v2.Platform\x12\x19\n\x11working_directory\x18\x06 \x01(\t\x1a\x32\n\x13\x45nvironmentVariable\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"{\n\x08Platform\x12\x46\n\nproperties\x18\x01 \x03(\x0b\x32\x32.build.bazel.remote.execution.v2.Platform.Property\x1a\'\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xca\x01\n\tDirectory\x12\x38\n\x05\x66iles\x18\x01 \x03(\x0b\x32).build.bazel.remote.execution.v2.FileNode\x12\x43\n\x0b\x64irectories\x18\x02 \x03(\x0b\x32..build.bazel.remote.execution.v2.DirectoryNode\x12>\n\x08symlinks\x18\x03 \x03(\x0b\x32,.build.bazel.remote.execution.v2.SymlinkNode\"n\n\x08\x46ileNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"V\n\rDirectoryNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"+\n\x0bSymlinkNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"*\n\x06\x44igest\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x12\n\nsize_bytes\x18\x02 \x01(\x03\"\xec\x04\n\x16\x45xecutedActionMetadata\x12\x0e\n\x06worker\x18\x01 \x01(\t\x12\x34\n\x10queued_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16worker_start_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12>\n\x1aworker_completed_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1binput_fetch_start_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x43\n\x1finput_fetch_completed_timestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12=\n\x19\x65xecution_start_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1d\x65xecution_completed_timestamp\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1doutput_upload_start_timestamp\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x45\n!output_upload_completed_timestamp\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb5\x03\n\x0c\x41\x63tionResult\x12\x41\n\x0coutput_files\x18\x02 \x03(\x0b\x32+.build.bazel.remote.execution.v2.OutputFile\x12L\n\x12output_directories\x18\x03 \x03(\x0b\x32\x30.build.bazel.remote.execution.v2.OutputDirectory\x12\x11\n\texit_code\x18\x04 \x01(\x05\x12\x12\n\nstdout_raw\x18\x05 \x01(\x0c\x12>\n\rstdout_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x12\n\nstderr_raw\x18\x07 
\x01(\x0c\x12>\n\rstderr_digest\x18\x08 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12S\n\x12\x65xecution_metadata\x18\t \x01(\x0b\x32\x37.build.bazel.remote.execution.v2.ExecutedActionMetadataJ\x04\x08\x01\x10\x02\"p\n\nOutputFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"~\n\x04Tree\x12\x38\n\x04root\x18\x01 \x01(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12<\n\x08\x63hildren\x18\x02 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\"c\n\x0fOutputDirectory\x12\x0c\n\x04path\x18\x01 \x01(\t\x12<\n\x0btree_digest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.DigestJ\x04\x08\x02\x10\x03\"#\n\x0f\x45xecutionPolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"&\n\x12ResultsCachePolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"\xb3\x02\n\x0e\x45xecuteRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x19\n\x11skip_cache_lookup\x18\x03 \x01(\x08\x12>\n\raction_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12J\n\x10\x65xecution_policy\x18\x07 \x01(\x0b\x32\x30.build.bazel.remote.execution.v2.ExecutionPolicy\x12Q\n\x14results_cache_policy\x18\x08 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicyJ\x04\x08\x02\x10\x03J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"Z\n\x07LogFile\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x16\n\x0ehuman_readable\x18\x02 \x01(\x08\"\xbf\x02\n\x0f\x45xecuteResponse\x12=\n\x06result\x18\x01 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12\x15\n\rcached_result\x18\x02 \x01(\x08\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12U\n\x0bserver_logs\x18\x04 \x03(\x0b\x32@.build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry\x1a[\n\x0fServerLogsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x37\n\x05value\x18\x02 \x01(\x0b\x32(.build.bazel.remote.execution.v2.LogFile:\x02\x38\x01\"\xb3\x02\n\x18\x45xecuteOperationMetadata\x12N\n\x05stage\x18\x01 \x01(\x0e\x32?.build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x1a\n\x12stdout_stream_name\x18\x03 \x01(\t\x12\x1a\n\x12stderr_stream_name\x18\x04 \x01(\t\"O\n\x05Stage\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0f\n\x0b\x43\x41\x43HE_CHECK\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\r\n\tEXECUTING\x10\x03\x12\r\n\tCOMPLETED\x10\x04\"$\n\x14WaitExecutionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"o\n\x16GetActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x8b\x02\n\x19UpdateActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x44\n\raction_result\x18\x03 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12Q\n\x14results_cache_policy\x18\x04 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicy\"o\n\x17\x46indMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"a\n\x18\x46indMissingBlobsResponse\x12\x45\n\x14missing_blob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"b\n\x11UpdateBlobRequest\x12?\n\x0e\x63ontent_digest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 
\x01(\x0c\"v\n\x17\x42\x61tchUpdateBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x44\n\x08requests\x18\x02 \x03(\x0b\x32\x32.build.bazel.remote.execution.v2.UpdateBlobRequest\"\xdf\x01\n\x18\x42\x61tchUpdateBlobsResponse\x12U\n\tresponses\x18\x01 \x03(\x0b\x32\x42.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response\x1al\n\x08Response\x12<\n\x0b\x62lob_digest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\x8c\x01\n\x0eGetTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\"k\n\x0fGetTreeResponse\x12?\n\x0b\x64irectories\x18\x01 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"/\n\x16GetCapabilitiesRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\xe3\x02\n\x12ServerCapabilities\x12N\n\x12\x63\x61\x63he_capabilities\x18\x01 \x01(\x0b\x32\x32.build.bazel.remote.execution.v2.CacheCapabilities\x12V\n\x16\x65xecution_capabilities\x18\x02 \x01(\x0b\x32\x36.build.bazel.remote.execution.v2.ExecutionCapabilities\x12:\n\x16\x64\x65precated_api_version\x18\x03 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x33\n\x0flow_api_version\x18\x04 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x34\n\x10high_api_version\x18\x05 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\"7\n\x1d\x41\x63tionCacheUpdateCapabilities\x12\x16\n\x0eupdate_enabled\x18\x01 \x01(\x08\"\xac\x01\n\x14PriorityCapabilities\x12W\n\npriorities\x18\x01 \x03(\x0b\x32\x43.build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange\x1a;\n\rPriorityRange\x12\x14\n\x0cmin_priority\x18\x01 \x01(\x05\x12\x14\n\x0cmax_priority\x18\x02 \x01(\x05\"\xa3\x02\n\x11\x43\x61\x63heCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x03(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12h\n action_cache_update_capabilities\x18\x02 \x01(\x0b\x32>.build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities\x12Z\n\x1b\x63\x61\x63he_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\"\xd7\x01\n\x15\x45xecutionCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x01(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12\x14\n\x0c\x65xec_enabled\x18\x02 \x01(\x08\x12^\n\x1f\x65xecution_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\"6\n\x0bToolDetails\x12\x11\n\ttool_name\x18\x01 \x01(\t\x12\x14\n\x0ctool_version\x18\x02 \x01(\t\"\xa7\x01\n\x0fRequestMetadata\x12\x42\n\x0ctool_details\x18\x01 \x01(\x0b\x32,.build.bazel.remote.execution.v2.ToolDetails\x12\x11\n\taction_id\x18\x02 \x01(\t\x12\x1a\n\x12tool_invocation_id\x18\x03 \x01(\t\x12!\n\x19\x63orrelated_invocations_id\x18\x04 
\x01(\t*<\n\x0e\x44igestFunction\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06SHA256\x10\x01\x12\x08\n\x04SHA1\x10\x02\x12\x07\n\x03MD5\x10\x03\x32\xb9\x02\n\tExecution\x12\x8e\x01\n\x07\x45xecute\x12/.build.bazel.remote.execution.v2.ExecuteRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/actions:execute:\x01*0\x01\x12\x9a\x01\n\rWaitExecution\x12\x35.build.bazel.remote.execution.v2.WaitExecutionRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{name=operations/**}:waitExecution:\x01*0\x01\x32\xd6\x03\n\x0b\x41\x63tionCache\x12\xd7\x01\n\x0fGetActionResult\x12\x37.build.bazel.remote.execution.v2.GetActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"\\\x82\xd3\xe4\x93\x02V\x12T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}\x12\xec\x01\n\x12UpdateActionResult\x12:.build.bazel.remote.execution.v2.UpdateActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"k\x82\xd3\xe4\x93\x02\x65\x1aT/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result2\xe4\x04\n\x19\x43ontentAddressableStorage\x12\xbc\x01\n\x10\x46indMissingBlobs\x12\x38.build.bazel.remote.execution.v2.FindMissingBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.FindMissingBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:findMissing:\x01*\x12\xbc\x01\n\x10\x42\x61tchUpdateBlobs\x12\x38.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:batchUpdate:\x01*\x12\xc8\x01\n\x07GetTree\x12/.build.bazel.remote.execution.v2.GetTreeRequest\x1a\x30.build.bazel.remote.execution.v2.GetTreeResponse\"X\x82\xd3\xe4\x93\x02R\x12P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree0\x01\x32\xbd\x01\n\x0c\x43\x61pabilities\x12\xac\x01\n\x0fGetCapabilities\x12\x37.build.bazel.remote.execution.v2.GetCapabilitiesRequest\x1a\x33.build.bazel.remote.execution.v2.ServerCapabilities\"+\x82\xd3\xe4\x93\x02%\x12#/v2/{instance_name=**}/capabilitiesBr\n\x1f\x62uild.bazel.remote.execution.v2B\x14RemoteExecutionProtoP\x01Z\x0fremoteexecution\xa2\x02\x03REX\xaa\x02\x1f\x42uild.Bazel.Remote.Execution.V2b\x06proto3')
+ ,
+ dependencies=[build_dot_bazel_dot_semver_dot_semver__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
+
+_DIGESTFUNCTION = _descriptor.EnumDescriptor(
+ name='DigestFunction',
+ full_name='build.bazel.remote.execution.v2.DigestFunction',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNKNOWN', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SHA256', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SHA1', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='MD5', index=3, number=3,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=6655,
+ serialized_end=6715,
+)
+_sym_db.RegisterEnumDescriptor(_DIGESTFUNCTION)
+
+DigestFunction = enum_type_wrapper.EnumTypeWrapper(_DIGESTFUNCTION)
+UNKNOWN = 0
+SHA256 = 1
+SHA1 = 2
+MD5 = 3
+
+
+_EXECUTEOPERATIONMETADATA_STAGE = _descriptor.EnumDescriptor(
+ name='Stage',
+ full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNKNOWN', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CACHE_CHECK', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='QUEUED', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='EXECUTING', index=3, number=3,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='COMPLETED', index=4, number=4,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=3866,
+ serialized_end=3945,
+)
+_sym_db.RegisterEnumDescriptor(_EXECUTEOPERATIONMETADATA_STAGE)
+
+
+_ACTION = _descriptor.Descriptor(
+ name='Action',
+ full_name='build.bazel.remote.execution.v2.Action',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='command_digest', full_name='build.bazel.remote.execution.v2.Action.command_digest', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='input_root_digest', full_name='build.bazel.remote.execution.v2.Action.input_root_digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='timeout', full_name='build.bazel.remote.execution.v2.Action.timeout', index=2,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='do_not_cache', full_name='build.bazel.remote.execution.v2.Action.do_not_cache', index=3,
+ number=7, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=282,
+ serialized_end=495,
+)
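+
+# The numeric codes in the FieldDescriptors above come from descriptor.proto:
+# type=11 is TYPE_MESSAGE, type=9 TYPE_STRING, type=8 TYPE_BOOL, type=12
+# TYPE_BYTES, type=5 TYPE_INT32, type=3 TYPE_INT64, type=14 TYPE_ENUM;
+# label=1 is LABEL_OPTIONAL and label=3 LABEL_REPEATED.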
+
+
+_COMMAND_ENVIRONMENTVARIABLE = _descriptor.Descriptor(
+ name='EnvironmentVariable',
+ full_name='build.bazel.remote.execution.v2.Command.EnvironmentVariable',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.Command.EnvironmentVariable.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='build.bazel.remote.execution.v2.Command.EnvironmentVariable.value', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=759,
+ serialized_end=809,
+)
+
+_COMMAND = _descriptor.Descriptor(
+ name='Command',
+ full_name='build.bazel.remote.execution.v2.Command',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='arguments', full_name='build.bazel.remote.execution.v2.Command.arguments', index=0,
+ number=1, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='environment_variables', full_name='build.bazel.remote.execution.v2.Command.environment_variables', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='output_files', full_name='build.bazel.remote.execution.v2.Command.output_files', index=2,
+ number=3, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='output_directories', full_name='build.bazel.remote.execution.v2.Command.output_directories', index=3,
+ number=4, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='platform', full_name='build.bazel.remote.execution.v2.Command.platform', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='working_directory', full_name='build.bazel.remote.execution.v2.Command.working_directory', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_COMMAND_ENVIRONMENTVARIABLE, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=498,
+ serialized_end=809,
+)
+
+
+_PLATFORM_PROPERTY = _descriptor.Descriptor(
+ name='Property',
+ full_name='build.bazel.remote.execution.v2.Platform.Property',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.Platform.Property.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='build.bazel.remote.execution.v2.Platform.Property.value', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=895,
+ serialized_end=934,
+)
+
+_PLATFORM = _descriptor.Descriptor(
+ name='Platform',
+ full_name='build.bazel.remote.execution.v2.Platform',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='properties', full_name='build.bazel.remote.execution.v2.Platform.properties', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_PLATFORM_PROPERTY, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=811,
+ serialized_end=934,
+)
+
+
+_DIRECTORY = _descriptor.Descriptor(
+ name='Directory',
+ full_name='build.bazel.remote.execution.v2.Directory',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='files', full_name='build.bazel.remote.execution.v2.Directory.files', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='directories', full_name='build.bazel.remote.execution.v2.Directory.directories', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='symlinks', full_name='build.bazel.remote.execution.v2.Directory.symlinks', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=937,
+ serialized_end=1139,
+)
+
+
+_FILENODE = _descriptor.Descriptor(
+ name='FileNode',
+ full_name='build.bazel.remote.execution.v2.FileNode',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.FileNode.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.FileNode.digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='is_executable', full_name='build.bazel.remote.execution.v2.FileNode.is_executable', index=2,
+ number=4, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1141,
+ serialized_end=1251,
+)
+
+
+_DIRECTORYNODE = _descriptor.Descriptor(
+ name='DirectoryNode',
+ full_name='build.bazel.remote.execution.v2.DirectoryNode',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.DirectoryNode.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.DirectoryNode.digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1253,
+ serialized_end=1339,
+)
+
+
+_SYMLINKNODE = _descriptor.Descriptor(
+ name='SymlinkNode',
+ full_name='build.bazel.remote.execution.v2.SymlinkNode',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.SymlinkNode.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='target', full_name='build.bazel.remote.execution.v2.SymlinkNode.target', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1341,
+ serialized_end=1384,
+)
+
+
+_DIGEST = _descriptor.Descriptor(
+ name='Digest',
+ full_name='build.bazel.remote.execution.v2.Digest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='hash', full_name='build.bazel.remote.execution.v2.Digest.hash', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='size_bytes', full_name='build.bazel.remote.execution.v2.Digest.size_bytes', index=1,
+ number=2, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1386,
+ serialized_end=1428,
+)
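+
+# Illustrative use (assumes the Digest message class that protoc generates
+# from this descriptor further down in the module):
+#   d = Digest(hash='0a1b...', size_bytes=1024)
+#   payload = d.SerializeToString()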
+
+
+_EXECUTEDACTIONMETADATA = _descriptor.Descriptor(
+ name='ExecutedActionMetadata',
+ full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='worker', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.worker', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='queued_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.queued_timestamp', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='worker_start_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.worker_start_timestamp', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='worker_completed_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.worker_completed_timestamp', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='input_fetch_start_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.input_fetch_start_timestamp', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='input_fetch_completed_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.input_fetch_completed_timestamp', index=5,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_start_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.execution_start_timestamp', index=6,
+ number=7, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_completed_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.execution_completed_timestamp', index=7,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='output_upload_start_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.output_upload_start_timestamp', index=8,
+ number=9, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='output_upload_completed_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.output_upload_completed_timestamp', index=9,
+ number=10, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1431,
+ serialized_end=2051,
+)
+
+
+_ACTIONRESULT = _descriptor.Descriptor(
+ name='ActionResult',
+ full_name='build.bazel.remote.execution.v2.ActionResult',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='output_files', full_name='build.bazel.remote.execution.v2.ActionResult.output_files', index=0,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='output_directories', full_name='build.bazel.remote.execution.v2.ActionResult.output_directories', index=1,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='exit_code', full_name='build.bazel.remote.execution.v2.ActionResult.exit_code', index=2,
+ number=4, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stdout_raw', full_name='build.bazel.remote.execution.v2.ActionResult.stdout_raw', index=3,
+ number=5, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stdout_digest', full_name='build.bazel.remote.execution.v2.ActionResult.stdout_digest', index=4,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stderr_raw', full_name='build.bazel.remote.execution.v2.ActionResult.stderr_raw', index=5,
+ number=7, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stderr_digest', full_name='build.bazel.remote.execution.v2.ActionResult.stderr_digest', index=6,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_metadata', full_name='build.bazel.remote.execution.v2.ActionResult.execution_metadata', index=7,
+ number=9, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2054,
+ serialized_end=2491,
+)
+
+
+_OUTPUTFILE = _descriptor.Descriptor(
+ name='OutputFile',
+ full_name='build.bazel.remote.execution.v2.OutputFile',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='path', full_name='build.bazel.remote.execution.v2.OutputFile.path', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.OutputFile.digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='is_executable', full_name='build.bazel.remote.execution.v2.OutputFile.is_executable', index=2,
+ number=4, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2493,
+ serialized_end=2605,
+)
+
+
+_TREE = _descriptor.Descriptor(
+ name='Tree',
+ full_name='build.bazel.remote.execution.v2.Tree',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='root', full_name='build.bazel.remote.execution.v2.Tree.root', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='children', full_name='build.bazel.remote.execution.v2.Tree.children', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2607,
+ serialized_end=2733,
+)
+
+
+_OUTPUTDIRECTORY = _descriptor.Descriptor(
+ name='OutputDirectory',
+ full_name='build.bazel.remote.execution.v2.OutputDirectory',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='path', full_name='build.bazel.remote.execution.v2.OutputDirectory.path', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='tree_digest', full_name='build.bazel.remote.execution.v2.OutputDirectory.tree_digest', index=1,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2735,
+ serialized_end=2834,
+)
+
+
+_EXECUTIONPOLICY = _descriptor.Descriptor(
+ name='ExecutionPolicy',
+ full_name='build.bazel.remote.execution.v2.ExecutionPolicy',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='priority', full_name='build.bazel.remote.execution.v2.ExecutionPolicy.priority', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2836,
+ serialized_end=2871,
+)
+
+
+_RESULTSCACHEPOLICY = _descriptor.Descriptor(
+ name='ResultsCachePolicy',
+ full_name='build.bazel.remote.execution.v2.ResultsCachePolicy',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='priority', full_name='build.bazel.remote.execution.v2.ResultsCachePolicy.priority', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2873,
+ serialized_end=2911,
+)
+
+
+_EXECUTEREQUEST = _descriptor.Descriptor(
+ name='ExecuteRequest',
+ full_name='build.bazel.remote.execution.v2.ExecuteRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.ExecuteRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='skip_cache_lookup', full_name='build.bazel.remote.execution.v2.ExecuteRequest.skip_cache_lookup', index=1,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_digest', full_name='build.bazel.remote.execution.v2.ExecuteRequest.action_digest', index=2,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_policy', full_name='build.bazel.remote.execution.v2.ExecuteRequest.execution_policy', index=3,
+ number=7, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='results_cache_policy', full_name='build.bazel.remote.execution.v2.ExecuteRequest.results_cache_policy', index=4,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2914,
+ serialized_end=3221,
+)
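+
+# Illustrative client sketch (assumes the ExecuteRequest class generated
+# below and an ExecutionStub from the companion remote_execution_pb2_grpc
+# module; some_digest stands in for a real Digest):
+#   request = ExecuteRequest(instance_name='main',
+#                            action_digest=some_digest,
+#                            skip_cache_lookup=False)
+#   for operation in execution_stub.Execute(request):  # server-streaming RPC
+#       pass  # each item is a google.longrunning.Operation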
+
+
+_LOGFILE = _descriptor.Descriptor(
+ name='LogFile',
+ full_name='build.bazel.remote.execution.v2.LogFile',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.LogFile.digest', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='human_readable', full_name='build.bazel.remote.execution.v2.LogFile.human_readable', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3223,
+ serialized_end=3313,
+)
+
+
+_EXECUTERESPONSE_SERVERLOGSENTRY = _descriptor.Descriptor(
+ name='ServerLogsEntry',
+ full_name='build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='key', full_name='build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry.key', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry.value', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3544,
+ serialized_end=3635,
+)
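+
+# The '8\001' in options above sets MessageOptions.map_entry=true:
+# ServerLogsEntry is the synthesized key/value entry type backing the
+# map<string, LogFile> server_logs field of ExecuteResponse below.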
+
+_EXECUTERESPONSE = _descriptor.Descriptor(
+ name='ExecuteResponse',
+ full_name='build.bazel.remote.execution.v2.ExecuteResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='result', full_name='build.bazel.remote.execution.v2.ExecuteResponse.result', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='cached_result', full_name='build.bazel.remote.execution.v2.ExecuteResponse.cached_result', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='status', full_name='build.bazel.remote.execution.v2.ExecuteResponse.status', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='server_logs', full_name='build.bazel.remote.execution.v2.ExecuteResponse.server_logs', index=3,
+ number=4, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_EXECUTERESPONSE_SERVERLOGSENTRY, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3316,
+ serialized_end=3635,
+)
+
+
+_EXECUTEOPERATIONMETADATA = _descriptor.Descriptor(
+ name='ExecuteOperationMetadata',
+ full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='stage', full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.stage', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_digest', full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.action_digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stdout_stream_name', full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.stdout_stream_name', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stderr_stream_name', full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.stderr_stream_name', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _EXECUTEOPERATIONMETADATA_STAGE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3638,
+ serialized_end=3945,
+)
+
+
+_WAITEXECUTIONREQUEST = _descriptor.Descriptor(
+ name='WaitExecutionRequest',
+ full_name='build.bazel.remote.execution.v2.WaitExecutionRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.WaitExecutionRequest.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3947,
+ serialized_end=3983,
+)
+
+
+_GETACTIONRESULTREQUEST = _descriptor.Descriptor(
+ name='GetActionResultRequest',
+ full_name='build.bazel.remote.execution.v2.GetActionResultRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.GetActionResultRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_digest', full_name='build.bazel.remote.execution.v2.GetActionResultRequest.action_digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3985,
+ serialized_end=4096,
+)
+
+
+_UPDATEACTIONRESULTREQUEST = _descriptor.Descriptor(
+ name='UpdateActionResultRequest',
+ full_name='build.bazel.remote.execution.v2.UpdateActionResultRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.UpdateActionResultRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_digest', full_name='build.bazel.remote.execution.v2.UpdateActionResultRequest.action_digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_result', full_name='build.bazel.remote.execution.v2.UpdateActionResultRequest.action_result', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='results_cache_policy', full_name='build.bazel.remote.execution.v2.UpdateActionResultRequest.results_cache_policy', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4099,
+ serialized_end=4366,
+)
+
+
+_FINDMISSINGBLOBSREQUEST = _descriptor.Descriptor(
+ name='FindMissingBlobsRequest',
+ full_name='build.bazel.remote.execution.v2.FindMissingBlobsRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.FindMissingBlobsRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='blob_digests', full_name='build.bazel.remote.execution.v2.FindMissingBlobsRequest.blob_digests', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4368,
+ serialized_end=4479,
+)
+
+
+_FINDMISSINGBLOBSRESPONSE = _descriptor.Descriptor(
+ name='FindMissingBlobsResponse',
+ full_name='build.bazel.remote.execution.v2.FindMissingBlobsResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='missing_blob_digests', full_name='build.bazel.remote.execution.v2.FindMissingBlobsResponse.missing_blob_digests', index=0,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4481,
+ serialized_end=4578,
+)
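+
+# Illustrative CAS negotiation (assumes the request/response classes generated
+# below and a ContentAddressableStorageStub from the companion grpc module):
+#   req = FindMissingBlobsRequest(instance_name='main',
+#                                 blob_digests=[digest_a, digest_b])
+#   resp = cas_stub.FindMissingBlobs(req)
+#   # resp.missing_blob_digests holds only the digests the server lacks,
+#   # so the client uploads just those blobs.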
+
+
+_UPDATEBLOBREQUEST = _descriptor.Descriptor(
+ name='UpdateBlobRequest',
+ full_name='build.bazel.remote.execution.v2.UpdateBlobRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='content_digest', full_name='build.bazel.remote.execution.v2.UpdateBlobRequest.content_digest', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='data', full_name='build.bazel.remote.execution.v2.UpdateBlobRequest.data', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4580,
+ serialized_end=4678,
+)
+
+
+_BATCHUPDATEBLOBSREQUEST = _descriptor.Descriptor(
+ name='BatchUpdateBlobsRequest',
+ full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='requests', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.requests', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4680,
+ serialized_end=4798,
+)
+
+
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE = _descriptor.Descriptor(
+ name='Response',
+ full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='blob_digest', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response.blob_digest', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='status', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response.status', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4916,
+ serialized_end=5024,
+)
+
+_BATCHUPDATEBLOBSRESPONSE = _descriptor.Descriptor(
+ name='BatchUpdateBlobsResponse',
+ full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='responses', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.responses', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_BATCHUPDATEBLOBSRESPONSE_RESPONSE, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4801,
+ serialized_end=5024,
+)
+
+
+_GETTREEREQUEST = _descriptor.Descriptor(
+ name='GetTreeRequest',
+ full_name='build.bazel.remote.execution.v2.GetTreeRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.GetTreeRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='root_digest', full_name='build.bazel.remote.execution.v2.GetTreeRequest.root_digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_size', full_name='build.bazel.remote.execution.v2.GetTreeRequest.page_size', index=2,
+ number=3, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_token', full_name='build.bazel.remote.execution.v2.GetTreeRequest.page_token', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5027,
+ serialized_end=5167,
+)
+
+
+_GETTREERESPONSE = _descriptor.Descriptor(
+ name='GetTreeResponse',
+ full_name='build.bazel.remote.execution.v2.GetTreeResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='directories', full_name='build.bazel.remote.execution.v2.GetTreeResponse.directories', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='next_page_token', full_name='build.bazel.remote.execution.v2.GetTreeResponse.next_page_token', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5169,
+ serialized_end=5276,
+)
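+
+# GetTree streams GetTreeResponse messages; an empty next_page_token on the
+# final response means the whole tree has been returned, otherwise it can be
+# fed back via GetTreeRequest.page_token (semantics per remote_execution.proto).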
+
+
+_GETCAPABILITIESREQUEST = _descriptor.Descriptor(
+ name='GetCapabilitiesRequest',
+ full_name='build.bazel.remote.execution.v2.GetCapabilitiesRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.GetCapabilitiesRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5278,
+ serialized_end=5325,
+)
+
+
+_SERVERCAPABILITIES = _descriptor.Descriptor(
+ name='ServerCapabilities',
+ full_name='build.bazel.remote.execution.v2.ServerCapabilities',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='cache_capabilities', full_name='build.bazel.remote.execution.v2.ServerCapabilities.cache_capabilities', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_capabilities', full_name='build.bazel.remote.execution.v2.ServerCapabilities.execution_capabilities', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='deprecated_api_version', full_name='build.bazel.remote.execution.v2.ServerCapabilities.deprecated_api_version', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='low_api_version', full_name='build.bazel.remote.execution.v2.ServerCapabilities.low_api_version', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='high_api_version', full_name='build.bazel.remote.execution.v2.ServerCapabilities.high_api_version', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5328,
+ serialized_end=5683,
+)
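+
+# Illustrative capability probe (assumes the GetCapabilitiesRequest class
+# generated below and a CapabilitiesStub from the companion grpc module):
+#   caps = capabilities_stub.GetCapabilities(
+#       GetCapabilitiesRequest(instance_name='main'))
+#   # caps.cache_capabilities.digest_function lists supported DigestFunctions.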
+
+
+_ACTIONCACHEUPDATECAPABILITIES = _descriptor.Descriptor(
+ name='ActionCacheUpdateCapabilities',
+ full_name='build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='update_enabled', full_name='build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities.update_enabled', index=0,
+ number=1, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5685,
+ serialized_end=5740,
+)
+
+
+_PRIORITYCAPABILITIES_PRIORITYRANGE = _descriptor.Descriptor(
+ name='PriorityRange',
+ full_name='build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='min_priority', full_name='build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange.min_priority', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='max_priority', full_name='build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange.max_priority', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5856,
+ serialized_end=5915,
+)
+
+_PRIORITYCAPABILITIES = _descriptor.Descriptor(
+ name='PriorityCapabilities',
+ full_name='build.bazel.remote.execution.v2.PriorityCapabilities',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='priorities', full_name='build.bazel.remote.execution.v2.PriorityCapabilities.priorities', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_PRIORITYCAPABILITIES_PRIORITYRANGE, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5743,
+ serialized_end=5915,
+)
+
+
+_CACHECAPABILITIES = _descriptor.Descriptor(
+ name='CacheCapabilities',
+ full_name='build.bazel.remote.execution.v2.CacheCapabilities',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='digest_function', full_name='build.bazel.remote.execution.v2.CacheCapabilities.digest_function', index=0,
+ number=1, type=14, cpp_type=8, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_cache_update_capabilities', full_name='build.bazel.remote.execution.v2.CacheCapabilities.action_cache_update_capabilities', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='cache_priority_capabilities', full_name='build.bazel.remote.execution.v2.CacheCapabilities.cache_priority_capabilities', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5918,
+ serialized_end=6209,
+)
+
+
+_EXECUTIONCAPABILITIES = _descriptor.Descriptor(
+ name='ExecutionCapabilities',
+ full_name='build.bazel.remote.execution.v2.ExecutionCapabilities',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='digest_function', full_name='build.bazel.remote.execution.v2.ExecutionCapabilities.digest_function', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='exec_enabled', full_name='build.bazel.remote.execution.v2.ExecutionCapabilities.exec_enabled', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_priority_capabilities', full_name='build.bazel.remote.execution.v2.ExecutionCapabilities.execution_priority_capabilities', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=6212,
+ serialized_end=6427,
+)
+
+
+_TOOLDETAILS = _descriptor.Descriptor(
+ name='ToolDetails',
+ full_name='build.bazel.remote.execution.v2.ToolDetails',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='tool_name', full_name='build.bazel.remote.execution.v2.ToolDetails.tool_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='tool_version', full_name='build.bazel.remote.execution.v2.ToolDetails.tool_version', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=6429,
+ serialized_end=6483,
+)
+
+
+_REQUESTMETADATA = _descriptor.Descriptor(
+ name='RequestMetadata',
+ full_name='build.bazel.remote.execution.v2.RequestMetadata',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='tool_details', full_name='build.bazel.remote.execution.v2.RequestMetadata.tool_details', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_id', full_name='build.bazel.remote.execution.v2.RequestMetadata.action_id', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='tool_invocation_id', full_name='build.bazel.remote.execution.v2.RequestMetadata.tool_invocation_id', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='correlated_invocations_id', full_name='build.bazel.remote.execution.v2.RequestMetadata.correlated_invocations_id', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=6486,
+ serialized_end=6653,
+)
+
+_ACTION.fields_by_name['command_digest'].message_type = _DIGEST
+_ACTION.fields_by_name['input_root_digest'].message_type = _DIGEST
+_ACTION.fields_by_name['timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
+_COMMAND_ENVIRONMENTVARIABLE.containing_type = _COMMAND
+_COMMAND.fields_by_name['environment_variables'].message_type = _COMMAND_ENVIRONMENTVARIABLE
+_COMMAND.fields_by_name['platform'].message_type = _PLATFORM
+_PLATFORM_PROPERTY.containing_type = _PLATFORM
+_PLATFORM.fields_by_name['properties'].message_type = _PLATFORM_PROPERTY
+_DIRECTORY.fields_by_name['files'].message_type = _FILENODE
+_DIRECTORY.fields_by_name['directories'].message_type = _DIRECTORYNODE
+_DIRECTORY.fields_by_name['symlinks'].message_type = _SYMLINKNODE
+_FILENODE.fields_by_name['digest'].message_type = _DIGEST
+_DIRECTORYNODE.fields_by_name['digest'].message_type = _DIGEST
+_EXECUTEDACTIONMETADATA.fields_by_name['queued_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['worker_start_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['worker_completed_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['input_fetch_start_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['input_fetch_completed_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['execution_start_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['execution_completed_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['output_upload_start_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['output_upload_completed_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_ACTIONRESULT.fields_by_name['output_files'].message_type = _OUTPUTFILE
+_ACTIONRESULT.fields_by_name['output_directories'].message_type = _OUTPUTDIRECTORY
+_ACTIONRESULT.fields_by_name['stdout_digest'].message_type = _DIGEST
+_ACTIONRESULT.fields_by_name['stderr_digest'].message_type = _DIGEST
+_ACTIONRESULT.fields_by_name['execution_metadata'].message_type = _EXECUTEDACTIONMETADATA
+_OUTPUTFILE.fields_by_name['digest'].message_type = _DIGEST
+_TREE.fields_by_name['root'].message_type = _DIRECTORY
+_TREE.fields_by_name['children'].message_type = _DIRECTORY
+_OUTPUTDIRECTORY.fields_by_name['tree_digest'].message_type = _DIGEST
+_EXECUTEREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
+_EXECUTEREQUEST.fields_by_name['execution_policy'].message_type = _EXECUTIONPOLICY
+_EXECUTEREQUEST.fields_by_name['results_cache_policy'].message_type = _RESULTSCACHEPOLICY
+_LOGFILE.fields_by_name['digest'].message_type = _DIGEST
+_EXECUTERESPONSE_SERVERLOGSENTRY.fields_by_name['value'].message_type = _LOGFILE
+_EXECUTERESPONSE_SERVERLOGSENTRY.containing_type = _EXECUTERESPONSE
+_EXECUTERESPONSE.fields_by_name['result'].message_type = _ACTIONRESULT
+_EXECUTERESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_EXECUTERESPONSE.fields_by_name['server_logs'].message_type = _EXECUTERESPONSE_SERVERLOGSENTRY
+_EXECUTEOPERATIONMETADATA.fields_by_name['stage'].enum_type = _EXECUTEOPERATIONMETADATA_STAGE
+_EXECUTEOPERATIONMETADATA.fields_by_name['action_digest'].message_type = _DIGEST
+_EXECUTEOPERATIONMETADATA_STAGE.containing_type = _EXECUTEOPERATIONMETADATA
+_GETACTIONRESULTREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
+_UPDATEACTIONRESULTREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
+_UPDATEACTIONRESULTREQUEST.fields_by_name['action_result'].message_type = _ACTIONRESULT
+_UPDATEACTIONRESULTREQUEST.fields_by_name['results_cache_policy'].message_type = _RESULTSCACHEPOLICY
+_FINDMISSINGBLOBSREQUEST.fields_by_name['blob_digests'].message_type = _DIGEST
+_FINDMISSINGBLOBSRESPONSE.fields_by_name['missing_blob_digests'].message_type = _DIGEST
+_UPDATEBLOBREQUEST.fields_by_name['content_digest'].message_type = _DIGEST
+_BATCHUPDATEBLOBSREQUEST.fields_by_name['requests'].message_type = _UPDATEBLOBREQUEST
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE.fields_by_name['blob_digest'].message_type = _DIGEST
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE.containing_type = _BATCHUPDATEBLOBSRESPONSE
+_BATCHUPDATEBLOBSRESPONSE.fields_by_name['responses'].message_type = _BATCHUPDATEBLOBSRESPONSE_RESPONSE
+_GETTREEREQUEST.fields_by_name['root_digest'].message_type = _DIGEST
+_GETTREERESPONSE.fields_by_name['directories'].message_type = _DIRECTORY
+_SERVERCAPABILITIES.fields_by_name['cache_capabilities'].message_type = _CACHECAPABILITIES
+_SERVERCAPABILITIES.fields_by_name['execution_capabilities'].message_type = _EXECUTIONCAPABILITIES
+_SERVERCAPABILITIES.fields_by_name['deprecated_api_version'].message_type = build_dot_bazel_dot_semver_dot_semver__pb2._SEMVER
+_SERVERCAPABILITIES.fields_by_name['low_api_version'].message_type = build_dot_bazel_dot_semver_dot_semver__pb2._SEMVER
+_SERVERCAPABILITIES.fields_by_name['high_api_version'].message_type = build_dot_bazel_dot_semver_dot_semver__pb2._SEMVER
+_PRIORITYCAPABILITIES_PRIORITYRANGE.containing_type = _PRIORITYCAPABILITIES
+_PRIORITYCAPABILITIES.fields_by_name['priorities'].message_type = _PRIORITYCAPABILITIES_PRIORITYRANGE
+_CACHECAPABILITIES.fields_by_name['digest_function'].enum_type = _DIGESTFUNCTION
+_CACHECAPABILITIES.fields_by_name['action_cache_update_capabilities'].message_type = _ACTIONCACHEUPDATECAPABILITIES
+_CACHECAPABILITIES.fields_by_name['cache_priority_capabilities'].message_type = _PRIORITYCAPABILITIES
+_EXECUTIONCAPABILITIES.fields_by_name['digest_function'].enum_type = _DIGESTFUNCTION
+_EXECUTIONCAPABILITIES.fields_by_name['execution_priority_capabilities'].message_type = _PRIORITYCAPABILITIES
+_REQUESTMETADATA.fields_by_name['tool_details'].message_type = _TOOLDETAILS
+DESCRIPTOR.message_types_by_name['Action'] = _ACTION
+DESCRIPTOR.message_types_by_name['Command'] = _COMMAND
+DESCRIPTOR.message_types_by_name['Platform'] = _PLATFORM
+DESCRIPTOR.message_types_by_name['Directory'] = _DIRECTORY
+DESCRIPTOR.message_types_by_name['FileNode'] = _FILENODE
+DESCRIPTOR.message_types_by_name['DirectoryNode'] = _DIRECTORYNODE
+DESCRIPTOR.message_types_by_name['SymlinkNode'] = _SYMLINKNODE
+DESCRIPTOR.message_types_by_name['Digest'] = _DIGEST
+DESCRIPTOR.message_types_by_name['ExecutedActionMetadata'] = _EXECUTEDACTIONMETADATA
+DESCRIPTOR.message_types_by_name['ActionResult'] = _ACTIONRESULT
+DESCRIPTOR.message_types_by_name['OutputFile'] = _OUTPUTFILE
+DESCRIPTOR.message_types_by_name['Tree'] = _TREE
+DESCRIPTOR.message_types_by_name['OutputDirectory'] = _OUTPUTDIRECTORY
+DESCRIPTOR.message_types_by_name['ExecutionPolicy'] = _EXECUTIONPOLICY
+DESCRIPTOR.message_types_by_name['ResultsCachePolicy'] = _RESULTSCACHEPOLICY
+DESCRIPTOR.message_types_by_name['ExecuteRequest'] = _EXECUTEREQUEST
+DESCRIPTOR.message_types_by_name['LogFile'] = _LOGFILE
+DESCRIPTOR.message_types_by_name['ExecuteResponse'] = _EXECUTERESPONSE
+DESCRIPTOR.message_types_by_name['ExecuteOperationMetadata'] = _EXECUTEOPERATIONMETADATA
+DESCRIPTOR.message_types_by_name['WaitExecutionRequest'] = _WAITEXECUTIONREQUEST
+DESCRIPTOR.message_types_by_name['GetActionResultRequest'] = _GETACTIONRESULTREQUEST
+DESCRIPTOR.message_types_by_name['UpdateActionResultRequest'] = _UPDATEACTIONRESULTREQUEST
+DESCRIPTOR.message_types_by_name['FindMissingBlobsRequest'] = _FINDMISSINGBLOBSREQUEST
+DESCRIPTOR.message_types_by_name['FindMissingBlobsResponse'] = _FINDMISSINGBLOBSRESPONSE
+DESCRIPTOR.message_types_by_name['UpdateBlobRequest'] = _UPDATEBLOBREQUEST
+DESCRIPTOR.message_types_by_name['BatchUpdateBlobsRequest'] = _BATCHUPDATEBLOBSREQUEST
+DESCRIPTOR.message_types_by_name['BatchUpdateBlobsResponse'] = _BATCHUPDATEBLOBSRESPONSE
+DESCRIPTOR.message_types_by_name['GetTreeRequest'] = _GETTREEREQUEST
+DESCRIPTOR.message_types_by_name['GetTreeResponse'] = _GETTREERESPONSE
+DESCRIPTOR.message_types_by_name['GetCapabilitiesRequest'] = _GETCAPABILITIESREQUEST
+DESCRIPTOR.message_types_by_name['ServerCapabilities'] = _SERVERCAPABILITIES
+DESCRIPTOR.message_types_by_name['ActionCacheUpdateCapabilities'] = _ACTIONCACHEUPDATECAPABILITIES
+DESCRIPTOR.message_types_by_name['PriorityCapabilities'] = _PRIORITYCAPABILITIES
+DESCRIPTOR.message_types_by_name['CacheCapabilities'] = _CACHECAPABILITIES
+DESCRIPTOR.message_types_by_name['ExecutionCapabilities'] = _EXECUTIONCAPABILITIES
+DESCRIPTOR.message_types_by_name['ToolDetails'] = _TOOLDETAILS
+DESCRIPTOR.message_types_by_name['RequestMetadata'] = _REQUESTMETADATA
+DESCRIPTOR.enum_types_by_name['DigestFunction'] = _DIGESTFUNCTION
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Action = _reflection.GeneratedProtocolMessageType('Action', (_message.Message,), dict(
+ DESCRIPTOR = _ACTION,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Action)
+ ))
+_sym_db.RegisterMessage(Action)
+
+Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), dict(
+
+ EnvironmentVariable = _reflection.GeneratedProtocolMessageType('EnvironmentVariable', (_message.Message,), dict(
+ DESCRIPTOR = _COMMAND_ENVIRONMENTVARIABLE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Command.EnvironmentVariable)
+ ))
+ ,
+ DESCRIPTOR = _COMMAND,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Command)
+ ))
+_sym_db.RegisterMessage(Command)
+_sym_db.RegisterMessage(Command.EnvironmentVariable)
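+
+# --- Editor's sketch (illustrative, not protoc output): the generated classes
+# above behave like ordinary protobuf messages and round-trip losslessly
+# through the proto3 wire format. All field values here are invented.
+def _sketch_command_roundtrip():
+    cmd = Command(arguments=['gcc', '-o', 'hello', 'hello.c'])
+    cmd.environment_variables.add(name='PATH', value='/usr/bin')
+    wire = cmd.SerializeToString()
+    assert Command.FromString(wire) == cmd
+    return wire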
+
+Platform = _reflection.GeneratedProtocolMessageType('Platform', (_message.Message,), dict(
+
+ Property = _reflection.GeneratedProtocolMessageType('Property', (_message.Message,), dict(
+ DESCRIPTOR = _PLATFORM_PROPERTY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Platform.Property)
+ ))
+ ,
+ DESCRIPTOR = _PLATFORM,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Platform)
+ ))
+_sym_db.RegisterMessage(Platform)
+_sym_db.RegisterMessage(Platform.Property)
+
+Directory = _reflection.GeneratedProtocolMessageType('Directory', (_message.Message,), dict(
+ DESCRIPTOR = _DIRECTORY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Directory)
+ ))
+_sym_db.RegisterMessage(Directory)
+
+FileNode = _reflection.GeneratedProtocolMessageType('FileNode', (_message.Message,), dict(
+ DESCRIPTOR = _FILENODE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FileNode)
+ ))
+_sym_db.RegisterMessage(FileNode)
+
+DirectoryNode = _reflection.GeneratedProtocolMessageType('DirectoryNode', (_message.Message,), dict(
+ DESCRIPTOR = _DIRECTORYNODE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.DirectoryNode)
+ ))
+_sym_db.RegisterMessage(DirectoryNode)
+
+SymlinkNode = _reflection.GeneratedProtocolMessageType('SymlinkNode', (_message.Message,), dict(
+ DESCRIPTOR = _SYMLINKNODE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.SymlinkNode)
+ ))
+_sym_db.RegisterMessage(SymlinkNode)
+
+Digest = _reflection.GeneratedProtocolMessageType('Digest', (_message.Message,), dict(
+ DESCRIPTOR = _DIGEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Digest)
+ ))
+_sym_db.RegisterMessage(Digest)
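+
+# --- Editor's sketch (illustrative, not protoc output): by REAPI convention a
+# Digest pairs the lowercase hex hash of a blob's raw bytes with the blob's
+# size in bytes. SHA-256 is assumed here as the server's digest function.
+def _sketch_digest_for_blob(data):
+    import hashlib
+    return Digest(hash=hashlib.sha256(data).hexdigest(), size_bytes=len(data))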
+
+ExecutedActionMetadata = _reflection.GeneratedProtocolMessageType('ExecutedActionMetadata', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTEDACTIONMETADATA,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutedActionMetadata)
+ ))
+_sym_db.RegisterMessage(ExecutedActionMetadata)
+
+ActionResult = _reflection.GeneratedProtocolMessageType('ActionResult', (_message.Message,), dict(
+ DESCRIPTOR = _ACTIONRESULT,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ActionResult)
+ ))
+_sym_db.RegisterMessage(ActionResult)
+
+OutputFile = _reflection.GeneratedProtocolMessageType('OutputFile', (_message.Message,), dict(
+ DESCRIPTOR = _OUTPUTFILE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.OutputFile)
+ ))
+_sym_db.RegisterMessage(OutputFile)
+
+Tree = _reflection.GeneratedProtocolMessageType('Tree', (_message.Message,), dict(
+ DESCRIPTOR = _TREE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Tree)
+ ))
+_sym_db.RegisterMessage(Tree)
+
+OutputDirectory = _reflection.GeneratedProtocolMessageType('OutputDirectory', (_message.Message,), dict(
+ DESCRIPTOR = _OUTPUTDIRECTORY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.OutputDirectory)
+ ))
+_sym_db.RegisterMessage(OutputDirectory)
+
+ExecutionPolicy = _reflection.GeneratedProtocolMessageType('ExecutionPolicy', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTIONPOLICY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutionPolicy)
+ ))
+_sym_db.RegisterMessage(ExecutionPolicy)
+
+ResultsCachePolicy = _reflection.GeneratedProtocolMessageType('ResultsCachePolicy', (_message.Message,), dict(
+ DESCRIPTOR = _RESULTSCACHEPOLICY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ResultsCachePolicy)
+ ))
+_sym_db.RegisterMessage(ResultsCachePolicy)
+
+ExecuteRequest = _reflection.GeneratedProtocolMessageType('ExecuteRequest', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTEREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteRequest)
+ ))
+_sym_db.RegisterMessage(ExecuteRequest)
+
+LogFile = _reflection.GeneratedProtocolMessageType('LogFile', (_message.Message,), dict(
+ DESCRIPTOR = _LOGFILE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.LogFile)
+ ))
+_sym_db.RegisterMessage(LogFile)
+
+ExecuteResponse = _reflection.GeneratedProtocolMessageType('ExecuteResponse', (_message.Message,), dict(
+
+ ServerLogsEntry = _reflection.GeneratedProtocolMessageType('ServerLogsEntry', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTERESPONSE_SERVERLOGSENTRY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry)
+ ))
+ ,
+ DESCRIPTOR = _EXECUTERESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteResponse)
+ ))
+_sym_db.RegisterMessage(ExecuteResponse)
+_sym_db.RegisterMessage(ExecuteResponse.ServerLogsEntry)
+
+ExecuteOperationMetadata = _reflection.GeneratedProtocolMessageType('ExecuteOperationMetadata', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTEOPERATIONMETADATA,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteOperationMetadata)
+ ))
+_sym_db.RegisterMessage(ExecuteOperationMetadata)
+
+WaitExecutionRequest = _reflection.GeneratedProtocolMessageType('WaitExecutionRequest', (_message.Message,), dict(
+ DESCRIPTOR = _WAITEXECUTIONREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.WaitExecutionRequest)
+ ))
+_sym_db.RegisterMessage(WaitExecutionRequest)
+
+GetActionResultRequest = _reflection.GeneratedProtocolMessageType('GetActionResultRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETACTIONRESULTREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetActionResultRequest)
+ ))
+_sym_db.RegisterMessage(GetActionResultRequest)
+
+UpdateActionResultRequest = _reflection.GeneratedProtocolMessageType('UpdateActionResultRequest', (_message.Message,), dict(
+ DESCRIPTOR = _UPDATEACTIONRESULTREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.UpdateActionResultRequest)
+ ))
+_sym_db.RegisterMessage(UpdateActionResultRequest)
+
+FindMissingBlobsRequest = _reflection.GeneratedProtocolMessageType('FindMissingBlobsRequest', (_message.Message,), dict(
+ DESCRIPTOR = _FINDMISSINGBLOBSREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FindMissingBlobsRequest)
+ ))
+_sym_db.RegisterMessage(FindMissingBlobsRequest)
+
+FindMissingBlobsResponse = _reflection.GeneratedProtocolMessageType('FindMissingBlobsResponse', (_message.Message,), dict(
+ DESCRIPTOR = _FINDMISSINGBLOBSRESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FindMissingBlobsResponse)
+ ))
+_sym_db.RegisterMessage(FindMissingBlobsResponse)
+
+UpdateBlobRequest = _reflection.GeneratedProtocolMessageType('UpdateBlobRequest', (_message.Message,), dict(
+ DESCRIPTOR = _UPDATEBLOBREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.UpdateBlobRequest)
+ ))
+_sym_db.RegisterMessage(UpdateBlobRequest)
+
+BatchUpdateBlobsRequest = _reflection.GeneratedProtocolMessageType('BatchUpdateBlobsRequest', (_message.Message,), dict(
+ DESCRIPTOR = _BATCHUPDATEBLOBSREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsRequest)
+ ))
+_sym_db.RegisterMessage(BatchUpdateBlobsRequest)
+
+BatchUpdateBlobsResponse = _reflection.GeneratedProtocolMessageType('BatchUpdateBlobsResponse', (_message.Message,), dict(
+
+ Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
+ DESCRIPTOR = _BATCHUPDATEBLOBSRESPONSE_RESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response)
+ ))
+ ,
+ DESCRIPTOR = _BATCHUPDATEBLOBSRESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsResponse)
+ ))
+_sym_db.RegisterMessage(BatchUpdateBlobsResponse)
+_sym_db.RegisterMessage(BatchUpdateBlobsResponse.Response)
+
+GetTreeRequest = _reflection.GeneratedProtocolMessageType('GetTreeRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETTREEREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetTreeRequest)
+ ))
+_sym_db.RegisterMessage(GetTreeRequest)
+
+GetTreeResponse = _reflection.GeneratedProtocolMessageType('GetTreeResponse', (_message.Message,), dict(
+ DESCRIPTOR = _GETTREERESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetTreeResponse)
+ ))
+_sym_db.RegisterMessage(GetTreeResponse)
+
+GetCapabilitiesRequest = _reflection.GeneratedProtocolMessageType('GetCapabilitiesRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETCAPABILITIESREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetCapabilitiesRequest)
+ ))
+_sym_db.RegisterMessage(GetCapabilitiesRequest)
+
+ServerCapabilities = _reflection.GeneratedProtocolMessageType('ServerCapabilities', (_message.Message,), dict(
+ DESCRIPTOR = _SERVERCAPABILITIES,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ServerCapabilities)
+ ))
+_sym_db.RegisterMessage(ServerCapabilities)
+
+ActionCacheUpdateCapabilities = _reflection.GeneratedProtocolMessageType('ActionCacheUpdateCapabilities', (_message.Message,), dict(
+ DESCRIPTOR = _ACTIONCACHEUPDATECAPABILITIES,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities)
+ ))
+_sym_db.RegisterMessage(ActionCacheUpdateCapabilities)
+
+PriorityCapabilities = _reflection.GeneratedProtocolMessageType('PriorityCapabilities', (_message.Message,), dict(
+
+ PriorityRange = _reflection.GeneratedProtocolMessageType('PriorityRange', (_message.Message,), dict(
+ DESCRIPTOR = _PRIORITYCAPABILITIES_PRIORITYRANGE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange)
+ ))
+ ,
+ DESCRIPTOR = _PRIORITYCAPABILITIES,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.PriorityCapabilities)
+ ))
+_sym_db.RegisterMessage(PriorityCapabilities)
+_sym_db.RegisterMessage(PriorityCapabilities.PriorityRange)
+
+CacheCapabilities = _reflection.GeneratedProtocolMessageType('CacheCapabilities', (_message.Message,), dict(
+ DESCRIPTOR = _CACHECAPABILITIES,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.CacheCapabilities)
+ ))
+_sym_db.RegisterMessage(CacheCapabilities)
+
+ExecutionCapabilities = _reflection.GeneratedProtocolMessageType('ExecutionCapabilities', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTIONCAPABILITIES,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutionCapabilities)
+ ))
+_sym_db.RegisterMessage(ExecutionCapabilities)
+
+ToolDetails = _reflection.GeneratedProtocolMessageType('ToolDetails', (_message.Message,), dict(
+ DESCRIPTOR = _TOOLDETAILS,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ToolDetails)
+ ))
+_sym_db.RegisterMessage(ToolDetails)
+
+RequestMetadata = _reflection.GeneratedProtocolMessageType('RequestMetadata', (_message.Message,), dict(
+ DESCRIPTOR = _REQUESTMETADATA,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.RequestMetadata)
+ ))
+_sym_db.RegisterMessage(RequestMetadata)
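+
+# --- Editor's sketch (illustrative, not protoc output): RequestMetadata is not
+# carried in request messages; clients attach it as a binary gRPC header. The
+# header name follows the convention documented in remote_execution.proto, and
+# the returned list can be passed as the `metadata=` argument of a stub call.
+def _sketch_request_metadata(tool_name, tool_version, action_id):
+    md = RequestMetadata(
+        tool_details=ToolDetails(tool_name=tool_name, tool_version=tool_version),
+        action_id=action_id)
+    return [('build.bazel.remote.execution.v2.requestmetadata-bin',
+             md.SerializeToString())]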
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\037build.bazel.remote.execution.v2B\024RemoteExecutionProtoP\001Z\017remoteexecution\242\002\003REX\252\002\037Build.Bazel.Remote.Execution.V2'))
+_EXECUTERESPONSE_SERVERLOGSENTRY.has_options = True
+_EXECUTERESPONSE_SERVERLOGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
+
+_EXECUTION = _descriptor.ServiceDescriptor(
+ name='Execution',
+ full_name='build.bazel.remote.execution.v2.Execution',
+ file=DESCRIPTOR,
+ index=0,
+ options=None,
+ serialized_start=6718,
+ serialized_end=7031,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='Execute',
+ full_name='build.bazel.remote.execution.v2.Execution.Execute',
+ index=0,
+ containing_service=None,
+ input_type=_EXECUTEREQUEST,
+ output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002+\"&/v2/{instance_name=**}/actions:execute:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='WaitExecution',
+ full_name='build.bazel.remote.execution.v2.Execution.WaitExecution',
+ index=1,
+ containing_service=None,
+ input_type=_WAITEXECUTIONREQUEST,
+ output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002+\"&/v2/{name=operations/**}:waitExecution:\001*')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_EXECUTION)
+
+DESCRIPTOR.services_by_name['Execution'] = _EXECUTION
+
+
+_ACTIONCACHE = _descriptor.ServiceDescriptor(
+ name='ActionCache',
+ full_name='build.bazel.remote.execution.v2.ActionCache',
+ file=DESCRIPTOR,
+ index=1,
+ options=None,
+ serialized_start=7034,
+ serialized_end=7504,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='GetActionResult',
+ full_name='build.bazel.remote.execution.v2.ActionCache.GetActionResult',
+ index=0,
+ containing_service=None,
+ input_type=_GETACTIONRESULTREQUEST,
+ output_type=_ACTIONRESULT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002V\022T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='UpdateActionResult',
+ full_name='build.bazel.remote.execution.v2.ActionCache.UpdateActionResult',
+ index=1,
+ containing_service=None,
+ input_type=_UPDATEACTIONRESULTREQUEST,
+ output_type=_ACTIONRESULT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002e\032T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_ACTIONCACHE)
+
+DESCRIPTOR.services_by_name['ActionCache'] = _ACTIONCACHE
+
+
+_CONTENTADDRESSABLESTORAGE = _descriptor.ServiceDescriptor(
+ name='ContentAddressableStorage',
+ full_name='build.bazel.remote.execution.v2.ContentAddressableStorage',
+ file=DESCRIPTOR,
+ index=2,
+ options=None,
+ serialized_start=7507,
+ serialized_end=8119,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='FindMissingBlobs',
+ full_name='build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs',
+ index=0,
+ containing_service=None,
+ input_type=_FINDMISSINGBLOBSREQUEST,
+ output_type=_FINDMISSINGBLOBSRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002-\"(/v2/{instance_name=**}/blobs:findMissing:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='BatchUpdateBlobs',
+ full_name='build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs',
+ index=1,
+ containing_service=None,
+ input_type=_BATCHUPDATEBLOBSREQUEST,
+ output_type=_BATCHUPDATEBLOBSRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002-\"(/v2/{instance_name=**}/blobs:batchUpdate:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='GetTree',
+ full_name='build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree',
+ index=2,
+ containing_service=None,
+ input_type=_GETTREEREQUEST,
+ output_type=_GETTREERESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002R\022P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_CONTENTADDRESSABLESTORAGE)
+
+DESCRIPTOR.services_by_name['ContentAddressableStorage'] = _CONTENTADDRESSABLESTORAGE
+
+
+_CAPABILITIES = _descriptor.ServiceDescriptor(
+ name='Capabilities',
+ full_name='build.bazel.remote.execution.v2.Capabilities',
+ file=DESCRIPTOR,
+ index=3,
+ options=None,
+ serialized_start=8122,
+ serialized_end=8311,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='GetCapabilities',
+ full_name='build.bazel.remote.execution.v2.Capabilities.GetCapabilities',
+ index=0,
+ containing_service=None,
+ input_type=_GETCAPABILITIESREQUEST,
+ output_type=_SERVERCAPABILITIES,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002%\022#/v2/{instance_name=**}/capabilities')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_CAPABILITIES)
+
+DESCRIPTOR.services_by_name['Capabilities'] = _CAPABILITIES
+
+# @@protoc_insertion_point(module_scope)
diff --git a/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py b/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
new file mode 100644
index 000000000..7e78cf442
--- /dev/null
+++ b/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
@@ -0,0 +1,556 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2
+from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
+
+
+class ExecutionStub(object):
+ """The Remote Execution API is used to execute an
+ [Action][build.bazel.remote.execution.v2.Action] on the remote
+ workers.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.Execute = channel.unary_stream(
+ '/build.bazel.remote.execution.v2.Execution/Execute',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
+ self.WaitExecution = channel.unary_stream(
+ '/build.bazel.remote.execution.v2.Execution/WaitExecution',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
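+
+# --- Editor's sketch (illustrative, not generated code): driving Execute end
+# to end. The address is an assumption; Execute streams longrunning Operation
+# messages until one arrives with `done` set, whose `response` field packs the
+# final ExecuteResponse.
+def _sketch_execute(action_digest):
+    channel = grpc.insecure_channel('localhost:50051')
+    stub = ExecutionStub(channel)
+    request = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest(
+        instance_name='', action_digest=action_digest)
+    for operation in stub.Execute(request):
+        if operation.done:
+            return operation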
+
+
+class ExecutionServicer(object):
+ """The Remote Execution API is used to execute an
+ [Action][build.bazel.remote.execution.v2.Action] on the remote
+ workers.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def Execute(self, request, context):
+ """Execute an action remotely.
+
+ In order to execute an action, the client must first upload all of the
+ inputs, the
+ [Command][build.bazel.remote.execution.v2.Command] to run, and the
+ [Action][build.bazel.remote.execution.v2.Action] into the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ It then calls `Execute` with an `action_digest` referring to them. The
+ server will run the action and eventually return the result.
+
+ The input `Action`'s fields MUST meet the various canonicalization
+ requirements specified in the documentation for their types so that it has
+ the same digest as other logically equivalent `Action`s. The server MAY
+ enforce the requirements and return errors if a non-canonical input is
+ received. It MAY also proceed without verifying some or all of the
+ requirements, such as for performance reasons. If the server does not
+ verify the requirement, then it will treat the `Action` as distinct from
+ another logically equivalent action if they hash differently.
+
+ Returns a stream of
+ [google.longrunning.Operation][google.longrunning.Operation] messages
+ describing the resulting execution, with eventual `response`
+ [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The
+ `metadata` on the operation is of type
+ [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata].
+
+ If the client remains connected after the first response is returned by
+ the server, then updates are streamed as if the client had called
+ [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]
+ until the execution completes or the request reaches an error. The
+ operation can also be queried using the [Operations
+ API][google.longrunning.Operations.GetOperation].
+
+ The server NEED NOT implement other methods or functionality of the
+ Operations API.
+
+ Errors discovered during creation of the `Operation` will be reported
+ as gRPC Status errors, while errors that occurred while running the
+ action will be reported in the `status` field of the `ExecuteResponse`. The
+ server MUST NOT set the `error` field of the `Operation` proto.
+ The possible errors include:
+ * `INVALID_ARGUMENT`: One or more arguments are invalid.
+ * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
+ action requested, such as a missing input or command or no worker being
+ available. The client may be able to fix the errors and retry.
+ * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
+ the action.
+ * `UNAVAILABLE`: Due to a transient condition, such as all workers being
+ occupied (and the server does not support a queue), the action could not
+ be started. The client should retry.
+ * `INTERNAL`: An internal error occurred in the execution engine or the
+ worker.
+ * `DEADLINE_EXCEEDED`: The execution timed out.
+
+ In the case of a missing input or command, the server SHOULD additionally
+ send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
+ where, for each requested blob not present in the CAS, there is a
+ `Violation` with a `type` of `MISSING` and a `subject` of
+ `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def WaitExecution(self, request, context):
+ """Wait for an execution operation to complete. When the client initially
+ makes the request, the server immediately responds with the current status
+ of the execution. The server will leave the request stream open until the
+ operation completes, and then respond with the completed operation. The
+ server MAY choose to stream additional updates as execution progresses,
+ such as to provide an update as to the state of the execution.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ExecutionServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'Execute': grpc.unary_stream_rpc_method_handler(
+ servicer.Execute,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ 'WaitExecution': grpc.unary_stream_rpc_method_handler(
+ servicer.WaitExecution,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.Execution', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
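+
+# --- Editor's sketch (illustrative, not generated code): hosting the servicer
+# on a grpc.server. The port and worker count are arbitrary assumptions; a
+# real implementation would subclass ExecutionServicer and override its methods.
+def _sketch_serve_execution(servicer):
+    from concurrent import futures
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
+    add_ExecutionServicer_to_server(servicer, server)
+    server.add_insecure_port('[::]:50051')
+    server.start()
+    return server  # keep a reference; call server.stop(None) to shut down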
+
+
+class ActionCacheStub(object):
+ """The action cache API is used to query whether a given action has already been
+ performed and, if so, retrieve its result. Unlike the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
+ which addresses blobs by their own content, the action cache addresses the
+ [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
+ digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
+ which produced them.
+
+ The lifetime of entries in the action cache is implementation-specific, but
+ the server SHOULD assume that more recently used entries are more likely to
+ be used again. Additionally, action cache implementations SHOULD ensure that
+ any blobs referenced in the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+ are still valid when returning a result.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetActionResult = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ActionCache/GetActionResult',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
+ )
+ self.UpdateActionResult = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ActionCache/UpdateActionResult',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
+ )
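+
+# --- Editor's sketch (illustrative, not generated code): a cache probe. The
+# channel and action digest come from the caller; a NOT_FOUND status simply
+# signals a cache miss rather than a failure.
+def _sketch_get_cached_result(channel, action_digest):
+    stub = ActionCacheStub(channel)
+    request = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest(
+        instance_name='', action_digest=action_digest)
+    try:
+        return stub.GetActionResult(request)
+    except grpc.RpcError as error:
+        if error.code() == grpc.StatusCode.NOT_FOUND:
+            return None  # miss: fall back to executing the action
+        raise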
+
+
+class ActionCacheServicer(object):
+ """The action cache API is used to query whether a given action has already been
+ performed and, if so, retrieve its result. Unlike the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
+ which addresses blobs by their own content, the action cache addresses the
+ [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
+ digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
+ which produced them.
+
+ The lifetime of entries in the action cache is implementation-specific, but
+ the server SHOULD assume that more recently used entries are more likely to
+ be used again. Additionally, action cache implementations SHOULD ensure that
+ any blobs referenced in the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+ are still valid when returning a result.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def GetActionResult(self, request, context):
+ """Retrieve a cached execution result.
+
+ Errors:
+ * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def UpdateActionResult(self, request, context):
+ """Upload a new execution result.
+
+ This method is intended for servers which implement the distributed cache
+ independently of the
+ [Execution][build.bazel.remote.execution.v2.Execution] API. As a
+ result, it is OPTIONAL for servers to implement.
+
+ In order to allow the server to perform access control based on the type of
+ action, and to assist with client debugging, the client MUST first upload
+ the [Action][build.bazel.remote.execution.v2.Action] that produced the
+ result, along with its
+ [Command][build.bazel.remote.execution.v2.Command], into the
+ `ContentAddressableStorage`.
+
+ Errors:
+ * `NOT_IMPLEMENTED`: This method is not supported by the server.
+ * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+ entry to the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ActionCacheServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'GetActionResult': grpc.unary_unary_rpc_method_handler(
+ servicer.GetActionResult,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString,
+ ),
+ 'UpdateActionResult': grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateActionResult,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.ActionCache', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+class ContentAddressableStorageStub(object):
+ """The CAS (content-addressable storage) is used to store the inputs to and
+ outputs from the execution service. Each piece of content is addressed by the
+ digest of its binary data.
+
+ Most of the binary data stored in the CAS is opaque to the execution engine,
+ and is only used as a communication medium. In order to build an
+ [Action][build.bazel.remote.execution.v2.Action],
+ however, the client will need to also upload the
+ [Command][build.bazel.remote.execution.v2.Command] and input root
+ [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
+ The Command and Directory messages must be marshalled to wire format and then
+ uploaded under the hash as with any other piece of content. In practice, the
+ input root directory is likely to refer to other Directories in its
+ hierarchy, which must also each be uploaded on their own.
+
+ For small file uploads the client should group them together and call
+ [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
+ on chunks of no more than 10 MiB. For large uploads, the client must use the
+ [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+ `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+ where `instance_name` is as described in the next paragraph, `uuid` is a
+ version 4 UUID generated by the client, and `hash` and `size` are the
+ [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
+ `uuid` is used only to avoid collisions when multiple clients try to upload
+ the same file (or the same client tries to upload the file multiple times at
+ once on different threads), so the client MAY reuse the `uuid` for uploading
+ different blobs. The `resource_name` may optionally have a trailing filename
+ (or other metadata) for a client to use if it is storing URLs, as in
+ `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+ after the `size` is ignored.
+
+ A single server MAY support multiple instances of the execution system, each
+ with their own workers, storage, cache, etc. The exact relationship between
+ instances is up to the server. If the server does support multiple
+ instances, then the `instance_name` is an identifier, possibly containing
+ multiple path segments, used to distinguish between the various instances
+ on the server, in a manner defined by the server. For servers which do not
+ support multiple instances, the `instance_name` is the empty path and the
+ leading slash is omitted, so that the `resource_name` becomes
+ `uploads/{uuid}/blobs/{hash}/{size}`.
+
+ When attempting an upload, if another client has already completed the upload
+ (which may occur in the middle of a single upload if another client uploads
+ the same blob concurrently), the request will terminate immediately with
+ a response whose `committed_size` is the full size of the uploaded file
+ (regardless of how much data was transmitted by the client). If the client
+ completes the upload but the
+ [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
+ `INVALID_ARGUMENT` error will be returned. In either case, the client should
+ not attempt to retry the upload.
+
+ For downloading blobs, the client must use the
+ [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+ a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+ `instance_name` is the instance name (see above), and `hash` and `size` are
+ the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+
+ The lifetime of entries in the CAS is implementation-specific, but it SHOULD
+ be long enough to allow for newly-added and recently looked-up entries to be
+ used in subsequent calls (e.g. to
+ [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.FindMissingBlobs = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/FindMissingBlobs',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.FromString,
+ )
+ self.BatchUpdateBlobs = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchUpdateBlobs',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.FromString,
+ )
+ self.GetTree = channel.unary_stream(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/GetTree',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.FromString,
+ )
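+
+# --- Editor's sketch (illustrative, not generated code): the small-blob upload
+# flow described in the docstring above. `blobs` is assumed to be a list of
+# (Digest, bytes) pairs whose combined size stays under the 10 MiB batch limit,
+# and UpdateBlobRequest is assumed to carry the digest alongside the raw bytes.
+def _sketch_upload_small_blobs(channel, blobs):
+    pb2 = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2
+    stub = ContentAddressableStorageStub(channel)
+    missing = stub.FindMissingBlobs(pb2.FindMissingBlobsRequest(
+        instance_name='', blob_digests=[digest for digest, _ in blobs]))
+    wanted = set((d.hash, d.size_bytes) for d in missing.missing_blob_digests)
+    updates = [pb2.UpdateBlobRequest(content_digest=digest, data=data)
+               for digest, data in blobs
+               if (digest.hash, digest.size_bytes) in wanted]
+    return stub.BatchUpdateBlobs(pb2.BatchUpdateBlobsRequest(
+        instance_name='', requests=updates))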
+
+
+class ContentAddressableStorageServicer(object):
+ """The CAS (content-addressable storage) is used to store the inputs to and
+ outputs from the execution service. Each piece of content is addressed by the
+ digest of its binary data.
+
+ Most of the binary data stored in the CAS is opaque to the execution engine,
+ and is only used as a communication medium. In order to build an
+ [Action][build.bazel.remote.execution.v2.Action],
+ however, the client will need to also upload the
+ [Command][build.bazel.remote.execution.v2.Command] and input root
+ [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
+ The Command and Directory messages must be marshalled to wire format and then
+ uploaded under the hash as with any other piece of content. In practice, the
+ input root directory is likely to refer to other Directories in its
+ hierarchy, which must also each be uploaded on their own.
+
+ For small file uploads the client should group them together and call
+ [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
+ on chunks of no more than 10 MiB. For large uploads, the client must use the
+ [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+ `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+ where `instance_name` is as described in the next paragraph, `uuid` is a
+ version 4 UUID generated by the client, and `hash` and `size` are the
+ [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
+ `uuid` is used only to avoid collisions when multiple clients try to upload
+ the same file (or the same client tries to upload the file multiple times at
+ once on different threads), so the client MAY reuse the `uuid` for uploading
+ different blobs. The `resource_name` may optionally have a trailing filename
+ (or other metadata) for a client to use if it is storing URLs, as in
+ `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+ after the `size` is ignored.
+
+ A single server MAY support multiple instances of the execution system, each
+ with their own workers, storage, cache, etc. The exact relationship between
+ instances is up to the server. If the server does support multiple
+ instances, then the `instance_name` is an identifier, possibly containing
+ multiple path segments, used to distinguish between the various instances
+ on the server, in a manner defined by the server. For servers which do not
+ support multiple instances, the `instance_name` is the empty path and the
+ leading slash is omitted, so that the `resource_name` becomes
+ `uploads/{uuid}/blobs/{hash}/{size}`.
+
+ When attempting an upload, if another client has already completed the upload
+ (which may occur in the middle of a single upload if another client uploads
+ the same blob concurrently), the request will terminate immediately with
+ a response whose `committed_size` is the full size of the uploaded file
+ (regardless of how much data was transmitted by the client). If the client
+ completes the upload but the
+ [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
+ `INVALID_ARGUMENT` error will be returned. In either case, the client should
+ not attempt to retry the upload.
+
+ For downloading blobs, the client must use the
+ [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+ a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+ `instance_name` is the instance name (see above), and `hash` and `size` are
+ the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+
+ The lifetime of entries in the CAS is implementation-specific, but it SHOULD
+ be long enough to allow for newly-added and recently looked-up entries to be
+ used in subsequent calls (e.g. to
+ [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def FindMissingBlobs(self, request, context):
+ """Determine if blobs are present in the CAS.
+
+ Clients can use this API before uploading blobs to determine which ones are
+ already present in the CAS and do not need to be uploaded again.
+
+ There are no method-specific errors.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def BatchUpdateBlobs(self, request, context):
+ """Upload many blobs at once.
+
+ The client MUST NOT upload blobs with a combined total size of more than 10
+ MiB using this API. Such requests should either be split into smaller
+ chunks or uploaded using the
+ [ByteStream API][google.bytestream.ByteStream], as appropriate.
+
+ This request is equivalent to calling a hypothetical `UpdateBlob` request
+ on each individual blob, in parallel. The requests may succeed or fail
+ independently.
+
+ Errors:
+ * `INVALID_ARGUMENT`: The client attempted to upload more than 10 MiB of
+ data.
+
+ Additionally, individual requests may return the following errors:
+ * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
+ * `INVALID_ARGUMENT`: The
+ [Digest][build.bazel.remote.execution.v2.Digest] does not match the
+ provided data.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetTree(self, request, context):
+ """Fetch the entire directory tree rooted at a node.
+
+ This request must be targeted at a
+ [Directory][build.bazel.remote.execution.v2.Directory] stored in the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+ (CAS). The server will enumerate the `Directory` tree recursively and
+ return every node descended from the root.
+
+ The GetTreeRequest.page_token parameter can be used to skip ahead in
+ the stream (e.g. when retrying a partially completed and aborted request),
+ by setting it to a value taken from the GetTreeResponse.next_page_token of
+ the last successfully processed GetTreeResponse (a client sketch covering
+ this paging flow follows this file's diff).
+
+ The exact traversal order is unspecified and, unless retrieving subsequent
+ pages from an earlier request, is not guaranteed to be stable across
+ multiple invocations of `GetTree`.
+
+ If part of the tree is missing from the CAS, the server will return the
+ portion present and omit the rest.
+
+ * `NOT_FOUND`: The requested tree root is not present in the CAS.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ContentAddressableStorageServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'FindMissingBlobs': grpc.unary_unary_rpc_method_handler(
+ servicer.FindMissingBlobs,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.SerializeToString,
+ ),
+ 'BatchUpdateBlobs': grpc.unary_unary_rpc_method_handler(
+ servicer.BatchUpdateBlobs,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.SerializeToString,
+ ),
+ 'GetTree': grpc.unary_stream_rpc_method_handler(
+ servicer.GetTree,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.ContentAddressableStorage', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+class CapabilitiesStub(object):
+ """The Capabilities service may be used by remote execution clients to query
+ various server properties, in order to self-configure or return meaningful
+ error messages.
+
+ The query may include a particular `instance_name`, in which case the values
+ returned will pertain to that instance.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetCapabilities = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.Capabilities/GetCapabilities',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.FromString,
+ )
+
+
+class CapabilitiesServicer(object):
+ """The Capabilities service may be used by remote execution clients to query
+ various server properties, in order to self-configure or return meaningful
+ error messages.
+
+ The query may include a particular `instance_name`, in which case the values
+ returned will pertain to that instance.
+ """
+
+ def GetCapabilities(self, request, context):
+ """GetCapabilities returns the server capabilities configuration.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_CapabilitiesServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'GetCapabilities': grpc.unary_unary_rpc_method_handler(
+ servicer.GetCapabilities,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.Capabilities', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
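
A hedged, illustrative client sketch of the flow these generated stubs support: ask the CAS which blobs are missing, upload one over ByteStream using the `uploads/{uuid}/blobs/{hash}/{size}` resource name documented above, then page through `GetTree`. The endpoint, the payload, and the use of the empty instance name are assumptions for the sketch, not anything this patch ships.

    import hashlib
    import uuid

    import grpc

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2, remote_execution_pb2_grpc)
    from buildstream._protos.google.bytestream import (
        bytestream_pb2, bytestream_pb2_grpc)

    channel = grpc.insecure_channel('localhost:50051')  # assumed endpoint
    cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(channel)
    bytestream = bytestream_pb2_grpc.ByteStreamStub(channel)

    data = b'example blob'
    digest = remote_execution_pb2.Digest(hash=hashlib.sha256(data).hexdigest(),
                                         size_bytes=len(data))

    # 1. FindMissingBlobs reports which digests still need uploading.
    missing = cas.FindMissingBlobs(
        remote_execution_pb2.FindMissingBlobsRequest(blob_digests=[digest]))

    if digest in missing.missing_blob_digests:
        # 2. Upload via ByteStream.Write(); with the empty instance name the
        #    resource name carries no leading path segment.
        resource = 'uploads/{}/blobs/{}/{}'.format(
            uuid.uuid4(), digest.hash, digest.size_bytes)
        requests = iter([bytestream_pb2.WriteRequest(
            resource_name=resource, write_offset=0, data=data,
            finish_write=True)])
        response = bytestream.Write(requests)
        # The server reports the full blob size even if another client won
        # the race and this client transmitted less.
        assert response.committed_size == digest.size_bytes

    # 3. Page through a directory tree; root_digest must refer to a Directory
    #    already in the CAS (digest is reused here only to keep the sketch
    #    self-contained).
    page_token = ''
    for tree_response in cas.GetTree(remote_execution_pb2.GetTreeRequest(
            root_digest=digest, page_token=page_token)):
        page_token = tree_response.next_page_token  # safe restart point
        for directory in tree_response.directories:
            pass  # process each Directory message
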
diff --git a/buildstream/_protos/build/bazel/semver/__init__.py b/buildstream/_protos/build/bazel/semver/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/build/bazel/semver/__init__.py
diff --git a/buildstream/_protos/build/bazel/semver/semver.proto b/buildstream/_protos/build/bazel/semver/semver.proto
new file mode 100644
index 000000000..2caf76bcc
--- /dev/null
+++ b/buildstream/_protos/build/bazel/semver/semver.proto
@@ -0,0 +1,24 @@
+// Copyright 2018 The Bazel Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package build.bazel.semver;
+
+message SemVer {
+ int32 major = 1;
+ int32 minor = 2;
+ int32 patch = 3;
+ string prerelease = 4;
+}
diff --git a/buildstream/_protos/build/bazel/semver/semver_pb2.py b/buildstream/_protos/build/bazel/semver/semver_pb2.py
new file mode 100644
index 000000000..a36cf722a
--- /dev/null
+++ b/buildstream/_protos/build/bazel/semver/semver_pb2.py
@@ -0,0 +1,90 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: build/bazel/semver/semver.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='build/bazel/semver/semver.proto',
+ package='build.bazel.semver',
+ syntax='proto3',
+ serialized_pb=_b('\n\x1f\x62uild/bazel/semver/semver.proto\x12\x12\x62uild.bazel.semver\"I\n\x06SemVer\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\x12\n\nprerelease\x18\x04 \x01(\tb\x06proto3')
+)
+
+
+
+
+_SEMVER = _descriptor.Descriptor(
+ name='SemVer',
+ full_name='build.bazel.semver.SemVer',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='major', full_name='build.bazel.semver.SemVer.major', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='minor', full_name='build.bazel.semver.SemVer.minor', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='patch', full_name='build.bazel.semver.SemVer.patch', index=2,
+ number=3, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='prerelease', full_name='build.bazel.semver.SemVer.prerelease', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=55,
+ serialized_end=128,
+)
+
+DESCRIPTOR.message_types_by_name['SemVer'] = _SEMVER
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+SemVer = _reflection.GeneratedProtocolMessageType('SemVer', (_message.Message,), dict(
+ DESCRIPTOR = _SEMVER,
+ __module__ = 'build.bazel.semver.semver_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.semver.SemVer)
+ ))
+_sym_db.RegisterMessage(SemVer)
+
+
+# @@protoc_insertion_point(module_scope)
diff --git a/buildstream/_protos/build/bazel/semver/semver_pb2_grpc.py b/buildstream/_protos/build/bazel/semver/semver_pb2_grpc.py
new file mode 100644
index 000000000..a89435267
--- /dev/null
+++ b/buildstream/_protos/build/bazel/semver/semver_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/buildstream/_protos/buildstream/__init__.py b/buildstream/_protos/buildstream/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/buildstream/__init__.py
diff --git a/buildstream/_protos/buildstream/buildstream.proto b/buildstream/_protos/buildstream/buildstream.proto
new file mode 100644
index 000000000..edd95c6fc
--- /dev/null
+++ b/buildstream/_protos/buildstream/buildstream.proto
@@ -0,0 +1,78 @@
+syntax = "proto3";
+
+package buildstream;
+
+import "build/bazel/remote/execution/v2/remote_execution.proto";
+import "google/api/annotations.proto";
+
+service ArtifactCache {
+ // Retrieve a cached artifact.
+ //
+ // Errors:
+ // * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+ rpc GetArtifact(GetArtifactRequest) returns (GetArtifactResponse) {
+ option (google.api.http) = { get: "/v1test/{instance_name=**}/buildstream/artifacts/{key}" };
+ }
+
+ // Associate a cache key with a CAS build artifact.
+ //
+ // Errors:
+ // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+ // entry to the cache.
+ rpc UpdateArtifact(UpdateArtifactRequest) returns (UpdateArtifactResponse) {
+ option (google.api.http) = { put: "/v1test/{instance_name=**}/buildstream/artifacts/{key}" body: "artifact" };
+ }
+
+ rpc Status(StatusRequest) returns (StatusResponse) {
+ option (google.api.http) = { put: "/v1test/{instance_name=**}/buildstream/artifacts:status" };
+ }
+}
+
+message GetArtifactRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The BuildStream cache key.
+ string key = 2;
+}
+
+message GetArtifactResponse {
+ // The digest of the artifact [Directory][build.bazel.remote.execution.v2.Directory].
+ build.bazel.remote.execution.v2.Digest artifact = 1;
+}
+
+message UpdateArtifactRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The BuildStream cache key.
+ repeated string keys = 2;
+
+ // The digest of the artifact [Directory][build.bazel.remote.execution.v2.Directory]
+ // to store in the cache.
+ build.bazel.remote.execution.v2.Digest artifact = 3;
+}
+
+message UpdateArtifactResponse {
+}
+
+message StatusRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+}
+
+message StatusResponse {
+ bool allow_updates = 1;
+}
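
Before the generated Python below, a hedged sketch of how a client might drive this service: check `Status` before pushing, associate keys with a CAS digest via `UpdateArtifact`, and map a key back to a digest with `GetArtifact`, honouring the documented `NOT_FOUND` contract. The endpoint, cache keys, and digest are placeholders.

    import grpc

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2)
    from buildstream._protos.buildstream import (
        buildstream_pb2, buildstream_pb2_grpc)

    channel = grpc.insecure_channel('cache.example.com:11001')  # assumed
    cache = buildstream_pb2_grpc.ArtifactCacheStub(channel)

    # Only push if the remote advertises that it accepts updates.
    if cache.Status(buildstream_pb2.StatusRequest()).allow_updates:
        artifact = remote_execution_pb2.Digest(hash='0' * 64,
                                               size_bytes=142)  # placeholder
        cache.UpdateArtifact(buildstream_pb2.UpdateArtifactRequest(
            keys=['example/strong-key', 'example/weak-key'],
            artifact=artifact))

    # Resolve a cache key back to the Digest of the artifact Directory.
    try:
        reply = cache.GetArtifact(
            buildstream_pb2.GetArtifactRequest(key='example/strong-key'))
        print(reply.artifact.hash, reply.artifact.size_bytes)
    except grpc.RpcError as e:
        if e.code() != grpc.StatusCode.NOT_FOUND:
            raise  # NOT_FOUND just means the key is not cached
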
diff --git a/buildstream/_protos/buildstream/buildstream_pb2.py b/buildstream/_protos/buildstream/buildstream_pb2.py
new file mode 100644
index 000000000..91a844418
--- /dev/null
+++ b/buildstream/_protos/buildstream/buildstream_pb2.py
@@ -0,0 +1,325 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: buildstream/buildstream.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2
+from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='buildstream/buildstream.proto',
+ package='buildstream',
+ syntax='proto3',
+ serialized_pb=_b('\n\x1d\x62uildstream/buildstream.proto\x12\x0b\x62uildstream\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"8\n\x12GetArtifactRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"P\n\x13GetArtifactResponse\x12\x39\n\x08\x61rtifact\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"w\n\x15UpdateArtifactRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04keys\x18\x02 \x03(\t\x12\x39\n\x08\x61rtifact\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x18\n\x16UpdateArtifactResponse\"&\n\rStatusRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\'\n\x0eStatusResponse\x12\x15\n\rallow_updates\x18\x01 \x01(\x08\x32\xcd\x03\n\rArtifactCache\x12\x90\x01\n\x0bGetArtifact\x12\x1f.buildstream.GetArtifactRequest\x1a .buildstream.GetArtifactResponse\">\x82\xd3\xe4\x93\x02\x38\x12\x36/v1test/{instance_name=**}/buildstream/artifacts/{key}\x12\xa3\x01\n\x0eUpdateArtifact\x12\".buildstream.UpdateArtifactRequest\x1a#.buildstream.UpdateArtifactResponse\"H\x82\xd3\xe4\x93\x02\x42\x1a\x36/v1test/{instance_name=**}/buildstream/artifacts/{key}:\x08\x61rtifact\x12\x82\x01\n\x06Status\x12\x1a.buildstream.StatusRequest\x1a\x1b.buildstream.StatusResponse\"?\x82\xd3\xe4\x93\x02\x39\x1a\x37/v1test/{instance_name=**}/buildstream/artifacts:statusb\x06proto3')
+ ,
+ dependencies=[build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
+
+
+
+
+_GETARTIFACTREQUEST = _descriptor.Descriptor(
+ name='GetArtifactRequest',
+ full_name='buildstream.GetArtifactRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='buildstream.GetArtifactRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='key', full_name='buildstream.GetArtifactRequest.key', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=132,
+ serialized_end=188,
+)
+
+
+_GETARTIFACTRESPONSE = _descriptor.Descriptor(
+ name='GetArtifactResponse',
+ full_name='buildstream.GetArtifactResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='artifact', full_name='buildstream.GetArtifactResponse.artifact', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=190,
+ serialized_end=270,
+)
+
+
+_UPDATEARTIFACTREQUEST = _descriptor.Descriptor(
+ name='UpdateArtifactRequest',
+ full_name='buildstream.UpdateArtifactRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='buildstream.UpdateArtifactRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='keys', full_name='buildstream.UpdateArtifactRequest.keys', index=1,
+ number=2, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='artifact', full_name='buildstream.UpdateArtifactRequest.artifact', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=272,
+ serialized_end=391,
+)
+
+
+_UPDATEARTIFACTRESPONSE = _descriptor.Descriptor(
+ name='UpdateArtifactResponse',
+ full_name='buildstream.UpdateArtifactResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=393,
+ serialized_end=417,
+)
+
+
+_STATUSREQUEST = _descriptor.Descriptor(
+ name='StatusRequest',
+ full_name='buildstream.StatusRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='buildstream.StatusRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=419,
+ serialized_end=457,
+)
+
+
+_STATUSRESPONSE = _descriptor.Descriptor(
+ name='StatusResponse',
+ full_name='buildstream.StatusResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='allow_updates', full_name='buildstream.StatusResponse.allow_updates', index=0,
+ number=1, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=459,
+ serialized_end=498,
+)
+
+_GETARTIFACTRESPONSE.fields_by_name['artifact'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_UPDATEARTIFACTREQUEST.fields_by_name['artifact'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+DESCRIPTOR.message_types_by_name['GetArtifactRequest'] = _GETARTIFACTREQUEST
+DESCRIPTOR.message_types_by_name['GetArtifactResponse'] = _GETARTIFACTRESPONSE
+DESCRIPTOR.message_types_by_name['UpdateArtifactRequest'] = _UPDATEARTIFACTREQUEST
+DESCRIPTOR.message_types_by_name['UpdateArtifactResponse'] = _UPDATEARTIFACTRESPONSE
+DESCRIPTOR.message_types_by_name['StatusRequest'] = _STATUSREQUEST
+DESCRIPTOR.message_types_by_name['StatusResponse'] = _STATUSRESPONSE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+GetArtifactRequest = _reflection.GeneratedProtocolMessageType('GetArtifactRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETARTIFACTREQUEST,
+ __module__ = 'buildstream.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.GetArtifactRequest)
+ ))
+_sym_db.RegisterMessage(GetArtifactRequest)
+
+GetArtifactResponse = _reflection.GeneratedProtocolMessageType('GetArtifactResponse', (_message.Message,), dict(
+ DESCRIPTOR = _GETARTIFACTRESPONSE,
+ __module__ = 'buildstream.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.GetArtifactResponse)
+ ))
+_sym_db.RegisterMessage(GetArtifactResponse)
+
+UpdateArtifactRequest = _reflection.GeneratedProtocolMessageType('UpdateArtifactRequest', (_message.Message,), dict(
+ DESCRIPTOR = _UPDATEARTIFACTREQUEST,
+ __module__ = 'buildstream.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.UpdateArtifactRequest)
+ ))
+_sym_db.RegisterMessage(UpdateArtifactRequest)
+
+UpdateArtifactResponse = _reflection.GeneratedProtocolMessageType('UpdateArtifactResponse', (_message.Message,), dict(
+ DESCRIPTOR = _UPDATEARTIFACTRESPONSE,
+ __module__ = 'buildstream.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.UpdateArtifactResponse)
+ ))
+_sym_db.RegisterMessage(UpdateArtifactResponse)
+
+StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), dict(
+ DESCRIPTOR = _STATUSREQUEST,
+ __module__ = 'buildstream.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.StatusRequest)
+ ))
+_sym_db.RegisterMessage(StatusRequest)
+
+StatusResponse = _reflection.GeneratedProtocolMessageType('StatusResponse', (_message.Message,), dict(
+ DESCRIPTOR = _STATUSRESPONSE,
+ __module__ = 'buildstream.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.StatusResponse)
+ ))
+_sym_db.RegisterMessage(StatusResponse)
+
+
+
+_ARTIFACTCACHE = _descriptor.ServiceDescriptor(
+ name='ArtifactCache',
+ full_name='buildstream.ArtifactCache',
+ file=DESCRIPTOR,
+ index=0,
+ options=None,
+ serialized_start=501,
+ serialized_end=962,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='GetArtifact',
+ full_name='buildstream.ArtifactCache.GetArtifact',
+ index=0,
+ containing_service=None,
+ input_type=_GETARTIFACTREQUEST,
+ output_type=_GETARTIFACTRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0028\0226/v1test/{instance_name=**}/buildstream/artifacts/{key}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='UpdateArtifact',
+ full_name='buildstream.ArtifactCache.UpdateArtifact',
+ index=1,
+ containing_service=None,
+ input_type=_UPDATEARTIFACTREQUEST,
+ output_type=_UPDATEARTIFACTRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002B\0326/v1test/{instance_name=**}/buildstream/artifacts/{key}:\010artifact')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='Status',
+ full_name='buildstream.ArtifactCache.Status',
+ index=2,
+ containing_service=None,
+ input_type=_STATUSREQUEST,
+ output_type=_STATUSRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0029\0327/v1test/{instance_name=**}/buildstream/artifacts:status')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_ARTIFACTCACHE)
+
+DESCRIPTOR.services_by_name['ArtifactCache'] = _ARTIFACTCACHE
+
+# @@protoc_insertion_point(module_scope)
diff --git a/buildstream/_protos/buildstream/buildstream_pb2_grpc.py b/buildstream/_protos/buildstream/buildstream_pb2_grpc.py
new file mode 100644
index 000000000..4ff0b3bff
--- /dev/null
+++ b/buildstream/_protos/buildstream/buildstream_pb2_grpc.py
@@ -0,0 +1,87 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream._protos.buildstream import buildstream_pb2 as buildstream_dot_buildstream__pb2
+
+
+class ArtifactCacheStub(object):
+ # missing associated documentation comment in .proto file
+ pass
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetArtifact = channel.unary_unary(
+ '/buildstream.ArtifactCache/GetArtifact',
+ request_serializer=buildstream_dot_buildstream__pb2.GetArtifactRequest.SerializeToString,
+ response_deserializer=buildstream_dot_buildstream__pb2.GetArtifactResponse.FromString,
+ )
+ self.UpdateArtifact = channel.unary_unary(
+ '/buildstream.ArtifactCache/UpdateArtifact',
+ request_serializer=buildstream_dot_buildstream__pb2.UpdateArtifactRequest.SerializeToString,
+ response_deserializer=buildstream_dot_buildstream__pb2.UpdateArtifactResponse.FromString,
+ )
+ self.Status = channel.unary_unary(
+ '/buildstream.ArtifactCache/Status',
+ request_serializer=buildstream_dot_buildstream__pb2.StatusRequest.SerializeToString,
+ response_deserializer=buildstream_dot_buildstream__pb2.StatusResponse.FromString,
+ )
+
+
+class ArtifactCacheServicer(object):
+ # missing associated documentation comment in .proto file
+ pass
+
+ def GetArtifact(self, request, context):
+ """Retrieve a cached artifact.
+
+ Errors:
+ * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def UpdateArtifact(self, request, context):
+ """Associate a cache key with a CAS build artifact.
+
+ Errors:
+ * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+ entry to the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def Status(self, request, context):
+ # missing associated documentation comment in .proto file
+ pass
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ArtifactCacheServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'GetArtifact': grpc.unary_unary_rpc_method_handler(
+ servicer.GetArtifact,
+ request_deserializer=buildstream_dot_buildstream__pb2.GetArtifactRequest.FromString,
+ response_serializer=buildstream_dot_buildstream__pb2.GetArtifactResponse.SerializeToString,
+ ),
+ 'UpdateArtifact': grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateArtifact,
+ request_deserializer=buildstream_dot_buildstream__pb2.UpdateArtifactRequest.FromString,
+ response_serializer=buildstream_dot_buildstream__pb2.UpdateArtifactResponse.SerializeToString,
+ ),
+ 'Status': grpc.unary_unary_rpc_method_handler(
+ servicer.Status,
+ request_deserializer=buildstream_dot_buildstream__pb2.StatusRequest.FromString,
+ response_serializer=buildstream_dot_buildstream__pb2.StatusResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'buildstream.ArtifactCache', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
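
The `add_ArtifactCacheServicer_to_server()` helper above is all that is needed to mount a servicer on a `grpc.Server`. A minimal, hedged server sketch follows; the port and the read-only policy are assumptions, not BuildStream's actual server configuration.

    import time
    from concurrent import futures

    import grpc

    from buildstream._protos.buildstream import (
        buildstream_pb2, buildstream_pb2_grpc)


    class ReadOnlyCache(buildstream_pb2_grpc.ArtifactCacheServicer):
        # Only Status is overridden; the inherited methods keep answering
        # UNIMPLEMENTED, as in the generated base class above.
        def Status(self, request, context):
            return buildstream_pb2.StatusResponse(allow_updates=False)


    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    buildstream_pb2_grpc.add_ArtifactCacheServicer_to_server(
        ReadOnlyCache(), server)
    server.add_insecure_port('[::]:11001')  # assumed port
    server.start()
    try:
        while True:
            time.sleep(3600)
    except KeyboardInterrupt:
        server.stop(0)
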
diff --git a/buildstream/_protos/google/__init__.py b/buildstream/_protos/google/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/google/__init__.py
diff --git a/buildstream/_protos/google/api/__init__.py b/buildstream/_protos/google/api/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/google/api/__init__.py
diff --git a/buildstream/_protos/google/api/annotations.proto b/buildstream/_protos/google/api/annotations.proto
new file mode 100644
index 000000000..85c361b47
--- /dev/null
+++ b/buildstream/_protos/google/api/annotations.proto
@@ -0,0 +1,31 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/http.proto";
+import "google/protobuf/descriptor.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+extend google.protobuf.MethodOptions {
+ // See `HttpRule`.
+ HttpRule http = 72295728;
+}
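
Because `http` extends `google.protobuf.MethodOptions`, a binding declared with this annotation (such as the ones in `buildstream.proto` earlier in this patch) can be read back reflectively at runtime. A small sketch, assuming only the modules generated in this patch:

    from buildstream._protos.buildstream import buildstream_pb2
    from buildstream._protos.google.api import annotations_pb2

    service = buildstream_pb2.DESCRIPTOR.services_by_name['ArtifactCache']
    method = service.methods_by_name['GetArtifact']

    # Extensions are keyed by the extension's FieldDescriptor.
    rule = method.GetOptions().Extensions[annotations_pb2.http]
    print(rule.get)  # /v1test/{instance_name=**}/buildstream/artifacts/{key}
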
diff --git a/buildstream/_protos/google/api/annotations_pb2.py b/buildstream/_protos/google/api/annotations_pb2.py
new file mode 100644
index 000000000..092c46de7
--- /dev/null
+++ b/buildstream/_protos/google/api/annotations_pb2.py
@@ -0,0 +1,46 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/api/annotations.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.google.api import http_pb2 as google_dot_api_dot_http__pb2
+from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/api/annotations.proto',
+ package='google.api',
+ syntax='proto3',
+ serialized_pb=_b('\n\x1cgoogle/api/annotations.proto\x12\ngoogle.api\x1a\x15google/api/http.proto\x1a google/protobuf/descriptor.proto:E\n\x04http\x12\x1e.google.protobuf.MethodOptions\x18\xb0\xca\xbc\" \x01(\x0b\x32\x14.google.api.HttpRuleBn\n\x0e\x63om.google.apiB\x10\x41nnotationsProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xa2\x02\x04GAPIb\x06proto3')
+ ,
+ dependencies=[google_dot_api_dot_http__pb2.DESCRIPTOR,google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
+
+
+HTTP_FIELD_NUMBER = 72295728
+http = _descriptor.FieldDescriptor(
+ name='http', full_name='google.api.http', index=0,
+ number=72295728, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=True, extension_scope=None,
+ options=None, file=DESCRIPTOR)
+
+DESCRIPTOR.extensions_by_name['http'] = http
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+http.message_type = google_dot_api_dot_http__pb2._HTTPRULE
+google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(http)
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\020AnnotationsProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\242\002\004GAPI'))
+# @@protoc_insertion_point(module_scope)
diff --git a/buildstream/_protos/google/api/annotations_pb2_grpc.py b/buildstream/_protos/google/api/annotations_pb2_grpc.py
new file mode 100644
index 000000000..a89435267
--- /dev/null
+++ b/buildstream/_protos/google/api/annotations_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/buildstream/_protos/google/api/http.proto b/buildstream/_protos/google/api/http.proto
new file mode 100644
index 000000000..78d515d4b
--- /dev/null
+++ b/buildstream/_protos/google/api/http.proto
@@ -0,0 +1,313 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "HttpProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Defines the HTTP configuration for an API service. It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+message Http {
+ // A list of HTTP configuration rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ repeated HttpRule rules = 1;
+
+ // When set to true, URL path parameters will be fully URI-decoded except in
+ // cases of single segment matches in reserved expansion, where "%2F" will be
+ // left encoded.
+ //
+ // The default behavior is to not decode RFC 6570 reserved characters in multi
+ // segment matches.
+ bool fully_decode_reserved_expansion = 2;
+}
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP
+// REST API methods. The mapping specifies how different portions of the RPC
+// request message are mapped to URL path, URL query parameters, and
+// HTTP request body. The mapping is typically specified as an
+// `google.api.http` annotation on the RPC method,
+// see "google/api/annotations.proto" for details.
+//
+// The mapping consists of a field specifying the path template and
+// method kind. The path template can refer to fields in the request
+// message, as in the example below which describes a REST GET
+// operation on a resource collection of messages:
+//
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
+// }
+// }
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // mapped to the URL
+// SubMessage sub = 2; // `sub.subfield` is url-mapped
+// }
+// message Message {
+// string text = 1; // content of the resource
+// }
+//
+// The same http annotation can alternatively be expressed inside the
+// `GRPC API Configuration` YAML file.
+//
+// http:
+// rules:
+// - selector: <proto_package_name>.Messaging.GetMessage
+// get: /v1/messages/{message_id}/{sub.subfield}
+//
+// This definition enables an automatic, bidirectional mapping of HTTP
+// JSON to RPC. Example:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
+//
+// In general, not only fields but also field paths can be referenced
+// from a path pattern. Fields mapped to the path pattern cannot be
+// repeated and must have a primitive (non-message) type.
+//
+// Any fields in the request message which are not bound by the path
+// pattern automatically become (optional) HTTP query
+// parameters. Assume the following definition of the request message:
+//
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http).get = "/v1/messages/{message_id}";
+// }
+// }
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // mapped to the URL
+// int64 revision = 2; // becomes a parameter
+// SubMessage sub = 3; // `sub.subfield` becomes a parameter
+// }
+//
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to HTTP parameters must have a
+// primitive type or a repeated primitive type. Message types are not
+// allowed. In the case of a repeated type, the parameter can be
+// repeated in the URL, as in `...?param=A&param=B`.
+//
+// For HTTP method kinds which allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//
+// service Messaging {
+// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// put: "/v1/messages/{message_id}"
+// body: "message"
+// };
+// }
+// }
+// message UpdateMessageRequest {
+// string message_id = 1; // mapped to the URL
+// Message message = 2; // mapped to the body
+// }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// protos JSON encoding:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body. This enables the following alternative definition of
+// the update method:
+//
+// service Messaging {
+// rpc UpdateMessage(Message) returns (Message) {
+// option (google.api.http) = {
+// put: "/v1/messages/{message_id}"
+// body: "*"
+// };
+// }
+// }
+// message Message {
+// string message_id = 1;
+// string text = 2;
+// }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// which don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// get: "/v1/messages/{message_id}"
+// additional_bindings {
+// get: "/v1/users/{user_id}/messages/{message_id}"
+// }
+// };
+// }
+// }
+// message GetMessageRequest {
+// string message_id = 1;
+// string user_id = 2;
+// }
+//
+//
+// This enables the following two alternative HTTP JSON to RPC
+// mappings:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+// omitted. If omitted, it indicates there is no HTTP request body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+// request) can be classified into three types:
+// (a) Matched in the URL template.
+// (b) Covered by body (if body is `*`, everything except (a) fields;
+// else everything under the body field)
+// (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+// Template = "/" Segments [ Verb ] ;
+// Segments = Segment { "/" Segment } ;
+// Segment = "*" | "**" | LITERAL | Variable ;
+// Variable = "{" FieldPath [ "=" Segments ] "}" ;
+// FieldPath = IDENT { "." IDENT } ;
+// Verb = ":" LITERAL ;
+//
+// The syntax `*` matches a single path segment. The syntax `**` matches zero
+// or more path segments, which must be the last part of the path except the
+// `Verb`. The syntax `LITERAL` matches literal text in the path.
+//
+// The syntax `Variable` matches part of the URL path as specified by its
+// template. A variable template must not contain other variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// If a variable contains exactly one path segment, such as `"{var}"` or
+// `"{var=*}"`, when such a variable is expanded into a URL path, all characters
+// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the
+// Discovery Document as `{var}`.
+//
+// If a variable contains one or more path segments, such as `"{var=foo/*}"`
+// or `"{var=**}"`, when such a variable is expanded into a URL path, all
+// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables
+// show up in the Discovery Document as `{+var}`.
+//
+// NOTE: While the single segment variable matches the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2
+// Simple String Expansion, the multi segment variable **does not** match
+// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs.
+//
+// NOTE: the field paths in variables and in the `body` must not refer to
+// repeated fields or map fields.
+message HttpRule {
+ // Selects methods to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+ // Determines the URL pattern that is matched by this rule. This pattern
+ // can be used with any of the {get|put|post|delete|patch} methods. A
+ // custom method can be defined using the 'custom' field.
+ oneof pattern {
+ // Used for listing and getting information about resources.
+ string get = 2;
+
+ // Used for updating a resource.
+ string put = 3;
+
+ // Used for creating a resource.
+ string post = 4;
+
+ // Used for deleting a resource.
+ string delete = 5;
+
+ // Used for updating a resource.
+ string patch = 6;
+
+ // The custom pattern is used for specifying an HTTP method that is not
+ // included in the `pattern` field, such as HEAD, or "*" to leave the
+ // HTTP method unspecified for this rule. The wild-card rule is useful
+ // for services that provide content to Web (HTML) clients.
+ CustomHttpPattern custom = 8;
+ }
+
+ // The name of the request field whose value is mapped to the HTTP body, or
+ // `*` for mapping all fields not captured by the path pattern to the HTTP
+ // body. NOTE: the referred field must not be a repeated field and must be
+ // present at the top level of the request message type.
+ string body = 7;
+
+ // Additional HTTP bindings for the selector. Nested bindings must
+ // not contain an `additional_bindings` field themselves (that is,
+ // the nesting may only be one level deep).
+ repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+message CustomHttpPattern {
+ // The name of this custom HTTP verb.
+ string kind = 1;
+
+ // The path matched by this custom verb.
+ string path = 2;
+}
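
The `pattern` group above is an ordinary proto3 oneof: assigning one member clears the others. A small sketch with the generated `http_pb2` module, reusing the `GetArtifact` binding from `buildstream.proto` purely as example data:

    from buildstream._protos.google.api import http_pb2

    rule = http_pb2.HttpRule(
        selector='buildstream.ArtifactCache.GetArtifact',
        get='/v1test/{instance_name=**}/buildstream/artifacts/{key}')

    # One extra binding; nesting deeper than one level is forbidden by the
    # comment on `additional_bindings`.
    rule.additional_bindings.add(get='/v1test/buildstream/artifacts/{key}')

    assert rule.WhichOneof('pattern') == 'get'
    rule.put = '/v1test/{instance_name=**}/buildstream/artifacts/{key}'
    assert rule.WhichOneof('pattern') == 'put'  # setting put cleared get
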
diff --git a/buildstream/_protos/google/api/http_pb2.py b/buildstream/_protos/google/api/http_pb2.py
new file mode 100644
index 000000000..aad9ddb97
--- /dev/null
+++ b/buildstream/_protos/google/api/http_pb2.py
@@ -0,0 +1,243 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/api/http.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/api/http.proto',
+ package='google.api',
+ syntax='proto3',
+ serialized_pb=_b('\n\x15google/api/http.proto\x12\ngoogle.api\"T\n\x04Http\x12#\n\x05rules\x18\x01 \x03(\x0b\x32\x14.google.api.HttpRule\x12\'\n\x1f\x66ully_decode_reserved_expansion\x18\x02 \x01(\x08\"\xea\x01\n\x08HttpRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12\r\n\x03get\x18\x02 \x01(\tH\x00\x12\r\n\x03put\x18\x03 \x01(\tH\x00\x12\x0e\n\x04post\x18\x04 \x01(\tH\x00\x12\x10\n\x06\x64\x65lete\x18\x05 \x01(\tH\x00\x12\x0f\n\x05patch\x18\x06 \x01(\tH\x00\x12/\n\x06\x63ustom\x18\x08 \x01(\x0b\x32\x1d.google.api.CustomHttpPatternH\x00\x12\x0c\n\x04\x62ody\x18\x07 \x01(\t\x12\x31\n\x13\x61\x64\x64itional_bindings\x18\x0b \x03(\x0b\x32\x14.google.api.HttpRuleB\t\n\x07pattern\"/\n\x11\x43ustomHttpPattern\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\tBj\n\x0e\x63om.google.apiB\tHttpProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xf8\x01\x01\xa2\x02\x04GAPIb\x06proto3')
+)
+
+
+
+
+_HTTP = _descriptor.Descriptor(
+ name='Http',
+ full_name='google.api.Http',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='rules', full_name='google.api.Http.rules', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='fully_decode_reserved_expansion', full_name='google.api.Http.fully_decode_reserved_expansion', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=37,
+ serialized_end=121,
+)
+
+
+_HTTPRULE = _descriptor.Descriptor(
+ name='HttpRule',
+ full_name='google.api.HttpRule',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='selector', full_name='google.api.HttpRule.selector', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='get', full_name='google.api.HttpRule.get', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='put', full_name='google.api.HttpRule.put', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='post', full_name='google.api.HttpRule.post', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='delete', full_name='google.api.HttpRule.delete', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='patch', full_name='google.api.HttpRule.patch', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='custom', full_name='google.api.HttpRule.custom', index=6,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='body', full_name='google.api.HttpRule.body', index=7,
+ number=7, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='additional_bindings', full_name='google.api.HttpRule.additional_bindings', index=8,
+ number=11, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ _descriptor.OneofDescriptor(
+ name='pattern', full_name='google.api.HttpRule.pattern',
+ index=0, containing_type=None, fields=[]),
+ ],
+ serialized_start=124,
+ serialized_end=358,
+)
+
+
+_CUSTOMHTTPPATTERN = _descriptor.Descriptor(
+ name='CustomHttpPattern',
+ full_name='google.api.CustomHttpPattern',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='kind', full_name='google.api.CustomHttpPattern.kind', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='path', full_name='google.api.CustomHttpPattern.path', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=360,
+ serialized_end=407,
+)
+
+_HTTP.fields_by_name['rules'].message_type = _HTTPRULE
+_HTTPRULE.fields_by_name['custom'].message_type = _CUSTOMHTTPPATTERN
+_HTTPRULE.fields_by_name['additional_bindings'].message_type = _HTTPRULE
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['get'])
+_HTTPRULE.fields_by_name['get'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['put'])
+_HTTPRULE.fields_by_name['put'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['post'])
+_HTTPRULE.fields_by_name['post'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['delete'])
+_HTTPRULE.fields_by_name['delete'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['patch'])
+_HTTPRULE.fields_by_name['patch'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['custom'])
+_HTTPRULE.fields_by_name['custom'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+DESCRIPTOR.message_types_by_name['Http'] = _HTTP
+DESCRIPTOR.message_types_by_name['HttpRule'] = _HTTPRULE
+DESCRIPTOR.message_types_by_name['CustomHttpPattern'] = _CUSTOMHTTPPATTERN
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Http = _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), dict(
+ DESCRIPTOR = _HTTP,
+ __module__ = 'google.api.http_pb2'
+ # @@protoc_insertion_point(class_scope:google.api.Http)
+ ))
+_sym_db.RegisterMessage(Http)
+
+HttpRule = _reflection.GeneratedProtocolMessageType('HttpRule', (_message.Message,), dict(
+ DESCRIPTOR = _HTTPRULE,
+ __module__ = 'google.api.http_pb2'
+ # @@protoc_insertion_point(class_scope:google.api.HttpRule)
+ ))
+_sym_db.RegisterMessage(HttpRule)
+
+CustomHttpPattern = _reflection.GeneratedProtocolMessageType('CustomHttpPattern', (_message.Message,), dict(
+ DESCRIPTOR = _CUSTOMHTTPPATTERN,
+ __module__ = 'google.api.http_pb2'
+ # @@protoc_insertion_point(class_scope:google.api.CustomHttpPattern)
+ ))
+_sym_db.RegisterMessage(CustomHttpPattern)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\tHttpProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\370\001\001\242\002\004GAPI'))
+# @@protoc_insertion_point(module_scope)
diff --git a/buildstream/_protos/google/api/http_pb2_grpc.py b/buildstream/_protos/google/api/http_pb2_grpc.py
new file mode 100644
index 000000000..a89435267
--- /dev/null
+++ b/buildstream/_protos/google/api/http_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/buildstream/_protos/google/bytestream/__init__.py b/buildstream/_protos/google/bytestream/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/google/bytestream/__init__.py
diff --git a/buildstream/_protos/google/bytestream/bytestream.proto b/buildstream/_protos/google/bytestream/bytestream.proto
new file mode 100644
index 000000000..85e386fc2
--- /dev/null
+++ b/buildstream/_protos/google/bytestream/bytestream.proto
@@ -0,0 +1,181 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bytestream;
+
+import "google/api/annotations.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bytestream;bytestream";
+option java_outer_classname = "ByteStreamProto";
+option java_package = "com.google.bytestream";
+
+
+// #### Introduction
+//
+// The Byte Stream API enables a client to read and write a stream of bytes to
+// and from a resource. Resources have names, and these names are supplied in
+// the API calls below to identify the resource that is being read from or
+// written to.
+//
+// All implementations of the Byte Stream API export the interface defined here:
+//
+// * `Read()`: Reads the contents of a resource.
+//
+// * `Write()`: Writes the contents of a resource. The client can call `Write()`
+// multiple times with the same resource and can check the status of the write
+// by calling `QueryWriteStatus()`.
+//
+// #### Service parameters and metadata
+//
+// The ByteStream API provides no direct way to access/modify any metadata
+// associated with the resource.
+//
+// #### Errors
+//
+// The errors returned by the service are in the Google canonical error space.
+service ByteStream {
+ // `Read()` is used to retrieve the contents of a resource as a sequence
+ // of bytes. The bytes are returned in a sequence of responses, and the
+ // responses are delivered as the results of a server-side streaming RPC.
+ rpc Read(ReadRequest) returns (stream ReadResponse);
+
+ // `Write()` is used to send the contents of a resource as a sequence of
+ // bytes. The bytes are sent in a sequence of request protos of a client-side
+ // streaming RPC.
+ //
+ // A `Write()` action is resumable. If there is an error or the connection is
+ // broken during the `Write()`, the client should check the status of the
+ // `Write()` by calling `QueryWriteStatus()` and continue writing from the
+ // returned `committed_size`. This may be less than the amount of data the
+ // client previously sent.
+ //
+ // Calling `Write()` on a resource name that was previously written and
+ // finalized could cause an error, depending on whether the underlying service
+ // allows overwriting of previously written resources.
+ //
+ // When the client closes the request channel, the service will respond with
+ // a `WriteResponse`. The service will not view the resource as `complete`
+ // until the client has sent a `WriteRequest` with `finish_write` set to
+ // `true`. Sending any requests on a stream after sending a request with
+ // `finish_write` set to `true` will cause an error. The client **should**
+ // check the `WriteResponse` it receives to determine how much data the
+ // service was able to commit and whether the service views the resource as
+ // `complete` or not.
+ rpc Write(stream WriteRequest) returns (WriteResponse);
+
+ // `QueryWriteStatus()` is used to find the `committed_size` for a resource
+ // that is being written, which can then be used as the `write_offset` for
+ // the next `Write()` call.
+ //
+ // If the resource does not exist (i.e., the resource has been deleted, or the
+ // first `Write()` has not yet reached the service), this method returns the
+ // error `NOT_FOUND`.
+ //
+ // The client **may** call `QueryWriteStatus()` at any time to determine how
+ // much data has been processed for this resource. This is useful if the
+ // client is buffering data and needs to know which data can be safely
+ // evicted. For any sequence of `QueryWriteStatus()` calls for a given
+ // resource name, the sequence of returned `committed_size` values will be
+ // non-decreasing.
+ rpc QueryWriteStatus(QueryWriteStatusRequest) returns (QueryWriteStatusResponse);
+}
+
+// Request object for ByteStream.Read.
+message ReadRequest {
+ // The name of the resource to read.
+ string resource_name = 1;
+
+ // The offset for the first byte to return in the read, relative to the start
+ // of the resource.
+ //
+ // A `read_offset` that is negative or greater than the size of the resource
+ // will cause an `OUT_OF_RANGE` error.
+ int64 read_offset = 2;
+
+ // The maximum number of `data` bytes the server is allowed to return in the
+ // sum of all `ReadResponse` messages. A `read_limit` of zero indicates that
+ // there is no limit, and a negative `read_limit` will cause an error.
+ //
+ // If the stream returns fewer bytes than allowed by the `read_limit` and no
+ // error occurred, the stream includes all data from the `read_offset` to the
+ // end of the resource.
+ int64 read_limit = 3;
+}
+
+// Response object for ByteStream.Read.
+message ReadResponse {
+ // A portion of the data for the resource. The service **may** leave `data`
+ // empty for any given `ReadResponse`. This enables the service to inform the
+ // client that the request is still live while it is running an operation to
+ // generate more data.
+ bytes data = 10;
+}
+
+// Request object for ByteStream.Write.
+message WriteRequest {
+ // The name of the resource to write. This **must** be set on the first
+ // `WriteRequest` of each `Write()` action. If it is set on subsequent calls,
+ // it **must** match the value of the first request.
+ string resource_name = 1;
+
+ // The offset from the beginning of the resource at which the data should be
+ // written. It is required on all `WriteRequest`s.
+ //
+ // In the first `WriteRequest` of a `Write()` action, it indicates
+ // the initial offset for the `Write()` call. The value **must** be equal to
+ // the `committed_size` that a call to `QueryWriteStatus()` would return.
+ //
+ // On subsequent calls, this value **must** be set and **must** be equal to
+ // the sum of the first `write_offset` and the sizes of all `data` bundles
+ // sent previously on this stream.
+ //
+ // An incorrect value will cause an error.
+ int64 write_offset = 2;
+
+ // If `true`, this indicates that the write is complete. Sending any
+ // `WriteRequest`s subsequent to one in which `finish_write` is `true` will
+ // cause an error.
+ bool finish_write = 3;
+
+ // A portion of the data for the resource. The client **may** leave `data`
+ // empty for any given `WriteRequest`. This enables the client to inform the
+ // service that the request is still live while it is running an operation to
+ // generate more data.
+ bytes data = 10;
+}
+
+// Response object for ByteStream.Write.
+message WriteResponse {
+ // The number of bytes that have been processed for the given resource.
+ int64 committed_size = 1;
+}
+
+// Request object for ByteStream.QueryWriteStatus.
+message QueryWriteStatusRequest {
+ // The name of the resource whose write status is being requested.
+ string resource_name = 1;
+}
+
+// Response object for ByteStream.QueryWriteStatus.
+message QueryWriteStatusResponse {
+ // The number of bytes that have been processed for the given resource.
+ int64 committed_size = 1;
+
+ // `complete` is `true` only if the client has sent a `WriteRequest` with
+ // `finish_write` set to true, and the server has processed that request.
+ bool complete = 2;
+}
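
The `Write()`/`QueryWriteStatus()` contract described in the comments above is easiest to see from the client side. Below is a minimal, hedged sketch (not part of this commit): the server address and resource name are placeholders, the chunk size is arbitrary, and it assumes the blob is not yet fully committed. The module paths are the ones this diff adds under buildstream._protos.

    import grpc

    from buildstream._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc

    CHUNK = 64 * 1024  # assumption: any chunk size the server accepts

    def write_resource(stub, resource_name, blob):
        # Resume from whatever the server has already committed.
        # A resource the server has never seen returns NOT_FOUND.
        try:
            status = stub.QueryWriteStatus(
                bytestream_pb2.QueryWriteStatusRequest(resource_name=resource_name))
            offset = status.committed_size
        except grpc.RpcError:
            offset = 0  # first Write() has not reached the service yet

        def requests():
            for start in range(offset, len(blob), CHUNK):
                chunk = blob[start:start + CHUNK]
                yield bytestream_pb2.WriteRequest(
                    resource_name=resource_name,  # required on the first request
                    write_offset=start,           # must equal bytes committed so far
                    data=chunk,
                    finish_write=start + len(chunk) >= len(blob))

        response = stub.Write(requests())
        assert response.committed_size == len(blob)

    channel = grpc.insecure_channel('localhost:50051')  # placeholder address
    stub = bytestream_pb2_grpc.ByteStreamStub(channel)
    write_resource(stub, 'my/resource', b'payload')

Note that `Write` is a client-streaming RPC, so the generated stub takes an iterator of `WriteRequest`s and returns a single `WriteResponse` once the request channel closes.
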
diff --git a/buildstream/_protos/google/bytestream/bytestream_pb2.py b/buildstream/_protos/google/bytestream/bytestream_pb2.py
new file mode 100644
index 000000000..c8487d6a0
--- /dev/null
+++ b/buildstream/_protos/google/bytestream/bytestream_pb2.py
@@ -0,0 +1,353 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/bytestream/bytestream.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/bytestream/bytestream.proto',
+ package='google.bytestream',
+ syntax='proto3',
+ serialized_pb=_b('\n\"google/bytestream/bytestream.proto\x12\x11google.bytestream\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/wrappers.proto\"M\n\x0bReadRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x13\n\x0bread_offset\x18\x02 \x01(\x03\x12\x12\n\nread_limit\x18\x03 \x01(\x03\"\x1c\n\x0cReadResponse\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\x0c\"_\n\x0cWriteRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x14\n\x0cwrite_offset\x18\x02 \x01(\x03\x12\x14\n\x0c\x66inish_write\x18\x03 \x01(\x08\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\x0c\"\'\n\rWriteResponse\x12\x16\n\x0e\x63ommitted_size\x18\x01 \x01(\x03\"0\n\x17QueryWriteStatusRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\"D\n\x18QueryWriteStatusResponse\x12\x16\n\x0e\x63ommitted_size\x18\x01 \x01(\x03\x12\x10\n\x08\x63omplete\x18\x02 \x01(\x08\x32\x92\x02\n\nByteStream\x12I\n\x04Read\x12\x1e.google.bytestream.ReadRequest\x1a\x1f.google.bytestream.ReadResponse0\x01\x12L\n\x05Write\x12\x1f.google.bytestream.WriteRequest\x1a .google.bytestream.WriteResponse(\x01\x12k\n\x10QueryWriteStatus\x12*.google.bytestream.QueryWriteStatusRequest\x1a+.google.bytestream.QueryWriteStatusResponseBe\n\x15\x63om.google.bytestreamB\x0f\x42yteStreamProtoZ;google.golang.org/genproto/googleapis/bytestream;bytestreamb\x06proto3')
+ ,
+ dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,])
+
+
+
+
+_READREQUEST = _descriptor.Descriptor(
+ name='ReadRequest',
+ full_name='google.bytestream.ReadRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='resource_name', full_name='google.bytestream.ReadRequest.resource_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='read_offset', full_name='google.bytestream.ReadRequest.read_offset', index=1,
+ number=2, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='read_limit', full_name='google.bytestream.ReadRequest.read_limit', index=2,
+ number=3, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=119,
+ serialized_end=196,
+)
+
+
+_READRESPONSE = _descriptor.Descriptor(
+ name='ReadResponse',
+ full_name='google.bytestream.ReadResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='data', full_name='google.bytestream.ReadResponse.data', index=0,
+ number=10, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=198,
+ serialized_end=226,
+)
+
+
+_WRITEREQUEST = _descriptor.Descriptor(
+ name='WriteRequest',
+ full_name='google.bytestream.WriteRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='resource_name', full_name='google.bytestream.WriteRequest.resource_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='write_offset', full_name='google.bytestream.WriteRequest.write_offset', index=1,
+ number=2, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='finish_write', full_name='google.bytestream.WriteRequest.finish_write', index=2,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='data', full_name='google.bytestream.WriteRequest.data', index=3,
+ number=10, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=228,
+ serialized_end=323,
+)
+
+
+_WRITERESPONSE = _descriptor.Descriptor(
+ name='WriteResponse',
+ full_name='google.bytestream.WriteResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='committed_size', full_name='google.bytestream.WriteResponse.committed_size', index=0,
+ number=1, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=325,
+ serialized_end=364,
+)
+
+
+_QUERYWRITESTATUSREQUEST = _descriptor.Descriptor(
+ name='QueryWriteStatusRequest',
+ full_name='google.bytestream.QueryWriteStatusRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='resource_name', full_name='google.bytestream.QueryWriteStatusRequest.resource_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=366,
+ serialized_end=414,
+)
+
+
+_QUERYWRITESTATUSRESPONSE = _descriptor.Descriptor(
+ name='QueryWriteStatusResponse',
+ full_name='google.bytestream.QueryWriteStatusResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='committed_size', full_name='google.bytestream.QueryWriteStatusResponse.committed_size', index=0,
+ number=1, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='complete', full_name='google.bytestream.QueryWriteStatusResponse.complete', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=416,
+ serialized_end=484,
+)
+
+DESCRIPTOR.message_types_by_name['ReadRequest'] = _READREQUEST
+DESCRIPTOR.message_types_by_name['ReadResponse'] = _READRESPONSE
+DESCRIPTOR.message_types_by_name['WriteRequest'] = _WRITEREQUEST
+DESCRIPTOR.message_types_by_name['WriteResponse'] = _WRITERESPONSE
+DESCRIPTOR.message_types_by_name['QueryWriteStatusRequest'] = _QUERYWRITESTATUSREQUEST
+DESCRIPTOR.message_types_by_name['QueryWriteStatusResponse'] = _QUERYWRITESTATUSRESPONSE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+ReadRequest = _reflection.GeneratedProtocolMessageType('ReadRequest', (_message.Message,), dict(
+ DESCRIPTOR = _READREQUEST,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.ReadRequest)
+ ))
+_sym_db.RegisterMessage(ReadRequest)
+
+ReadResponse = _reflection.GeneratedProtocolMessageType('ReadResponse', (_message.Message,), dict(
+ DESCRIPTOR = _READRESPONSE,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.ReadResponse)
+ ))
+_sym_db.RegisterMessage(ReadResponse)
+
+WriteRequest = _reflection.GeneratedProtocolMessageType('WriteRequest', (_message.Message,), dict(
+ DESCRIPTOR = _WRITEREQUEST,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.WriteRequest)
+ ))
+_sym_db.RegisterMessage(WriteRequest)
+
+WriteResponse = _reflection.GeneratedProtocolMessageType('WriteResponse', (_message.Message,), dict(
+ DESCRIPTOR = _WRITERESPONSE,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.WriteResponse)
+ ))
+_sym_db.RegisterMessage(WriteResponse)
+
+QueryWriteStatusRequest = _reflection.GeneratedProtocolMessageType('QueryWriteStatusRequest', (_message.Message,), dict(
+ DESCRIPTOR = _QUERYWRITESTATUSREQUEST,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.QueryWriteStatusRequest)
+ ))
+_sym_db.RegisterMessage(QueryWriteStatusRequest)
+
+QueryWriteStatusResponse = _reflection.GeneratedProtocolMessageType('QueryWriteStatusResponse', (_message.Message,), dict(
+ DESCRIPTOR = _QUERYWRITESTATUSRESPONSE,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.QueryWriteStatusResponse)
+ ))
+_sym_db.RegisterMessage(QueryWriteStatusResponse)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\025com.google.bytestreamB\017ByteStreamProtoZ;google.golang.org/genproto/googleapis/bytestream;bytestream'))
+
+_BYTESTREAM = _descriptor.ServiceDescriptor(
+ name='ByteStream',
+ full_name='google.bytestream.ByteStream',
+ file=DESCRIPTOR,
+ index=0,
+ options=None,
+ serialized_start=487,
+ serialized_end=761,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='Read',
+ full_name='google.bytestream.ByteStream.Read',
+ index=0,
+ containing_service=None,
+ input_type=_READREQUEST,
+ output_type=_READRESPONSE,
+ options=None,
+ ),
+ _descriptor.MethodDescriptor(
+ name='Write',
+ full_name='google.bytestream.ByteStream.Write',
+ index=1,
+ containing_service=None,
+ input_type=_WRITEREQUEST,
+ output_type=_WRITERESPONSE,
+ options=None,
+ ),
+ _descriptor.MethodDescriptor(
+ name='QueryWriteStatus',
+ full_name='google.bytestream.ByteStream.QueryWriteStatus',
+ index=2,
+ containing_service=None,
+ input_type=_QUERYWRITESTATUSREQUEST,
+ output_type=_QUERYWRITESTATUSRESPONSE,
+ options=None,
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_BYTESTREAM)
+
+DESCRIPTOR.services_by_name['ByteStream'] = _BYTESTREAM
+
+# @@protoc_insertion_point(module_scope)
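
All of the descriptor plumbing in the generated module above boils down to ordinary protobuf message classes registered via `GeneratedProtocolMessageType`. A quick illustration of what the module exposes in practice (field values are arbitrary):

    from buildstream._protos.google.bytestream import bytestream_pb2

    req = bytestream_pb2.WriteRequest(
        resource_name='my/resource', write_offset=0,
        finish_write=True, data=b'hello')
    wire = req.SerializeToString()                     # binary proto encoding
    again = bytestream_pb2.WriteRequest.FromString(wire)
    assert again.data == b'hello'
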
diff --git a/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py b/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py
new file mode 100644
index 000000000..ef993e040
--- /dev/null
+++ b/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py
@@ -0,0 +1,160 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream._protos.google.bytestream import bytestream_pb2 as google_dot_bytestream_dot_bytestream__pb2
+
+
+class ByteStreamStub(object):
+ """#### Introduction
+
+ The Byte Stream API enables a client to read and write a stream of bytes to
+ and from a resource. Resources have names, and these names are supplied in
+ the API calls below to identify the resource that is being read from or
+ written to.
+
+ All implementations of the Byte Stream API export the interface defined here:
+
+ * `Read()`: Reads the contents of a resource.
+
+ * `Write()`: Writes the contents of a resource. The client can call `Write()`
+ multiple times with the same resource and can check the status of the write
+ by calling `QueryWriteStatus()`.
+
+ #### Service parameters and metadata
+
+ The ByteStream API provides no direct way to access/modify any metadata
+ associated with the resource.
+
+ #### Errors
+
+ The errors returned by the service are in the Google canonical error space.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.Read = channel.unary_stream(
+ '/google.bytestream.ByteStream/Read',
+ request_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.SerializeToString,
+ response_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.FromString,
+ )
+ self.Write = channel.stream_unary(
+ '/google.bytestream.ByteStream/Write',
+ request_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.SerializeToString,
+ response_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.FromString,
+ )
+ self.QueryWriteStatus = channel.unary_unary(
+ '/google.bytestream.ByteStream/QueryWriteStatus',
+ request_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.SerializeToString,
+ response_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.FromString,
+ )
+
+
+class ByteStreamServicer(object):
+ """#### Introduction
+
+ The Byte Stream API enables a client to read and write a stream of bytes to
+ and from a resource. Resources have names, and these names are supplied in
+ the API calls below to identify the resource that is being read from or
+ written to.
+
+ All implementations of the Byte Stream API export the interface defined here:
+
+ * `Read()`: Reads the contents of a resource.
+
+ * `Write()`: Writes the contents of a resource. The client can call `Write()`
+ multiple times with the same resource and can check the status of the write
+ by calling `QueryWriteStatus()`.
+
+ #### Service parameters and metadata
+
+ The ByteStream API provides no direct way to access/modify any metadata
+ associated with the resource.
+
+ #### Errors
+
+ The errors returned by the service are in the Google canonical error space.
+ """
+
+ def Read(self, request, context):
+ """`Read()` is used to retrieve the contents of a resource as a sequence
+ of bytes. The bytes are returned in a sequence of responses, and the
+ responses are delivered as the results of a server-side streaming RPC.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def Write(self, request_iterator, context):
+ """`Write()` is used to send the contents of a resource as a sequence of
+ bytes. The bytes are sent in a sequence of request protos of a client-side
+ streaming RPC.
+
+ A `Write()` action is resumable. If there is an error or the connection is
+ broken during the `Write()`, the client should check the status of the
+ `Write()` by calling `QueryWriteStatus()` and continue writing from the
+ returned `committed_size`. This may be less than the amount of data the
+ client previously sent.
+
+ Calling `Write()` on a resource name that was previously written and
+ finalized could cause an error, depending on whether the underlying service
+ allows overwriting of previously written resources.
+
+ When the client closes the request channel, the service will respond with
+ a `WriteResponse`. The service will not view the resource as `complete`
+ until the client has sent a `WriteRequest` with `finish_write` set to
+ `true`. Sending any requests on a stream after sending a request with
+ `finish_write` set to `true` will cause an error. The client **should**
+ check the `WriteResponse` it receives to determine how much data the
+ service was able to commit and whether the service views the resource as
+ `complete` or not.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def QueryWriteStatus(self, request, context):
+ """`QueryWriteStatus()` is used to find the `committed_size` for a resource
+ that is being written, which can then be used as the `write_offset` for
+ the next `Write()` call.
+
+ If the resource does not exist (i.e., the resource has been deleted, or the
+ first `Write()` has not yet reached the service), this method returns the
+ error `NOT_FOUND`.
+
+ The client **may** call `QueryWriteStatus()` at any time to determine how
+ much data has been processed for this resource. This is useful if the
+ client is buffering data and needs to know which data can be safely
+ evicted. For any sequence of `QueryWriteStatus()` calls for a given
+ resource name, the sequence of returned `committed_size` values will be
+ non-decreasing.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ByteStreamServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'Read': grpc.unary_stream_rpc_method_handler(
+ servicer.Read,
+ request_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.FromString,
+ response_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.SerializeToString,
+ ),
+ 'Write': grpc.stream_unary_rpc_method_handler(
+ servicer.Write,
+ request_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.FromString,
+ response_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.SerializeToString,
+ ),
+ 'QueryWriteStatus': grpc.unary_unary_rpc_method_handler(
+ servicer.QueryWriteStatus,
+ request_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.FromString,
+ response_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'google.bytestream.ByteStream', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
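
The `add_ByteStreamServicer_to_server()` helper above is the server-side entry point. A minimal sketch of wiring a servicer into a gRPC server follows; the in-memory store, the `read_limit <= 0` shortcut (zero means "no limit" per the proto comments), and the port are assumptions for illustration only.

    from concurrent import futures

    import grpc

    from buildstream._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc

    class InMemoryByteStream(bytestream_pb2_grpc.ByteStreamServicer):
        def __init__(self):
            self._blobs = {}

        def Read(self, request, context):
            # Read is a server-streaming RPC, so this method is a generator.
            data = self._blobs.get(request.resource_name, b'')
            end = len(data) if request.read_limit <= 0 \
                else request.read_offset + request.read_limit
            yield bytestream_pb2.ReadResponse(data=data[request.read_offset:end])

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    bytestream_pb2_grpc.add_ByteStreamServicer_to_server(InMemoryByteStream(), server)
    server.add_insecure_port('[::]:50051')  # placeholder port
    server.start()
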
diff --git a/buildstream/_protos/google/longrunning/__init__.py b/buildstream/_protos/google/longrunning/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/google/longrunning/__init__.py
diff --git a/buildstream/_protos/google/longrunning/operations.proto b/buildstream/_protos/google/longrunning/operations.proto
new file mode 100644
index 000000000..76fef29c3
--- /dev/null
+++ b/buildstream/_protos/google/longrunning/operations.proto
@@ -0,0 +1,160 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.longrunning;
+
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Google.LongRunning";
+option go_package = "google.golang.org/genproto/googleapis/longrunning;longrunning";
+option java_multiple_files = true;
+option java_outer_classname = "OperationsProto";
+option java_package = "com.google.longrunning";
+option php_namespace = "Google\\LongRunning";
+
+
+// Manages long-running operations with an API service.
+//
+// When an API method normally takes a long time to complete, it can be designed
+// to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+// interface to receive the real response asynchronously by polling the
+// operation resource, or pass the operation resource to another API (such as
+// Google Cloud Pub/Sub API) to receive the response. Any API service that
+// returns long-running operations should implement the `Operations` interface
+// so developers can have a consistent client experience.
+service Operations {
+ // Lists operations that match the specified filter in the request. If the
+ // server doesn't support this method, it returns `UNIMPLEMENTED`.
+ //
+ // NOTE: the `name` binding below allows API services to override the binding
+ // to use different resource name schemes, such as `users/*/operations`.
+ rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
+ option (google.api.http) = { get: "/v1/{name=operations}" };
+ }
+
+ // Gets the latest state of a long-running operation. Clients can use this
+ // method to poll the operation result at intervals as recommended by the API
+ // service.
+ rpc GetOperation(GetOperationRequest) returns (Operation) {
+ option (google.api.http) = { get: "/v1/{name=operations/**}" };
+ }
+
+ // Deletes a long-running operation. This method indicates that the client is
+ // no longer interested in the operation result. It does not cancel the
+ // operation. If the server doesn't support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`.
+ rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=operations/**}" };
+ }
+
+ // Starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not
+ // guaranteed. If the server doesn't support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`. Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ // corresponding to `Code.CANCELLED`.
+ rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" };
+ }
+}
+
+// This resource represents a long-running operation that is the result of a
+// network API call.
+message Operation {
+ // The server-assigned name, which is only unique within the same service that
+ // originally returns it. If you use the default HTTP mapping, the
+ // `name` should have the format of `operations/some/unique/name`.
+ string name = 1;
+
+ // Service-specific metadata associated with the operation. It typically
+ // contains progress information and common metadata such as create time.
+ // Some services might not provide such metadata. Any method that returns a
+ // long-running operation should document the metadata type, if any.
+ google.protobuf.Any metadata = 2;
+
+ // If the value is `false`, it means the operation is still in progress.
+ // If true, the operation is completed, and either `error` or `response` is
+ // available.
+ bool done = 3;
+
+ // The operation result, which can be either an `error` or a valid `response`.
+ // If `done` == `false`, neither `error` nor `response` is set.
+ // If `done` == `true`, exactly one of `error` or `response` is set.
+ oneof result {
+ // The error result of the operation in case of failure or cancellation.
+ google.rpc.Status error = 4;
+
+ // The normal response of the operation in case of success. If the original
+ // method returns no data on success, such as `Delete`, the response is
+ // `google.protobuf.Empty`. If the original method is standard
+ // `Get`/`Create`/`Update`, the response should be the resource. For other
+ // methods, the response should have the type `XxxResponse`, where `Xxx`
+ // is the original method name. For example, if the original method name
+ // is `TakeSnapshot()`, the inferred response type is
+ // `TakeSnapshotResponse`.
+ google.protobuf.Any response = 5;
+ }
+}
+
+// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
+message GetOperationRequest {
+ // The name of the operation resource.
+ string name = 1;
+}
+
+// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
+message ListOperationsRequest {
+ // The name of the operation collection.
+ string name = 4;
+
+ // The standard list filter.
+ string filter = 1;
+
+ // The standard list page size.
+ int32 page_size = 2;
+
+ // The standard list page token.
+ string page_token = 3;
+}
+
+// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
+message ListOperationsResponse {
+ // A list of operations that match the specified filter in the request.
+ repeated Operation operations = 1;
+
+ // The standard List next-page token.
+ string next_page_token = 2;
+}
+
+// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
+message CancelOperationRequest {
+ // The name of the operation resource to be cancelled.
+ string name = 1;
+}
+
+// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
+message DeleteOperationRequest {
+ // The name of the operation resource to be deleted.
+ string name = 1;
+}
+
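
The `Operations` interface above is designed for polling. A minimal client-side sketch (not part of this commit), assuming an existing channel and an operation name returned by some earlier call:

    import time

    from buildstream._protos.google.longrunning import operations_pb2, operations_pb2_grpc

    def wait_for_operation(channel, name, poll_seconds=1.0):
        stub = operations_pb2_grpc.OperationsStub(channel)
        while True:
            op = stub.GetOperation(operations_pb2.GetOperationRequest(name=name))
            if op.done:
                return op  # exactly one of op.error / op.response is now set
            time.sleep(poll_seconds)
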
diff --git a/buildstream/_protos/google/longrunning/operations_pb2.py b/buildstream/_protos/google/longrunning/operations_pb2.py
new file mode 100644
index 000000000..9fd89937f
--- /dev/null
+++ b/buildstream/_protos/google/longrunning/operations_pb2.py
@@ -0,0 +1,391 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/longrunning/operations.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/longrunning/operations.proto',
+ package='google.longrunning',
+ syntax='proto3',
+ serialized_pb=_b('\n#google/longrunning/operations.proto\x12\x12google.longrunning\x1a\x1cgoogle/api/annotations.proto\x1a\x19google/protobuf/any.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x17google/rpc/status.proto\"\xa8\x01\n\tOperation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x08metadata\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12\x0c\n\x04\x64one\x18\x03 \x01(\x08\x12#\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x12.google.rpc.StatusH\x00\x12(\n\x08response\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x08\n\x06result\"#\n\x13GetOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\\\n\x15ListOperationsRequest\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"d\n\x16ListOperationsResponse\x12\x31\n\noperations\x18\x01 \x03(\x0b\x32\x1d.google.longrunning.Operation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"&\n\x16\x43\x61ncelOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"&\n\x16\x44\x65leteOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t2\x8c\x04\n\nOperations\x12\x86\x01\n\x0eListOperations\x12).google.longrunning.ListOperationsRequest\x1a*.google.longrunning.ListOperationsResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/v1/{name=operations}\x12x\n\x0cGetOperation\x12\'.google.longrunning.GetOperationRequest\x1a\x1d.google.longrunning.Operation\" \x82\xd3\xe4\x93\x02\x1a\x12\x18/v1/{name=operations/**}\x12w\n\x0f\x44\x65leteOperation\x12*.google.longrunning.DeleteOperationRequest\x1a\x16.google.protobuf.Empty\" \x82\xd3\xe4\x93\x02\x1a*\x18/v1/{name=operations/**}\x12\x81\x01\n\x0f\x43\x61ncelOperation\x12*.google.longrunning.CancelOperationRequest\x1a\x16.google.protobuf.Empty\"*\x82\xd3\xe4\x93\x02$\"\x1f/v1/{name=operations/**}:cancel:\x01*B\x94\x01\n\x16\x63om.google.longrunningB\x0fOperationsProtoP\x01Z=google.golang.org/genproto/googleapis/longrunning;longrunning\xaa\x02\x12Google.LongRunning\xca\x02\x12Google\\LongRunningb\x06proto3')
+ ,
+ dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
+
+
+
+
+_OPERATION = _descriptor.Descriptor(
+ name='Operation',
+ full_name='google.longrunning.Operation',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.longrunning.Operation.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='metadata', full_name='google.longrunning.Operation.metadata', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='done', full_name='google.longrunning.Operation.done', index=2,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='error', full_name='google.longrunning.Operation.error', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='response', full_name='google.longrunning.Operation.response', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ _descriptor.OneofDescriptor(
+ name='result', full_name='google.longrunning.Operation.result',
+ index=0, containing_type=None, fields=[]),
+ ],
+ serialized_start=171,
+ serialized_end=339,
+)
+
+
+_GETOPERATIONREQUEST = _descriptor.Descriptor(
+ name='GetOperationRequest',
+ full_name='google.longrunning.GetOperationRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.longrunning.GetOperationRequest.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=341,
+ serialized_end=376,
+)
+
+
+_LISTOPERATIONSREQUEST = _descriptor.Descriptor(
+ name='ListOperationsRequest',
+ full_name='google.longrunning.ListOperationsRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.longrunning.ListOperationsRequest.name', index=0,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='filter', full_name='google.longrunning.ListOperationsRequest.filter', index=1,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_size', full_name='google.longrunning.ListOperationsRequest.page_size', index=2,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_token', full_name='google.longrunning.ListOperationsRequest.page_token', index=3,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=378,
+ serialized_end=470,
+)
+
+
+_LISTOPERATIONSRESPONSE = _descriptor.Descriptor(
+ name='ListOperationsResponse',
+ full_name='google.longrunning.ListOperationsResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='operations', full_name='google.longrunning.ListOperationsResponse.operations', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='next_page_token', full_name='google.longrunning.ListOperationsResponse.next_page_token', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=472,
+ serialized_end=572,
+)
+
+
+_CANCELOPERATIONREQUEST = _descriptor.Descriptor(
+ name='CancelOperationRequest',
+ full_name='google.longrunning.CancelOperationRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.longrunning.CancelOperationRequest.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=574,
+ serialized_end=612,
+)
+
+
+_DELETEOPERATIONREQUEST = _descriptor.Descriptor(
+ name='DeleteOperationRequest',
+ full_name='google.longrunning.DeleteOperationRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.longrunning.DeleteOperationRequest.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=614,
+ serialized_end=652,
+)
+
+_OPERATION.fields_by_name['metadata'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+_OPERATION.fields_by_name['error'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_OPERATION.fields_by_name['response'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+_OPERATION.oneofs_by_name['result'].fields.append(
+ _OPERATION.fields_by_name['error'])
+_OPERATION.fields_by_name['error'].containing_oneof = _OPERATION.oneofs_by_name['result']
+_OPERATION.oneofs_by_name['result'].fields.append(
+ _OPERATION.fields_by_name['response'])
+_OPERATION.fields_by_name['response'].containing_oneof = _OPERATION.oneofs_by_name['result']
+_LISTOPERATIONSRESPONSE.fields_by_name['operations'].message_type = _OPERATION
+DESCRIPTOR.message_types_by_name['Operation'] = _OPERATION
+DESCRIPTOR.message_types_by_name['GetOperationRequest'] = _GETOPERATIONREQUEST
+DESCRIPTOR.message_types_by_name['ListOperationsRequest'] = _LISTOPERATIONSREQUEST
+DESCRIPTOR.message_types_by_name['ListOperationsResponse'] = _LISTOPERATIONSRESPONSE
+DESCRIPTOR.message_types_by_name['CancelOperationRequest'] = _CANCELOPERATIONREQUEST
+DESCRIPTOR.message_types_by_name['DeleteOperationRequest'] = _DELETEOPERATIONREQUEST
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), dict(
+ DESCRIPTOR = _OPERATION,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.Operation)
+ ))
+_sym_db.RegisterMessage(Operation)
+
+GetOperationRequest = _reflection.GeneratedProtocolMessageType('GetOperationRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETOPERATIONREQUEST,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.GetOperationRequest)
+ ))
+_sym_db.RegisterMessage(GetOperationRequest)
+
+ListOperationsRequest = _reflection.GeneratedProtocolMessageType('ListOperationsRequest', (_message.Message,), dict(
+ DESCRIPTOR = _LISTOPERATIONSREQUEST,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsRequest)
+ ))
+_sym_db.RegisterMessage(ListOperationsRequest)
+
+ListOperationsResponse = _reflection.GeneratedProtocolMessageType('ListOperationsResponse', (_message.Message,), dict(
+ DESCRIPTOR = _LISTOPERATIONSRESPONSE,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsResponse)
+ ))
+_sym_db.RegisterMessage(ListOperationsResponse)
+
+CancelOperationRequest = _reflection.GeneratedProtocolMessageType('CancelOperationRequest', (_message.Message,), dict(
+ DESCRIPTOR = _CANCELOPERATIONREQUEST,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.CancelOperationRequest)
+ ))
+_sym_db.RegisterMessage(CancelOperationRequest)
+
+DeleteOperationRequest = _reflection.GeneratedProtocolMessageType('DeleteOperationRequest', (_message.Message,), dict(
+ DESCRIPTOR = _DELETEOPERATIONREQUEST,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.DeleteOperationRequest)
+ ))
+_sym_db.RegisterMessage(DeleteOperationRequest)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.longrunningB\017OperationsProtoP\001Z=google.golang.org/genproto/googleapis/longrunning;longrunning\252\002\022Google.LongRunning\312\002\022Google\\LongRunning'))
+
+_OPERATIONS = _descriptor.ServiceDescriptor(
+ name='Operations',
+ full_name='google.longrunning.Operations',
+ file=DESCRIPTOR,
+ index=0,
+ options=None,
+ serialized_start=655,
+ serialized_end=1179,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='ListOperations',
+ full_name='google.longrunning.Operations.ListOperations',
+ index=0,
+ containing_service=None,
+ input_type=_LISTOPERATIONSREQUEST,
+ output_type=_LISTOPERATIONSRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/v1/{name=operations}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='GetOperation',
+ full_name='google.longrunning.Operations.GetOperation',
+ index=1,
+ containing_service=None,
+ input_type=_GETOPERATIONREQUEST,
+ output_type=_OPERATION,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\022\030/v1/{name=operations/**}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='DeleteOperation',
+ full_name='google.longrunning.Operations.DeleteOperation',
+ index=2,
+ containing_service=None,
+ input_type=_DELETEOPERATIONREQUEST,
+ output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032*\030/v1/{name=operations/**}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='CancelOperation',
+ full_name='google.longrunning.Operations.CancelOperation',
+ index=3,
+ containing_service=None,
+ input_type=_CANCELOPERATIONREQUEST,
+ output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002$\"\037/v1/{name=operations/**}:cancel:\001*')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_OPERATIONS)
+
+DESCRIPTOR.services_by_name['Operations'] = _OPERATIONS
+
+# @@protoc_insertion_point(module_scope)
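
On the consumer side, the `result` oneof defined above surfaces through the standard protobuf oneof accessors. A brief illustration (the response payload type is a placeholder; `op.response` is a `google.protobuf.Any` that must be `Unpack()`ed into the type the originating API documents):

    from buildstream._protos.google.longrunning import operations_pb2

    op = operations_pb2.Operation(name='operations/example', done=True)
    which = op.WhichOneof('result')    # 'error', 'response', or None while running
    if which == 'error':
        print(op.error.code, op.error.message)
    elif which == 'response':
        # op.response.Unpack(expected_message) fills in the concrete type.
        pass
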
diff --git a/buildstream/_protos/google/longrunning/operations_pb2_grpc.py b/buildstream/_protos/google/longrunning/operations_pb2_grpc.py
new file mode 100644
index 000000000..8f89862e7
--- /dev/null
+++ b/buildstream/_protos/google/longrunning/operations_pb2_grpc.py
@@ -0,0 +1,132 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+
+class OperationsStub(object):
+ """Manages long-running operations with an API service.
+
+ When an API method normally takes a long time to complete, it can be designed
+ to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+ interface to receive the real response asynchronously by polling the
+ operation resource, or pass the operation resource to another API (such as
+ Google Cloud Pub/Sub API) to receive the response. Any API service that
+ returns long-running operations should implement the `Operations` interface
+ so developers can have a consistent client experience.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.ListOperations = channel.unary_unary(
+ '/google.longrunning.Operations/ListOperations',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString,
+ )
+ self.GetOperation = channel.unary_unary(
+ '/google.longrunning.Operations/GetOperation',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
+ self.DeleteOperation = channel.unary_unary(
+ '/google.longrunning.Operations/DeleteOperation',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ )
+ self.CancelOperation = channel.unary_unary(
+ '/google.longrunning.Operations/CancelOperation',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ )
+
+
+class OperationsServicer(object):
+ """Manages long-running operations with an API service.
+
+ When an API method normally takes a long time to complete, it can be designed
+ to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+ interface to receive the real response asynchronously by polling the
+ operation resource, or pass the operation resource to another API (such as
+ Google Cloud Pub/Sub API) to receive the response. Any API service that
+ returns long-running operations should implement the `Operations` interface
+ so developers can have a consistent client experience.
+ """
+
+ def ListOperations(self, request, context):
+ """Lists operations that match the specified filter in the request. If the
+ server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+ NOTE: the `name` binding below allows API services to override the binding
+ to use different resource name schemes, such as `users/*/operations`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetOperation(self, request, context):
+ """Gets the latest state of a long-running operation. Clients can use this
+ method to poll the operation result at intervals as recommended by the API
+ service.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def DeleteOperation(self, request, context):
+ """Deletes a long-running operation. This method indicates that the client is
+ no longer interested in the operation result. It does not cancel the
+ operation. If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def CancelOperation(self, request, context):
+ """Starts asynchronous cancellation on a long-running operation. The server
+ makes a best effort to cancel the operation, but success is not
+ guaranteed. If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`. Clients can use
+ [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ other methods to check whether the cancellation succeeded or whether the
+ operation completed despite cancellation. On successful cancellation,
+ the operation is not deleted; instead, it becomes an operation with
+ an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ corresponding to `Code.CANCELLED`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_OperationsServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'ListOperations': grpc.unary_unary_rpc_method_handler(
+ servicer.ListOperations,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.SerializeToString,
+ ),
+ 'GetOperation': grpc.unary_unary_rpc_method_handler(
+ servicer.GetOperation,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ 'DeleteOperation': grpc.unary_unary_rpc_method_handler(
+ servicer.DeleteOperation,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ 'CancelOperation': grpc.unary_unary_rpc_method_handler(
+ servicer.CancelOperation,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'google.longrunning.Operations', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
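
For orientation, here is a minimal sketch of how the generated registration
helper above is meant to be used; the ``MyOperations`` subclass, the port and
the thread count are illustrative assumptions, not part of this patch.

.. code:: python

    from concurrent import futures

    import grpc

    from buildstream._protos.google.longrunning import operations_pb2
    from buildstream._protos.google.longrunning import operations_pb2_grpc


    class MyOperations(operations_pb2_grpc.OperationsServicer):
        # Override only what the service supports; the other methods
        # keep the default UNIMPLEMENTED behaviour from the base class.
        def GetOperation(self, request, context):
            return operations_pb2.Operation(name=request.name, done=True)


    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    operations_pb2_grpc.add_OperationsServicer_to_server(MyOperations(), server)
    server.add_insecure_port('localhost:50051')
    server.start()
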
diff --git a/buildstream/_protos/google/rpc/__init__.py b/buildstream/_protos/google/rpc/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/buildstream/_protos/google/rpc/__init__.py
diff --git a/buildstream/_protos/google/rpc/status.proto b/buildstream/_protos/google/rpc/status.proto
new file mode 100644
index 000000000..0839ee966
--- /dev/null
+++ b/buildstream/_protos/google/rpc/status.proto
@@ -0,0 +1,92 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+import "google/protobuf/any.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/rpc/status;status";
+option java_multiple_files = true;
+option java_outer_classname = "StatusProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// The `Status` type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs. It is used by
+// [gRPC](https://github.com/grpc). The error model is designed to be:
+//
+// - Simple to use and understand for most users
+// - Flexible enough to meet unexpected needs
+//
+// # Overview
+//
+// The `Status` message contains three pieces of data: error code, error message,
+// and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
+// error message should be a developer-facing English message that helps
+// developers *understand* and *resolve* the error. If a localized user-facing
+// error message is needed, put the localized message in the error details or
+// localize it in the client. The optional error details may contain arbitrary
+// information about the error. There is a predefined set of error detail types
+// in the package `google.rpc` that can be used for common error conditions.
+//
+// # Language mapping
+//
+// The `Status` message is the logical representation of the error model, but it
+// is not necessarily the actual wire format. When the `Status` message is
+// exposed in different client libraries and different wire protocols, it can be
+// mapped differently. For example, it will likely be mapped to some exceptions
+// in Java, but more likely mapped to some error codes in C.
+//
+// # Other uses
+//
+// The error model and the `Status` message can be used in a variety of
+// environments, either with or without APIs, to provide a
+// consistent developer experience across different environments.
+//
+// Example uses of this error model include:
+//
+// - Partial errors. If a service needs to return partial errors to the client,
+// it may embed the `Status` in the normal response to indicate the partial
+// errors.
+//
+// - Workflow errors. A typical workflow has multiple steps. Each step may
+// have a `Status` message for error reporting.
+//
+// - Batch operations. If a client uses batch request and batch response, the
+// `Status` message should be used directly inside batch response, one for
+// each error sub-response.
+//
+// - Asynchronous operations. If an API call embeds asynchronous operation
+// results in its response, the status of those operations should be
+// represented directly using the `Status` message.
+//
+// - Logging. If some API errors are stored in logs, the message `Status` could
+// be used directly after any stripping needed for security/privacy reasons.
+message Status {
+ // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+ int32 code = 1;
+
+ // A developer-facing error message, which should be in English. Any
+ // user-facing error message should be localized and sent in the
+ // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+ string message = 2;
+
+ // A list of messages that carry the error details. There is a common set of
+ // message types for APIs to use.
+ repeated google.protobuf.Any details = 3;
+}
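
As an illustration of the message defined above, a short sketch using the
generated Python bindings; the code and message values are examples only.

.. code:: python

    from buildstream._protos.google.rpc import status_pb2

    status = status_pb2.Status(code=5, message='artifact not found')  # 5 = NOT_FOUND

    # 'details' is a repeated google.protobuf.Any, so any message type
    # can be packed in; another Status is used here purely as an example.
    status.details.add().Pack(status_pb2.Status(code=5))

    data = status.SerializeToString()
    assert status_pb2.Status.FromString(data).message == 'artifact not found'
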
diff --git a/buildstream/_protos/google/rpc/status_pb2.py b/buildstream/_protos/google/rpc/status_pb2.py
new file mode 100644
index 000000000..6c4772311
--- /dev/null
+++ b/buildstream/_protos/google/rpc/status_pb2.py
@@ -0,0 +1,88 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/rpc/status.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/rpc/status.proto',
+ package='google.rpc',
+ syntax='proto3',
+ serialized_pb=_b('\n\x17google/rpc/status.proto\x12\ngoogle.rpc\x1a\x19google/protobuf/any.proto\"N\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12%\n\x07\x64\x65tails\x18\x03 \x03(\x0b\x32\x14.google.protobuf.AnyB^\n\x0e\x63om.google.rpcB\x0bStatusProtoP\x01Z7google.golang.org/genproto/googleapis/rpc/status;status\xa2\x02\x03RPCb\x06proto3')
+ ,
+ dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
+
+
+
+
+_STATUS = _descriptor.Descriptor(
+ name='Status',
+ full_name='google.rpc.Status',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='code', full_name='google.rpc.Status.code', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='message', full_name='google.rpc.Status.message', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='details', full_name='google.rpc.Status.details', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=66,
+ serialized_end=144,
+)
+
+_STATUS.fields_by_name['details'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+DESCRIPTOR.message_types_by_name['Status'] = _STATUS
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), dict(
+ DESCRIPTOR = _STATUS,
+ __module__ = 'google.rpc.status_pb2'
+ # @@protoc_insertion_point(class_scope:google.rpc.Status)
+ ))
+_sym_db.RegisterMessage(Status)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.rpcB\013StatusProtoP\001Z7google.golang.org/genproto/googleapis/rpc/status;status\242\002\003RPC'))
+# @@protoc_insertion_point(module_scope)
diff --git a/buildstream/_protos/google/rpc/status_pb2_grpc.py b/buildstream/_protos/google/rpc/status_pb2_grpc.py
new file mode 100644
index 000000000..a89435267
--- /dev/null
+++ b/buildstream/_protos/google/rpc/status_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/buildstream/_signals.py b/buildstream/_signals.py
index f1e520d37..0035485a5 100644
--- a/buildstream/_signals.py
+++ b/buildstream/_signals.py
@@ -19,6 +19,7 @@
import os
import signal
import sys
+import threading
import traceback
from contextlib import contextmanager, ExitStack
from collections import deque
@@ -71,6 +72,11 @@ def terminator_handler(signal_, frame):
def terminator(terminate_func):
global terminator_stack # pylint: disable=global-statement
+ # Signal handling only works in the main thread
+ if threading.current_thread() != threading.main_thread():
+ yield
+ return
+
outermost = False if terminator_stack else True
terminator_stack.append(terminate_func)
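
With this change, ``terminator()`` silently degrades to a no-op outside the
main thread, where Python disallows installing signal handlers. A simplified,
standalone sketch of the same pattern (the real implementation additionally
maintains ``terminator_stack``):

.. code:: python

    import signal
    import threading
    from contextlib import contextmanager

    @contextmanager
    def terminator(terminate_func):
        # signal.signal() may only be called from the main thread,
        # so become a transparent no-op everywhere else.
        if threading.current_thread() != threading.main_thread():
            yield
            return

        previous = signal.signal(signal.SIGTERM, lambda *_: terminate_func())
        try:
            yield
        finally:
            signal.signal(signal.SIGTERM, previous)
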
diff --git a/doc/Makefile b/doc/Makefile
index eaef15a61..3557ac505 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -76,7 +76,7 @@ clean: templates-clean sessions-clean
templates:
mkdir -p source/elements
mkdir -p source/sources
- $(SPHINXAPIDOC) --force --separate --module-first --no-headings --no-toc -o source $(CURDIR)/../buildstream
+ $(SPHINXAPIDOC) --force --separate --module-first --no-headings --no-toc -o source $(CURDIR)/../buildstream *_pb2*.py
$(call plugin-doc-skeleton,$(CURDIR)/../buildstream/plugins/elements,elements)
$(call plugin-doc-skeleton,$(CURDIR)/../buildstream/plugins/sources,sources)
diff --git a/doc/source/install_artifacts.rst b/doc/source/install_artifacts.rst
index e0808e8fe..e96ca972c 100644
--- a/doc/source/install_artifacts.rst
+++ b/doc/source/install_artifacts.rst
@@ -25,12 +25,11 @@ Users can declare additional remote caches in the :ref:`user configuration
define its own cache, it may be useful to have a local mirror of its cache, or
you may have a reason to share artifacts privately.
-Remote artifact caches are identified by their URL. There are currently three
+Remote artifact caches are identified by their URL. There are currently two
supported protocols:
-* ``http``: Pull-only access, without transport-layer security
-* ``https``: Pull-only access, with transport-layer security
-* ``ssh``: Push access, authenticated via SSH
+* ``http``: Pull and push access, without transport-layer security
+* ``https``: Pull and push access, with transport-layer security
BuildStream allows you to configure as many caches as you like, and will query
them in a specific order:
@@ -54,17 +53,23 @@ The rest of this page outlines how to set up a shared artifact cache.
Setting up the user
~~~~~~~~~~~~~~~~~~~
-A specific user is not needed for downloading artifacts, but since we
-are going to use ssh to upload the artifacts, you will want a dedicated
-user to own the artifact cache.
+A specific user is not needed; however, a dedicated user to own the
+artifact cache is recommended.
.. code:: bash
useradd artifacts
+The recommended approach is to run two instances of the artifact server on
+different ports: one with push disabled that doesn't require client
+authentication, and one with push enabled that does.
-Installing the receiver
-~~~~~~~~~~~~~~~~~~~~~~~
+Alternatively, you can set up a reverse proxy and handle authentication
+and authorization there.
+
+
+Installing the server
+~~~~~~~~~~~~~~~~~~~~~
You will also need to install BuildStream on the artifact server in order
-to receive uploaded artifacts over ssh. Follow the instructions for installing
+to receive uploaded artifacts. Follow the instructions for installing
BuildStream :ref:`here <install>`
@@ -74,10 +79,10 @@ in a system wide location, with ``pip3 install .`` in the BuildStream
checkout directory.
Otherwise, some tinkering is required to ensure BuildStream is available
-in ``PATH`` when it's companion ``bst-artifact-receive`` program is run
+in ``PATH`` when its companion ``bst-artifact-server`` program is run
remotely.
-You can install only the artifact receiver companion program without
+You can install only the artifact server companion program without
requiring BuildStream's more exigent dependencies by setting the
``BST_ARTIFACTS_ONLY`` environment variable at install time, like so:
@@ -86,81 +91,57 @@ requiring BuildStream's more exigent dependencies by setting the
BST_ARTIFACTS_ONLY=1 pip3 install .
-Initializing the cache
-~~~~~~~~~~~~~~~~~~~~~~
-Now that you have a dedicated user to own the artifact cache, change
-to that user, and create the artifact cache ostree repository directly
-in it's home directory as such:
+Command reference
+~~~~~~~~~~~~~~~~~
-.. code:: bash
+.. click:: buildstream._artifactcache.casserver:server_main
+ :prog: bst-artifact-server
- ostree init --mode archive-z2 --repo artifacts
-This should result in an artifact cache residing at the path ``/home/artifacts/artifacts``
+Key pair for the server
+~~~~~~~~~~~~~~~~~~~~~~~
+For TLS you need a key pair for the server. The following example creates
+a self-signed key, which requires clients to have a copy of the server certificate
+(e.g., in the project directory).
+You can also use a key pair obtained from a trusted certificate authority instead.
-Serve the cache over https
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-This part should be pretty simple, you can do this with various technologies, all
-we really require is that you make the artifacts available over https (you can use
-http but until we figure out using gpg signed ostree commits for the artifacts, it's
-better to serve over https).
+.. code:: bash
-Here is an example, note that you must have a certificate **pem** file to use, as
-is the case for hosting anything over https.
+ openssl req -new -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -batch -subj "/CN=artifacts.com" -out server.crt -keyout server.key
-.. code:: python
- import http.server, ssl, os
+Authenticating users
+~~~~~~~~~~~~~~~~~~~~
+To give a given user permission to upload artifacts,
+create a TLS key pair on the client.
- # Maybe use a custom port, especially if you are serving
- # other web pages on the same computer
- server_address = ('localhost', 443)
- artifact_path = '/home/artifacts'
+.. code:: bash
- # The http server will serve from it's current
- # working directory
- os.chdir(artifact_path)
+ openssl req -new -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -batch -subj "/CN=client" -out client.crt -keyout client.key
- # Create Server
- httpd = http.server.HTTPServer(
- server_address,
- http.server.SimpleHTTPRequestHandler)
+Copy the public client certificate ``client.crt`` to the server and then add it
+to the authorized certificates, like so:
- # Add ssl
- httpd.socket = ssl.wrap_socket(httpd.socket,
- server_side=True,
- certfile='localhost.pem',
- ssl_version=ssl.PROTOCOL_TLSv1)
+.. code:: bash
- # Run it
- httpd.serve_forever()
+ cat client.crt >> /home/artifacts/authorized.crt
-Configure and run sshd
-~~~~~~~~~~~~~~~~~~~~~~
-You will need to run the sshd service to allow uploading artifacts.
+Serve the cache over https
+~~~~~~~~~~~~~~~~~~~~~~~~~~
-For this you will want something like the following in your ``/etc/ssh/sshd_config``
+Public instance without push:
.. code:: bash
- # Allow ssh logins/commands with the artifacts user
- AllowUsers artifacts
+ bst-artifact-server --port 11001 --server-key server.key --server-cert server.crt /home/artifacts/artifacts
- # Some specifics for the artifacts user
- Match user artifacts
+Instance with push and requiring client authentication:
- # Dont allow password authentication for artifacts user
- #
- PasswordAuthentication no
+.. code:: bash
- # Also lets dedicate this login for only running the
- # bst-artifact-receive program, note that the full
- # command must be specified here; 'artifacts' is
- # the HOME relative path to the artifact cache.
- # The exact pull URL must also be specified.
- ForceCommand bst-artifact-receive --pull-url https://example.com/artifacts --verbose artifacts
+ bst-artifact-server --port 11002 --server-key server.key --server-cert server.crt --client-certs authorized.crt --enable-push /home/artifacts/artifacts
User configuration
@@ -172,6 +153,8 @@ Assuming you have the same setup used in this document, and that your
host is reachable on the internet as ``artifacts.com`` (for example),
then a user can use the following user configuration:
+Pull-only:
+
.. code:: yaml
#
@@ -179,22 +162,27 @@ then a user can use the following user configuration:
#
artifacts:
- url: https://artifacts.com/artifacts
+ url: https://artifacts.com:11001
- # Alternative form if you have push access to the cache
- #url: ssh://artifacts@artifacts.com:22200/artifacts
- #push: true
+ # Optional server certificate if not trusted by system root certificates
+ server-cert: server.crt
+Pull and push:
-Authenticating users
-~~~~~~~~~~~~~~~~~~~~
-In order to give permission to a given user to upload
-artifacts, simply use the regular ``ssh`` method.
+.. code:: yaml
-First obtain the user's public ssh key, and add it
-to the authorized keys, like so:
+ #
+ # Artifacts
+ #
+ artifacts:
-.. code:: bash
+ url: https://artifacts.com:11002
+
+ # Optional server certificate if not trusted by system root certificates
+ server-cert: server.crt
- cat user_id_rsa.pub >> /home/artifacts/.ssh/authorized_keys
+ # Optional client key pair for authentication
+ client-key: client.key
+ client-cert: client.crt
+ push: true
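
For reference, the ``server-cert``, ``client-key`` and ``client-cert`` options
map directly onto gRPC channel credentials. A sketch of the client side,
assuming the file names and the host/port from the examples above:

.. code:: python

    import grpc

    with open('server.crt', 'rb') as f:
        server_cert = f.read()
    with open('client.key', 'rb') as f:
        client_key = f.read()
    with open('client.crt', 'rb') as f:
        client_cert = f.read()

    credentials = grpc.ssl_channel_credentials(root_certificates=server_cert,
                                               private_key=client_key,
                                               certificate_chain=client_cert)
    channel = grpc.secure_channel('artifacts.com:11002', credentials)
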
diff --git a/setup.cfg b/setup.cfg
index e0b3c9970..d37db7839 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -23,5 +23,7 @@ pep8ignore =
*/bin/* ALL
buildstream/_fuse/fuse.py ALL
.eggs/* ALL
+ *_pb2.py ALL
+ *_pb2_grpc.py ALL
env =
D:BST_TEST_SUITE=True
diff --git a/setup.py b/setup.py
index 1f9ff0081..50761b6ae 100755
--- a/setup.py
+++ b/setup.py
@@ -19,6 +19,7 @@
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
import os
+import re
import shutil
import subprocess
import sys
@@ -29,7 +30,7 @@ if sys.version_info[0] != 3 or sys.version_info[1] < 4:
sys.exit(1)
try:
- from setuptools import setup, find_packages
+ from setuptools import setup, find_packages, Command
from setuptools.command.easy_install import ScriptWriter
except ImportError:
print("BuildStream requires setuptools in order to build. Install it using"
@@ -82,47 +83,6 @@ def assert_bwrap():
exit_bwrap("Bubblewrap too old")
-##################################################################
-# OSTree version requirements
-##################################################################
-REQUIRED_OSTREE_YEAR = 2017
-REQUIRED_OSTREE_RELEASE = 8
-
-
-def exit_ostree(reason):
- print(reason +
- "\nBuildStream requires OSTree >= v{}.{} with Python bindings. "
- .format(REQUIRED_OSTREE_YEAR, REQUIRED_OSTREE_RELEASE) +
- "Install it using your package manager (usually ostree or gir1.2-ostree-1.0).")
- sys.exit(1)
-
-
-def assert_ostree_version():
- platform = os.environ.get('BST_FORCE_BACKEND', '') or sys.platform
- if platform.startswith('linux'):
- try:
- import gi
- except ImportError:
- print("BuildStream requires PyGObject (aka PyGI). Install it using"
- " your package manager (usually pygobject3 or python-gi).")
- sys.exit(1)
-
- try:
- gi.require_version('OSTree', '1.0')
- from gi.repository import OSTree
- except ValueError:
- exit_ostree("OSTree not found")
-
- try:
- if OSTree.YEAR_VERSION < REQUIRED_OSTREE_YEAR or \
- (OSTree.YEAR_VERSION == REQUIRED_OSTREE_YEAR and
- OSTree.RELEASE_VERSION < REQUIRED_OSTREE_RELEASE):
- exit_ostree("OSTree v{}.{} is too old."
- .format(OSTree.YEAR_VERSION, OSTree.RELEASE_VERSION))
- except AttributeError:
- exit_ostree("OSTree is too old.")
-
-
###########################################
# List the pre-built man pages to install #
###########################################
@@ -154,13 +114,12 @@ def list_man_pages():
# So screw it, lets just use an env var.
bst_install_entry_points = {
'console_scripts': [
- 'bst-artifact-receive = buildstream._artifactcache.pushreceive:receive_main'
+ 'bst-artifact-server = buildstream._artifactcache.casserver:server_main'
],
}
if not os.environ.get('BST_ARTIFACTS_ONLY', ''):
assert_bwrap()
- assert_ostree_version()
bst_install_entry_points['console_scripts'] += [
'bst = buildstream._frontend:cli'
]
@@ -206,12 +165,66 @@ ScriptWriter.get_args = get_args
#####################################################
+# gRPC command for code generation #
+#####################################################
+class BuildGRPC(Command):
+ """Command to generate project *_pb2.py modules from proto files."""
+
+ description = 'build gRPC protobuf modules'
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ try:
+ import grpc_tools.command
+ except ImportError:
+ print("BuildStream requires grpc_tools in order to build gRPC modules.\n"
+ "Install it via pip (pip3 install grpcio-tools).")
+ sys.exit(1)
+
+ protos_root = 'buildstream/_protos'
+
+ grpc_tools.command.build_package_protos(protos_root)
+
+ # Postprocess imports in generated code
+ for root, _, files in os.walk(protos_root):
+ for filename in files:
+ if filename.endswith('.py'):
+ path = os.path.join(root, filename)
+ with open(path, 'r') as f:
+ code = f.read()
+
+ # All protos are in buildstream._protos
+ code = re.sub(r'^from ', r'from buildstream._protos.',
+ code, flags=re.MULTILINE)
+ # Except for the core google.protobuf protos
+ code = re.sub(r'^from buildstream._protos.google.protobuf', r'from google.protobuf',
+ code, flags=re.MULTILINE)
+
+ with open(path, 'w') as f:
+ f.write(code)
+
+
+def get_cmdclass():
+ cmdclass = {
+ 'build_grpc': BuildGRPC,
+ }
+ cmdclass.update(versioneer.get_cmdclass())
+ return cmdclass
+
+
+#####################################################
# Main setup() Invocation #
#####################################################
setup(name='BuildStream',
# Use versioneer
version=versioneer.get_version(),
- cmdclass=versioneer.get_cmdclass(),
+ cmdclass=get_cmdclass(),
description='A framework for modelling build pipelines in YAML',
license='LGPL',
@@ -243,6 +256,8 @@ setup(name='BuildStream',
'Click',
'blessings',
'jinja2 >= 2.10',
+ 'protobuf >= 3.5',
+ 'grpcio',
],
entry_points=bst_install_entry_points,
setup_requires=['pytest-runner'],
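
With this command class in place, ``python3 setup.py build_grpc`` regenerates
the bundled modules and rewrites their imports as described above. A small
self-contained sketch of what the two substitutions do to a pair of sample
import lines:

.. code:: python

    import re

    code = ('from google.longrunning import operations_pb2\n'
            'from google.protobuf import empty_pb2\n')

    # Redirect all generated imports into buildstream._protos ...
    code = re.sub(r'^from ', r'from buildstream._protos.', code, flags=re.MULTILINE)
    # ... except for the core google.protobuf modules.
    code = re.sub(r'^from buildstream._protos.google.protobuf', r'from google.protobuf',
                  code, flags=re.MULTILINE)

    assert code == ('from buildstream._protos.google.longrunning import operations_pb2\n'
                    'from google.protobuf import empty_pb2\n')
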
diff --git a/tests/artifactcache/junctions.py b/tests/artifactcache/junctions.py
index 12423f937..378d007a0 100644
--- a/tests/artifactcache/junctions.py
+++ b/tests/artifactcache/junctions.py
@@ -2,7 +2,6 @@ import os
import shutil
import pytest
from tests.testutils import cli, create_artifact_share
-from tests.testutils.site import IS_LINUX
from buildstream import _yaml
@@ -37,60 +36,53 @@ def project_set_artifacts(project, url):
_yaml.dump(_yaml.node_sanitize(project_config), filename=project_conf_file)
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
@pytest.mark.datafiles(DATA_DIR)
def test_push_pull(cli, tmpdir, datafiles):
project = os.path.join(str(datafiles), 'foo')
base_project = os.path.join(str(project), 'base')
- share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare-foo'))
- base_share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare-base'))
-
- # First build it without the artifact cache configured
- result = cli.run(project=project, args=['build', 'target.bst'])
- assert result.exit_code == 0
-
- # Assert that we are now cached locally
- state = cli.get_element_state(project, 'target.bst')
- assert state == 'cached'
- state = cli.get_element_state(base_project, 'target.bst')
- assert state == 'cached'
-
- project_set_artifacts(project, share.repo)
- project_set_artifacts(base_project, base_share.repo)
-
- # Now try bst push
- result = cli.run(project=project, args=['push', '--deps', 'all', 'target.bst'])
- assert result.exit_code == 0
-
- # And finally assert that the artifacts are in the right shares
- assert_shared(cli, share, 'foo', project, 'target.bst')
- assert_shared(cli, base_share, 'base', base_project, 'target.bst')
-
- # Make sure we update the summary in our artifact shares,
- # we dont have a real server around to do it
- #
- share.update_summary()
- base_share.update_summary()
-
- # Now we've pushed, delete the user's local artifact cache
- # directory and try to redownload it from the share
- #
- artifacts = os.path.join(cli.directory, 'artifacts')
- shutil.rmtree(artifacts)
-
- # Assert that nothing is cached locally anymore
- state = cli.get_element_state(project, 'target.bst')
- assert state != 'cached'
- state = cli.get_element_state(base_project, 'target.bst')
- assert state != 'cached'
-
- # Now try bst pull
- result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
- assert result.exit_code == 0
-
- # And assert that they are again in the local cache, without having built
- state = cli.get_element_state(project, 'target.bst')
- assert state == 'cached'
- state = cli.get_element_state(base_project, 'target.bst')
- assert state == 'cached'
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare-foo')) as share,\
+ create_artifact_share(os.path.join(str(tmpdir), 'artifactshare-base')) as base_share:
+
+ # First build it without the artifact cache configured
+ result = cli.run(project=project, args=['build', 'target.bst'])
+ assert result.exit_code == 0
+
+ # Assert that we are now cached locally
+ state = cli.get_element_state(project, 'target.bst')
+ assert state == 'cached'
+ state = cli.get_element_state(base_project, 'target.bst')
+ assert state == 'cached'
+
+ project_set_artifacts(project, share.repo)
+ project_set_artifacts(base_project, base_share.repo)
+
+ # Now try bst push
+ result = cli.run(project=project, args=['push', '--deps', 'all', 'target.bst'])
+ assert result.exit_code == 0
+
+ # And finally assert that the artifacts are in the right shares
+ assert_shared(cli, share, 'foo', project, 'target.bst')
+ assert_shared(cli, base_share, 'base', base_project, 'target.bst')
+
+ # Now we've pushed, delete the user's local artifact cache
+ # directory and try to redownload it from the share
+ #
+ artifacts = os.path.join(cli.directory, 'artifacts')
+ shutil.rmtree(artifacts)
+
+ # Assert that nothing is cached locally anymore
+ state = cli.get_element_state(project, 'target.bst')
+ assert state != 'cached'
+ state = cli.get_element_state(base_project, 'target.bst')
+ assert state != 'cached'
+
+ # Now try bst pull
+ result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
+ assert result.exit_code == 0
+
+ # And assert that they are again in the local cache, without having built
+ state = cli.get_element_state(project, 'target.bst')
+ assert state == 'cached'
+ state = cli.get_element_state(base_project, 'target.bst')
+ assert state == 'cached'
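
The tests now consume ``create_artifact_share()`` as a context manager, so the
share is torn down even when an assertion fails mid-test. A rough sketch of
the pattern with a minimal stand-in for the real helper in ``tests/testutils``:

.. code:: python

    import os
    from contextlib import contextmanager

    class ArtifactShare:
        # Minimal stand-in: the real helper also spawns a cache server.
        def __init__(self, directory):
            os.makedirs(directory, exist_ok=True)
            self.repo = directory

        def close(self):
            pass  # the real helper shuts the server down here

    @contextmanager
    def create_artifact_share(directory):
        share = ArtifactShare(directory)
        try:
            yield share
        finally:
            share.close()  # teardown runs even if the test body raises
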
diff --git a/tests/artifactcache/tar.py b/tests/artifactcache/tar.py
deleted file mode 100644
index ef39be31c..000000000
--- a/tests/artifactcache/tar.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import os
-import tarfile
-import tempfile
-from contextlib import ExitStack
-
-import pytest
-
-from buildstream._artifactcache.tarcache import _Tar
-from buildstream import utils, ProgramNotFoundError
-
-
-# Test that it 'works' - this may be equivalent to test_archive_no_tar()
-# on some systems.
-def test_archive_default():
- with ExitStack() as stack:
- src = stack.enter_context(tempfile.TemporaryDirectory())
- tar_dir = stack.enter_context(tempfile.TemporaryDirectory())
- scratch = stack.enter_context(tempfile.TemporaryDirectory())
- test_file = stack.enter_context(open(os.path.join(src, 'test'), 'a'))
- test_file.write('Test')
-
- _Tar.archive(os.path.join(tar_dir, 'test.tar'), '.', src)
-
- with tarfile.open(os.path.join(tar_dir, 'test.tar')) as tar:
- tar.extractall(path=scratch)
-
- assert os.listdir(scratch) == os.listdir(src)
-
-
-def test_archive_no_tar():
- # Modify the path to exclude 'tar'
- old_path = os.environ.get('PATH')
- os.environ['PATH'] = ''
-
- # Ensure we can't find 'tar' or 'gtar'
- try:
- for tar in ['gtar', 'tar']:
- with pytest.raises(ProgramNotFoundError):
- utils.get_host_tool(tar)
-
- # Run the same test as before, this time 'tar' should not be available
- test_archive_default()
-
- # Reset the environment
- finally:
- os.environ['PATH'] = old_path
-
-
-# Same thing as test_archive_default()
-def test_extract_default():
- with ExitStack() as stack:
- src = stack.enter_context(tempfile.TemporaryDirectory())
- tar_dir = stack.enter_context(tempfile.TemporaryDirectory())
- scratch = stack.enter_context(tempfile.TemporaryDirectory())
- test_file = stack.enter_context(open(os.path.join(src, 'test'), 'a'))
- test_file.write('Test')
-
- with tarfile.open(os.path.join(tar_dir, 'test.tar'), 'a:') as tar:
- tar.add(src, 'contents')
-
- _Tar.extract(os.path.join(tar_dir, 'test.tar'), scratch)
-
- assert os.listdir(os.path.join(scratch, 'contents')) == os.listdir(src)
-
-
-def test_extract_no_tar():
- # Modify the path to exclude 'tar'
- old_path = os.environ.get('PATH')
- os.environ['PATH'] = ''
-
- # Ensure we can't find 'tar' or 'gtar'
- for tar in ['gtar', 'tar']:
- with pytest.raises(ProgramNotFoundError):
- utils.get_host_tool(tar)
-
- # Run the same test as before, this time 'tar' should not be available
- try:
- test_extract_default()
-
- # Reset the environment
- finally:
- os.environ['PATH'] = old_path
diff --git a/tests/cachekey/project/elements/build1.expected b/tests/cachekey/project/elements/build1.expected
index ab8adf225..7c5af6054 100644
--- a/tests/cachekey/project/elements/build1.expected
+++ b/tests/cachekey/project/elements/build1.expected
@@ -1 +1 @@
-93594f53df6c599598ea9c1d5101a8f7e57bbd82cac521494ce680e6f84de67d \ No newline at end of file
+3db51572837956b28ffbc4aabdce659b4a1d91dcbb8b75954210346959ed5fa9 \ No newline at end of file
diff --git a/tests/cachekey/project/elements/build2.expected b/tests/cachekey/project/elements/build2.expected
index 94990176a..e1bd91218 100644
--- a/tests/cachekey/project/elements/build2.expected
+++ b/tests/cachekey/project/elements/build2.expected
@@ -1 +1 @@
-3ae596efed1126d440780ef33d2144a06cb7215a778c4f59b12a2f77fa0ee3b2 \ No newline at end of file
+bcde6fc389b7d8bb7788989b68f68653ab8ed658117012c0611f218f4a585d38 \ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose1.expected b/tests/cachekey/project/elements/compose1.expected
index e912fbe62..86a2a2f2a 100644
--- a/tests/cachekey/project/elements/compose1.expected
+++ b/tests/cachekey/project/elements/compose1.expected
@@ -1 +1 @@
-d67fccd867504706010f9f36b07cd35b3129e9d79ae287c3dc2bf9ec03e309ea \ No newline at end of file
+6736bbcc055e1801a19288d3a64b622e0b9223164f8ad2ce842b18a4eaa0cfb9 \ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose2.expected b/tests/cachekey/project/elements/compose2.expected
index 4c3b901b1..a811cc421 100644
--- a/tests/cachekey/project/elements/compose2.expected
+++ b/tests/cachekey/project/elements/compose2.expected
@@ -1 +1 @@
-743eaac4f261d389d2c12fb9c8605eb70d5e42c8a0bccadef9f651dd137cedde \ No newline at end of file
+9294428a0b5c0d44fdb3ab0f883ee87f9e62d51f96c7de1e5e81ed5e3934d403 \ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose3.expected b/tests/cachekey/project/elements/compose3.expected
index 85843a190..ce28c853a 100644
--- a/tests/cachekey/project/elements/compose3.expected
+++ b/tests/cachekey/project/elements/compose3.expected
@@ -1 +1 @@
-5b401864d1d91809f59c258d37f78b410b244fcb20cab4bd0c1da17257515643 \ No newline at end of file
+4f1569b9a6317280e6299f9f7f706a6adcc89603030cde51d529dd6dfe2851be \ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose4.expected b/tests/cachekey/project/elements/compose4.expected
index 38060ae5a..8d95a3d87 100644
--- a/tests/cachekey/project/elements/compose4.expected
+++ b/tests/cachekey/project/elements/compose4.expected
@@ -1 +1 @@
-450664eb37302835e3289b95dfb38cab0b24e6c30c4b7b59a5dc1b5a7f1f01e0 \ No newline at end of file
+4c83744bec21c8c38bce2d48396b8df1eb4df7b2f155424016bd012743efd808 \ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose5.expected b/tests/cachekey/project/elements/compose5.expected
index 2f6307ca9..183534aa4 100644
--- a/tests/cachekey/project/elements/compose5.expected
+++ b/tests/cachekey/project/elements/compose5.expected
@@ -1 +1 @@
-fedaf8a315f8a9fb94d11c6f74a409188ff9397eac710e5ba6d9532162bd6973 \ No newline at end of file
+97385aa2192ef0295dd2601e78491d8bdf6b74e98938d0f8011747c2caf3a5c6 \ No newline at end of file
diff --git a/tests/cachekey/project/elements/import1.expected b/tests/cachekey/project/elements/import1.expected
index 4669ed485..387da88b7 100644
--- a/tests/cachekey/project/elements/import1.expected
+++ b/tests/cachekey/project/elements/import1.expected
@@ -1 +1 @@
-20582fab199a8d110fd65b5616f45bc08ae3eccc7bfe8b94ba987f3986b69ce5 \ No newline at end of file
+99c8f61d415de3a6c96e48299fda5554bf4bbaf56bb4b5acd85861ab37ede0c3 \ No newline at end of file
diff --git a/tests/cachekey/project/elements/import2.expected b/tests/cachekey/project/elements/import2.expected
index 2b071ac53..0893dde2a 100644
--- a/tests/cachekey/project/elements/import2.expected
+++ b/tests/cachekey/project/elements/import2.expected
@@ -1 +1 @@
-4fcc04697288b0fdc0785b7350c308c3b40177d2ad0ec47ee4e59afbbe7634a9 \ No newline at end of file
+5f5884c5e4bb7066eede3a135e49753ec06b757a30983513a7a4e0cdd2a8f402 \ No newline at end of file
diff --git a/tests/cachekey/project/elements/import3.expected b/tests/cachekey/project/elements/import3.expected
index 538daae37..6d0fe864a 100644
--- a/tests/cachekey/project/elements/import3.expected
+++ b/tests/cachekey/project/elements/import3.expected
@@ -1 +1 @@
-203a3749724d461a237f22ff261870616cedfe34bfb59603c935fd05644059b3 \ No newline at end of file
+e11f93ec629bc3556e15bd374e67a0b5e34350e1e9b1d1f98f8de984a27bbead \ No newline at end of file
diff --git a/tests/cachekey/project/elements/script1.expected b/tests/cachekey/project/elements/script1.expected
index cf1213901..e8d5b24c4 100644
--- a/tests/cachekey/project/elements/script1.expected
+++ b/tests/cachekey/project/elements/script1.expected
@@ -1 +1 @@
-93de2701d76db777a560e1e531883b7922b07683d4e7c14ea26b0500946f2c62 \ No newline at end of file
+d8388b756de5c8441375ba32cedd9560a65a8f9a85e41038837d342c8fb10004 \ No newline at end of file
diff --git a/tests/cachekey/project/sources/bzr1.expected b/tests/cachekey/project/sources/bzr1.expected
index 0e2a851e5..ca11c959a 100644
--- a/tests/cachekey/project/sources/bzr1.expected
+++ b/tests/cachekey/project/sources/bzr1.expected
@@ -1 +1 @@
-8509b1e54cc11bc2681425a11498037ad3841295c26fec86ff61a6b09d83e10a \ No newline at end of file
+519ee88fcca7fea091245713ec68baa048e3d876ea22559d4b2035d3d2ab2494 \ No newline at end of file
diff --git a/tests/cachekey/project/sources/git1.expected b/tests/cachekey/project/sources/git1.expected
index 07fc21cfd..85dc88500 100644
--- a/tests/cachekey/project/sources/git1.expected
+++ b/tests/cachekey/project/sources/git1.expected
@@ -1 +1 @@
-c1931acaea82971f1fc243dbe035a228c6103d52e09e618c7eda85f141c726cc \ No newline at end of file
+a5424aa7cc25f0ada9ac1245b33d55d078559ae6c50b10bea3db9acb964b058c \ No newline at end of file
diff --git a/tests/cachekey/project/sources/git2.expected b/tests/cachekey/project/sources/git2.expected
index b08e08cf7..9a643c000 100644
--- a/tests/cachekey/project/sources/git2.expected
+++ b/tests/cachekey/project/sources/git2.expected
@@ -1 +1 @@
-6d1ee891d29e0af504ed59ccd46c653b74946d3778d7e941f4d8b6e68cf3ca50 \ No newline at end of file
+93bf7344c118664f0d7f2b8e5a6731b2a95de6df83ba7fa2a2ab28227b0b3e8b \ No newline at end of file
diff --git a/tests/cachekey/project/sources/local1.expected b/tests/cachekey/project/sources/local1.expected
index 4669ed485..387da88b7 100644
--- a/tests/cachekey/project/sources/local1.expected
+++ b/tests/cachekey/project/sources/local1.expected
@@ -1 +1 @@
-20582fab199a8d110fd65b5616f45bc08ae3eccc7bfe8b94ba987f3986b69ce5 \ No newline at end of file
+99c8f61d415de3a6c96e48299fda5554bf4bbaf56bb4b5acd85861ab37ede0c3 \ No newline at end of file
diff --git a/tests/cachekey/project/sources/local2.expected b/tests/cachekey/project/sources/local2.expected
index 4a0796ec2..598fe73ba 100644
--- a/tests/cachekey/project/sources/local2.expected
+++ b/tests/cachekey/project/sources/local2.expected
@@ -1 +1 @@
-527685945072d971075edf6e4a06ce7146ef1cd023da0001c6e1613d525c76aa \ No newline at end of file
+780a7e62bbe5bc0f975ec6cd749de6a85f9080d3628f16f881605801597916a7 \ No newline at end of file
diff --git a/tests/cachekey/project/sources/ostree1.expected b/tests/cachekey/project/sources/ostree1.expected
index 5b4bf12e9..0e8e83014 100644
--- a/tests/cachekey/project/sources/ostree1.expected
+++ b/tests/cachekey/project/sources/ostree1.expected
@@ -1 +1 @@
-b78e79c5ba297cf5cb41d6eaa5f4ca170216c967b84935364d30938021202341 \ No newline at end of file
+9b06b6e0c213a5475d2b0fcfee537c41dbec579e6109e95f7e7aeb0488f079f6 \ No newline at end of file
diff --git a/tests/cachekey/project/sources/patch1.expected b/tests/cachekey/project/sources/patch1.expected
index a04b8fd40..d7cf73c34 100644
--- a/tests/cachekey/project/sources/patch1.expected
+++ b/tests/cachekey/project/sources/patch1.expected
@@ -1 +1 @@
-84830ad8577e5fa5a9dab14ce3f995b4dc16699aebc33122aa2dc5fade34528d \ No newline at end of file
+d5b0f1fa5b4e3e7aa617de303125268c7a7461e415ecf1eccc8aee2cda56897e \ No newline at end of file
diff --git a/tests/cachekey/project/sources/patch2.expected b/tests/cachekey/project/sources/patch2.expected
index 3fafb87b8..56a92dc8e 100644
--- a/tests/cachekey/project/sources/patch2.expected
+++ b/tests/cachekey/project/sources/patch2.expected
@@ -1 +1 @@
-1d137c65e7f2f9c8a0a74a46461dfe9ba5c675d53a1ff96a4bf15f0889891883 \ No newline at end of file
+6decb6b49e48a5869b2a438254c911423275662aff73348cd95e64148011c097 \ No newline at end of file
diff --git a/tests/cachekey/project/sources/patch3.expected b/tests/cachekey/project/sources/patch3.expected
index 6a62b7049..f1257bb31 100644
--- a/tests/cachekey/project/sources/patch3.expected
+++ b/tests/cachekey/project/sources/patch3.expected
@@ -1 +1 @@
-fd1f209c8f44fd629fb5201d6f299c47567b64828235b470b2ff8ff6edba4478 \ No newline at end of file
+ab91e0ab9e167c4e9d31480c96a6a91a47ff27246f4eeff4ce6b671cbd865901 \ No newline at end of file
diff --git a/tests/cachekey/project/sources/tar1.expected b/tests/cachekey/project/sources/tar1.expected
index 5b52a4c00..ab0bd56ea 100644
--- a/tests/cachekey/project/sources/tar1.expected
+++ b/tests/cachekey/project/sources/tar1.expected
@@ -1 +1 @@
-003d5c53c81ab4bf7e375c4e9704bdbc260473fecb334c9f78ed24ec5c1a908e \ No newline at end of file
+ccb35d04789b0d83fd93a6c2f8688c4abfe20f5bc77420f63054893450b2a832 \ No newline at end of file
diff --git a/tests/cachekey/project/sources/tar2.expected b/tests/cachekey/project/sources/tar2.expected
index d823bdee0..03241f460 100644
--- a/tests/cachekey/project/sources/tar2.expected
+++ b/tests/cachekey/project/sources/tar2.expected
@@ -1 +1 @@
-f501ed7c8df19071712634049fed1a1fb22fbeb6f27973595bc8139e56c6c446 \ No newline at end of file
+441c80ed92c77df8247344337f470ac7ab7fe91d2fe3900b498708b0faeac4b5 \ No newline at end of file
diff --git a/tests/cachekey/project/sources/zip1.expected b/tests/cachekey/project/sources/zip1.expected
index 64c0655b4..a3ac93ecf 100644
--- a/tests/cachekey/project/sources/zip1.expected
+++ b/tests/cachekey/project/sources/zip1.expected
@@ -1 +1 @@
-6a3c3a788c6a6ddae204a013d0622b6c352a91ff31cdf6d652b96ad0ac5eda52 \ No newline at end of file
+be47de64162c9cce0322d0af327092c7afc3a890ba9d6ef92eef016dcced5bae \ No newline at end of file
diff --git a/tests/cachekey/project/sources/zip2.expected b/tests/cachekey/project/sources/zip2.expected
index 64bb77289..49bd45fd0 100644
--- a/tests/cachekey/project/sources/zip2.expected
+++ b/tests/cachekey/project/sources/zip2.expected
@@ -1 +1 @@
-50a555bf892822b8f5e4d59b940ba4359afe8e6d01dff013d918a3befd9c3d8f \ No newline at end of file
+bedd330938f9405e2febcf1de8428b7180eb62ab73f8e31e49871874ae351735 \ No newline at end of file
diff --git a/tests/cachekey/project/target.expected b/tests/cachekey/project/target.expected
index dcb6a6642..4f4c7c1f8 100644
--- a/tests/cachekey/project/target.expected
+++ b/tests/cachekey/project/target.expected
@@ -1 +1 @@
-0de68ec99d39b12857a5350ebfdc7f49fdde9a3457a31b2330896307fb503f7b \ No newline at end of file
+a408b3e4b6ba4d6a6338bd3153728be89a18b74b13bde554411a4371fda487bc \ No newline at end of file
diff --git a/tests/frontend/pull.py b/tests/frontend/pull.py
index c43cc83e3..a41c3498a 100644
--- a/tests/frontend/pull.py
+++ b/tests/frontend/pull.py
@@ -2,7 +2,6 @@ import os
import shutil
import pytest
from tests.testutils import cli, create_artifact_share
-from tests.testutils.site import IS_LINUX
from . import generate_junction
@@ -42,43 +41,42 @@ def assert_not_shared(cli, share, project, element_name):
# * `bst build` pushes all build elements to configured 'push' cache
# * `bst pull --deps all` downloads everything from cache after local deletion
#
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
@pytest.mark.datafiles(DATA_DIR)
def test_push_pull_all(cli, tmpdir, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename)
- share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
-
- # First build the target element and push to the remote.
- cli.configure({
- 'artifacts': {'url': share.repo, 'push': True}
- })
- result = cli.run(project=project, args=['build', 'target.bst'])
- result.assert_success()
- assert cli.get_element_state(project, 'target.bst') == 'cached'
-
- # Assert that everything is now cached in the remote.
- share.update_summary()
- all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst']
- for element_name in all_elements:
- assert_shared(cli, share, project, element_name)
-
- # Now we've pushed, delete the user's local artifact cache
- # directory and try to redownload it from the share
- #
- artifacts = os.path.join(cli.directory, 'artifacts')
- shutil.rmtree(artifacts)
- # Assert that nothing is cached locally anymore
- for element_name in all_elements:
- assert cli.get_element_state(project, element_name) != 'cached'
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+
+ # First build the target element and push to the remote.
+ cli.configure({
+ 'artifacts': {'url': share.repo, 'push': True}
+ })
+ result = cli.run(project=project, args=['build', 'target.bst'])
+ result.assert_success()
+ assert cli.get_element_state(project, 'target.bst') == 'cached'
+
+ # Assert that everything is now cached in the remote.
+ all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst']
+ for element_name in all_elements:
+ assert_shared(cli, share, project, element_name)
+
+ # Now we've pushed, delete the user's local artifact cache
+ # directory and try to redownload it from the share
+ #
+ artifacts = os.path.join(cli.directory, 'artifacts')
+ shutil.rmtree(artifacts)
- # Now try bst pull
- result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
- result.assert_success()
+ # Assert that nothing is cached locally anymore
+ for element_name in all_elements:
+ assert cli.get_element_state(project, element_name) != 'cached'
- # And assert that it's again in the local cache, without having built
- for element_name in all_elements:
- assert cli.get_element_state(project, element_name) == 'cached'
+ # Now try bst pull
+ result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
+ result.assert_success()
+
+ # And assert that it's again in the local cache, without having built
+ for element_name in all_elements:
+ assert cli.get_element_state(project, element_name) == 'cached'
# Tests that:
@@ -86,44 +84,40 @@ def test_push_pull_all(cli, tmpdir, datafiles):
# * `bst build` pushes all build elements ONLY to configured 'push' cache
# * `bst pull` finds artifacts that are available only in the secondary cache
#
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
@pytest.mark.datafiles(DATA_DIR)
def test_pull_secondary_cache(cli, tmpdir, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename)
- share1 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1'))
- share2 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2'))
-
- # Build the target and push it to share2 only.
- cli.configure({
- 'artifacts': [
- {'url': share1.repo, 'push': False},
- {'url': share2.repo, 'push': True},
- ]
- })
- result = cli.run(project=project, args=['build', 'target.bst'])
- result.assert_success()
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1')) as share1,\
+ create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2:
- share1.update_summary()
- share2.update_summary()
+ # Build the target and push it to share2 only.
+ cli.configure({
+ 'artifacts': [
+ {'url': share1.repo, 'push': False},
+ {'url': share2.repo, 'push': True},
+ ]
+ })
+ result = cli.run(project=project, args=['build', 'target.bst'])
+ result.assert_success()
- assert_not_shared(cli, share1, project, 'target.bst')
- assert_shared(cli, share2, project, 'target.bst')
+ assert_not_shared(cli, share1, project, 'target.bst')
+ assert_shared(cli, share2, project, 'target.bst')
- # Delete the user's local artifact cache.
- artifacts = os.path.join(cli.directory, 'artifacts')
- shutil.rmtree(artifacts)
+ # Delete the user's local artifact cache.
+ artifacts = os.path.join(cli.directory, 'artifacts')
+ shutil.rmtree(artifacts)
- # Assert that the element is not cached anymore.
- assert cli.get_element_state(project, 'target.bst') != 'cached'
+ # Assert that the element is not cached anymore.
+ assert cli.get_element_state(project, 'target.bst') != 'cached'
- # Now try bst pull
- result = cli.run(project=project, args=['pull', 'target.bst'])
- result.assert_success()
+ # Now try bst pull
+ result = cli.run(project=project, args=['pull', 'target.bst'])
+ result.assert_success()
- # And assert that it's again in the local cache, without having built,
- # i.e. we found it in share2.
- assert cli.get_element_state(project, 'target.bst') == 'cached'
+ # And assert that it's again in the local cache, without having built,
+ # i.e. we found it in share2.
+ assert cli.get_element_state(project, 'target.bst') == 'cached'
# Tests that:
@@ -131,182 +125,175 @@ def test_pull_secondary_cache(cli, tmpdir, datafiles):
# * `bst push --remote` pushes to the given remote, not one from the config
# * `bst pull --remote` pulls from the given remote
#
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
@pytest.mark.datafiles(DATA_DIR)
def test_push_pull_specific_remote(cli, tmpdir, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename)
- good_share = create_artifact_share(os.path.join(str(tmpdir), 'goodartifactshare'))
- bad_share = create_artifact_share(os.path.join(str(tmpdir), 'badartifactshare'))
+ with create_artifact_share(os.path.join(str(tmpdir), 'goodartifactshare')) as good_share,\
+ create_artifact_share(os.path.join(str(tmpdir), 'badartifactshare')) as bad_share:
- # Build the target so we have it cached locally only.
- result = cli.run(project=project, args=['build', 'target.bst'])
- result.assert_success()
+ # Build the target so we have it cached locally only.
+ result = cli.run(project=project, args=['build', 'target.bst'])
+ result.assert_success()
- state = cli.get_element_state(project, 'target.bst')
- assert state == 'cached'
+ state = cli.get_element_state(project, 'target.bst')
+ assert state == 'cached'
- # Configure the default push location to be bad_share; we will assert that
- # nothing actually gets pushed there.
- cli.configure({
- 'artifacts': {'url': bad_share.repo, 'push': True},
- })
+ # Configure the default push location to be bad_share; we will assert that
+ # nothing actually gets pushed there.
+ cli.configure({
+ 'artifacts': {'url': bad_share.repo, 'push': True},
+ })
- # Now try `bst push` to the good_share.
- result = cli.run(project=project, args=[
- 'push', 'target.bst', '--remote', good_share.repo
- ])
- result.assert_success()
+ # Now try `bst push` to the good_share.
+ result = cli.run(project=project, args=[
+ 'push', 'target.bst', '--remote', good_share.repo
+ ])
+ result.assert_success()
- good_share.update_summary()
- bad_share.update_summary()
+ # Assert that all the artifacts are in the share we pushed
+ # to, and not the other.
+ assert_shared(cli, good_share, project, 'target.bst')
+ assert_not_shared(cli, bad_share, project, 'target.bst')
- # Assert that all the artifacts are in the share we pushed
- # to, and not the other.
- assert_shared(cli, good_share, project, 'target.bst')
- assert_not_shared(cli, bad_share, project, 'target.bst')
-
- # Now we've pushed, delete the user's local artifact cache
- # directory and try to redownload it from the good_share.
- #
- artifacts = os.path.join(cli.directory, 'artifacts')
- shutil.rmtree(artifacts)
+ # Now we've pushed, delete the user's local artifact cache
+ # directory and try to redownload it from the good_share.
+ #
+ artifacts = os.path.join(cli.directory, 'artifacts')
+ shutil.rmtree(artifacts)
- result = cli.run(project=project, args=['pull', 'target.bst', '--remote',
- good_share.repo])
- result.assert_success()
+ result = cli.run(project=project, args=['pull', 'target.bst', '--remote',
+ good_share.repo])
+ result.assert_success()
- # And assert that it's again in the local cache, without having built
- assert cli.get_element_state(project, 'target.bst') == 'cached'
+ # And assert that it's again in the local cache, without having built
+ assert cli.get_element_state(project, 'target.bst') == 'cached'
# Tests that:
#
# * In non-strict mode, dependency changes don't block artifact reuse
#
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
@pytest.mark.datafiles(DATA_DIR)
def test_push_pull_non_strict(cli, tmpdir, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename)
- share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
- workspace = os.path.join(str(tmpdir), 'workspace')
-
- # First build the target element and push to the remote.
- cli.configure({
- 'artifacts': {'url': share.repo, 'push': True},
- 'projects': {
- 'test': {'strict': False}
- }
- })
- result = cli.run(project=project, args=['build', 'target.bst'])
- result.assert_success()
- assert cli.get_element_state(project, 'target.bst') == 'cached'
-
- # Assert that everything is now cached in the remote.
- share.update_summary()
- all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst']
- for element_name in all_elements:
- assert_shared(cli, share, project, element_name)
-
- # Now we've pushed, delete the user's local artifact cache
- # directory and try to redownload it from the share
- #
- artifacts = os.path.join(cli.directory, 'artifacts')
- shutil.rmtree(artifacts)
- # Assert that nothing is cached locally anymore
- for element_name in all_elements:
- assert cli.get_element_state(project, element_name) != 'cached'
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+ workspace = os.path.join(str(tmpdir), 'workspace')
+
+ # First build the target element and push to the remote.
+ cli.configure({
+ 'artifacts': {'url': share.repo, 'push': True},
+ 'projects': {
+ 'test': {'strict': False}
+ }
+ })
+ result = cli.run(project=project, args=['build', 'target.bst'])
+ result.assert_success()
+ assert cli.get_element_state(project, 'target.bst') == 'cached'
+
+ # Assert that everything is now cached in the remote.
+ all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst']
+ for element_name in all_elements:
+ assert_shared(cli, share, project, element_name)
+
+ # Now we've pushed, delete the user's local artifact cache
+ # directory and try to redownload it from the share
+ #
+ artifacts = os.path.join(cli.directory, 'artifacts')
+ shutil.rmtree(artifacts)
- # Add a file to force change in strict cache key of import-bin.bst
- with open(os.path.join(str(project), 'files', 'bin-files', 'usr', 'bin', 'world'), 'w') as f:
- f.write('world')
+ # Assert that nothing is cached locally anymore
+ for element_name in all_elements:
+ assert cli.get_element_state(project, element_name) != 'cached'
- # Assert that the workspaced element requires a rebuild
- assert cli.get_element_state(project, 'import-bin.bst') == 'buildable'
- # Assert that the target is still waiting due to --no-strict
- assert cli.get_element_state(project, 'target.bst') == 'waiting'
+ # Add a file to force change in strict cache key of import-bin.bst
+ with open(os.path.join(str(project), 'files', 'bin-files', 'usr', 'bin', 'world'), 'w') as f:
+ f.write('world')
- # Now try bst pull
- result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
- result.assert_success()
+ # Assert that the workspaced element requires a rebuild
+ assert cli.get_element_state(project, 'import-bin.bst') == 'buildable'
+ # Assert that the target is still waiting due to --no-strict
+ assert cli.get_element_state(project, 'target.bst') == 'waiting'
- # And assert that the target is again in the local cache, without having built
- assert cli.get_element_state(project, 'target.bst') == 'cached'
+ # Now try bst pull
+ result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst'])
+ result.assert_success()
+
+ # And assert that the target is again in the local cache, without having built
+ assert cli.get_element_state(project, 'target.bst') == 'cached'
# Regression test for https://gitlab.com/BuildStream/buildstream/issues/202
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
@pytest.mark.datafiles(DATA_DIR)
def test_push_pull_track_non_strict(cli, tmpdir, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename)
- share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
-
- # First build the target element and push to the remote.
- cli.configure({
- 'artifacts': {'url': share.repo, 'push': True},
- 'projects': {
- 'test': {'strict': False}
- }
- })
- result = cli.run(project=project, args=['build', 'target.bst'])
- result.assert_success()
- assert cli.get_element_state(project, 'target.bst') == 'cached'
-
- # Assert that everything is now cached in the remote.
- share.update_summary()
- all_elements = {'target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst'}
- for element_name in all_elements:
- assert_shared(cli, share, project, element_name)
-
- # Now we've pushed, delete the user's local artifact cache
- # directory and try to redownload it from the share
- #
- artifacts = os.path.join(cli.directory, 'artifacts')
- shutil.rmtree(artifacts)
- # Assert that nothing is cached locally anymore
- for element_name in all_elements:
- assert cli.get_element_state(project, element_name) != 'cached'
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+
+ # First build the target element and push to the remote.
+ cli.configure({
+ 'artifacts': {'url': share.repo, 'push': True},
+ 'projects': {
+ 'test': {'strict': False}
+ }
+ })
+ result = cli.run(project=project, args=['build', 'target.bst'])
+ result.assert_success()
+ assert cli.get_element_state(project, 'target.bst') == 'cached'
+
+ # Assert that everything is now cached in the remote.
+ all_elements = {'target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst'}
+ for element_name in all_elements:
+ assert_shared(cli, share, project, element_name)
+
+ # Now we've pushed, delete the user's local artifact cache
+ # directory and try to redownload it from the share
+ #
+ artifacts = os.path.join(cli.directory, 'artifacts')
+ shutil.rmtree(artifacts)
+
+ # Assert that nothing is cached locally anymore
+ for element_name in all_elements:
+ assert cli.get_element_state(project, element_name) != 'cached'
+
+ # Now try bst build with tracking and pulling.
+ # Tracking will be skipped for target.bst as it doesn't have any sources.
+ # With the non-strict build plan, target.bst immediately enters the pull queue.
+ # However, pulling has to be deferred until the dependencies have been
+ # tracked as the strict cache key needs to be calculated before querying
+ # the caches.
+ result = cli.run(project=project, args=['build', '--track-all', '--all', 'target.bst'])
+ result.assert_success()
+ assert set(result.get_pulled_elements()) == all_elements
- # Now try bst build with tracking and pulling.
- # Tracking will be skipped for target.bst as it doesn't have any sources.
- # With the non-strict build plan target.bst immediately enters the pull queue.
- # However, pulling has to be deferred until the dependencies have been
- # tracked as the strict cache key needs to be calculated before querying
- # the caches.
- result = cli.run(project=project, args=['build', '--track-all', '--all', 'target.bst'])
- result.assert_success()
- assert set(result.get_pulled_elements()) == all_elements
-
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
@pytest.mark.datafiles(DATA_DIR)
def test_push_pull_cross_junction(cli, tmpdir, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename)
- share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
- subproject_path = os.path.join(project, 'files', 'sub-project')
- junction_path = os.path.join(project, 'elements', 'junction.bst')
- generate_junction(tmpdir, subproject_path, junction_path, store_ref=True)
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+ subproject_path = os.path.join(project, 'files', 'sub-project')
+ junction_path = os.path.join(project, 'elements', 'junction.bst')
+
+ generate_junction(tmpdir, subproject_path, junction_path, store_ref=True)
- # First build the target element and push to the remote.
- cli.configure({
- 'artifacts': {'url': share.repo, 'push': True}
- })
- result = cli.run(project=project, args=['build', 'junction.bst:import-etc.bst'])
- result.assert_success()
- assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'cached'
+ # First build the target element and push to the remote.
+ cli.configure({
+ 'artifacts': {'url': share.repo, 'push': True}
+ })
+ result = cli.run(project=project, args=['build', 'junction.bst:import-etc.bst'])
+ result.assert_success()
+ assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'cached'
- cache_dir = os.path.join(project, 'cache', 'artifacts')
- shutil.rmtree(cache_dir)
+ cache_dir = os.path.join(project, 'cache', 'artifacts')
+ shutil.rmtree(cache_dir)
- share.update_summary()
- assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'buildable'
+ assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'buildable'
- # Now try bst pull
- result = cli.run(project=project, args=['pull', 'junction.bst:import-etc.bst'])
- result.assert_success()
+ # Now try bst pull
+ result = cli.run(project=project, args=['pull', 'junction.bst:import-etc.bst'])
+ result.assert_success()
- # And assert that it's again in the local cache, without having built
- assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'cached'
+ # And assert that it's again in the local cache, without having built
+ assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'cached'
diff --git a/tests/frontend/push.py b/tests/frontend/push.py
index ca46b0447..e8f7d7d04 100644
--- a/tests/frontend/push.py
+++ b/tests/frontend/push.py
@@ -1,12 +1,10 @@
import os
-import shutil
import pytest
from collections import namedtuple
from unittest.mock import MagicMock
from buildstream._exceptions import ErrorDomain
from tests.testutils import cli, create_artifact_share, create_element_size
-from tests.testutils.site import IS_LINUX
from . import configure_project, generate_junction
@@ -58,256 +56,247 @@ def test_push(cli, tmpdir, datafiles):
assert cli.get_element_state(project, 'target.bst') == 'cached'
# Set up two artifact shares.
- share1 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1'))
- share2 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2'))
-
- # Try pushing with no remotes configured. This should fail.
- result = cli.run(project=project, args=['push', 'target.bst'])
- result.assert_main_error(ErrorDomain.STREAM, None)
-
- # Configure bst to pull but not push from a cache and run `bst push`.
- # This should also fail.
- cli.configure({
- 'artifacts': {'url': share1.repo, 'push': False},
- })
- result = cli.run(project=project, args=['push', 'target.bst'])
- result.assert_main_error(ErrorDomain.STREAM, None)
-
- # Configure bst to push to one of the caches and run `bst push`. This works.
- cli.configure({
- 'artifacts': [
- {'url': share1.repo, 'push': False},
- {'url': share2.repo, 'push': True},
- ]
- })
- result = cli.run(project=project, args=['push', 'target.bst'])
-
- assert_not_shared(cli, share1, project, 'target.bst')
- assert_shared(cli, share2, project, 'target.bst')
-
- # Now try pushing to both (making sure to empty the cache we just pushed
- # to).
- shutil.rmtree(share2.directory)
- share2 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2'))
- cli.configure({
- 'artifacts': [
- {'url': share1.repo, 'push': True},
- {'url': share2.repo, 'push': True},
- ]
- })
- result = cli.run(project=project, args=['push', 'target.bst'])
-
- assert_shared(cli, share1, project, 'target.bst')
- assert_shared(cli, share2, project, 'target.bst')
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1')) as share1:
+
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2:
+
+ # Try pushing with no remotes configured. This should fail.
+ result = cli.run(project=project, args=['push', 'target.bst'])
+ result.assert_main_error(ErrorDomain.STREAM, None)
+
+ # Configure bst to pull but not push from a cache and run `bst push`.
+ # This should also fail.
+ cli.configure({
+ 'artifacts': {'url': share1.repo, 'push': False},
+ })
+ result = cli.run(project=project, args=['push', 'target.bst'])
+ result.assert_main_error(ErrorDomain.STREAM, None)
+
+ # Configure bst to push to one of the caches and run `bst push`. This works.
+ cli.configure({
+ 'artifacts': [
+ {'url': share1.repo, 'push': False},
+ {'url': share2.repo, 'push': True},
+ ]
+ })
+ result = cli.run(project=project, args=['push', 'target.bst'])
+
+ assert_not_shared(cli, share1, project, 'target.bst')
+ assert_shared(cli, share2, project, 'target.bst')
+
+ # Now try pushing to both shares, re-creating share2 to empty the cache we just pushed to.
+
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2:
+ cli.configure({
+ 'artifacts': [
+ {'url': share1.repo, 'push': True},
+ {'url': share2.repo, 'push': True},
+ ]
+ })
+ result = cli.run(project=project, args=['push', 'target.bst'])
+
+ assert_shared(cli, share1, project, 'target.bst')
+ assert_shared(cli, share2, project, 'target.bst')
# Tests that `bst push --deps all` pushes all dependencies of the given element.
#
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
@pytest.mark.datafiles(DATA_DIR)
def test_push_all(cli, tmpdir, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename)
- share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
-
- # First build it without the artifact cache configured
- result = cli.run(project=project, args=['build', 'target.bst'])
- result.assert_success()
-
- # Assert that we are now cached locally
- assert cli.get_element_state(project, 'target.bst') == 'cached'
- # Configure artifact share
- cli.configure({
- #
- # FIXME: This test hangs "sometimes" if we allow
- # concurrent push.
- #
- # It's not too bad to ignore since we're
- # using the local artifact cache functionality
- # only, but it should probably be fixed.
- #
- 'scheduler': {
- 'pushers': 1
- },
- 'artifacts': {
- 'url': share.repo,
- 'push': True,
- }
- })
-
- # Now try bst push all the deps
- result = cli.run(project=project, args=[
- 'push', 'target.bst',
- '--deps', 'all'
- ])
- result.assert_success()
-
- # And finally assert that all the artifacts are in the share
- assert_shared(cli, share, project, 'target.bst')
- assert_shared(cli, share, project, 'import-bin.bst')
- assert_shared(cli, share, project, 'import-dev.bst')
- assert_shared(cli, share, project, 'compose-all.bst')
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+
+ # First build it without the artifact cache configured
+ result = cli.run(project=project, args=['build', 'target.bst'])
+ result.assert_success()
+
+ # Assert that we are now cached locally
+ assert cli.get_element_state(project, 'target.bst') == 'cached'
+
+ # Configure artifact share
+ cli.configure({
+ #
+ # FIXME: This test hangs "sometimes" if we allow
+ # concurrent push.
+ #
+ # It's not too bad to ignore since we're
+ # using the local artifact cache functionality
+ # only, but it should probably be fixed.
+ #
+ 'scheduler': {
+ 'pushers': 1
+ },
+ 'artifacts': {
+ 'url': share.repo,
+ 'push': True,
+ }
+ })
+
+ # Now try bst push all the deps
+ result = cli.run(project=project, args=[
+ 'push', 'target.bst',
+ '--deps', 'all'
+ ])
+ result.assert_success()
+
+ # And finally assert that all the artifacts are in the share
+ assert_shared(cli, share, project, 'target.bst')
+ assert_shared(cli, share, project, 'import-bin.bst')
+ assert_shared(cli, share, project, 'import-dev.bst')
+ assert_shared(cli, share, project, 'compose-all.bst')
# Tests that `bst build` won't push artifacts to the cache it just pulled from.
#
# Regression test for https://gitlab.com/BuildStream/buildstream/issues/233.
-@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
@pytest.mark.datafiles(DATA_DIR)
def test_push_after_pull(cli, tmpdir, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename)
# Set up two artifact shares.
- share1 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1'))
- share2 = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2'))
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1')) as share1,\
+ create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2:
- # Set the scene: share1 has the artifact, share2 does not.
- #
- cli.configure({
- 'artifacts': {'url': share1.repo, 'push': True},
- })
+ # Set the scene: share1 has the artifact, share2 does not.
+ #
+ cli.configure({
+ 'artifacts': {'url': share1.repo, 'push': True},
+ })
- result = cli.run(project=project, args=['build', 'target.bst'])
- result.assert_success()
+ result = cli.run(project=project, args=['build', 'target.bst'])
+ result.assert_success()
- share1.update_summary()
- cli.remove_artifact_from_cache(project, 'target.bst')
+ cli.remove_artifact_from_cache(project, 'target.bst')
- assert_shared(cli, share1, project, 'target.bst')
- assert_not_shared(cli, share2, project, 'target.bst')
- assert cli.get_element_state(project, 'target.bst') != 'cached'
+ assert_shared(cli, share1, project, 'target.bst')
+ assert_not_shared(cli, share2, project, 'target.bst')
+ assert cli.get_element_state(project, 'target.bst') != 'cached'
- # Now run the build again. Correct `bst build` behaviour is to download the
- # artifact from share1 but not push it back again.
- #
- result = cli.run(project=project, args=['build', 'target.bst'])
- result.assert_success()
- assert result.get_pulled_elements() == ['target.bst']
- assert result.get_pushed_elements() == []
-
- # Delete the artifact locally again.
- cli.remove_artifact_from_cache(project, 'target.bst')
-
- # Now we add share2 into the mix as a second push remote. This time,
- # `bst build` should push to share2 after pulling from share1.
- cli.configure({
- 'artifacts': [
- {'url': share1.repo, 'push': True},
- {'url': share2.repo, 'push': True},
- ]
- })
- result = cli.run(project=project, args=['build', 'target.bst'])
- result.assert_success()
- assert result.get_pulled_elements() == ['target.bst']
- assert result.get_pushed_elements() == ['target.bst']
+ # Now run the build again. Correct `bst build` behaviour is to download the
+ # artifact from share1 but not push it back again.
+ #
+ result = cli.run(project=project, args=['build', 'target.bst'])
+ result.assert_success()
+ assert result.get_pulled_elements() == ['target.bst']
+ assert result.get_pushed_elements() == []
+
+ # Delete the artifact locally again.
+ cli.remove_artifact_from_cache(project, 'target.bst')
+
+ # Now we add share2 into the mix as a second push remote. This time,
+ # `bst build` should push to share2 after pulling from share1.
+ cli.configure({
+ 'artifacts': [
+ {'url': share1.repo, 'push': True},
+ {'url': share2.repo, 'push': True},
+ ]
+ })
+ result = cli.run(project=project, args=['build', 'target.bst'])
+ result.assert_success()
+ assert result.get_pulled_elements() == ['target.bst']
+ assert result.get_pushed_elements() == ['target.bst']
# Ensure that when an artifact's size exceeds available disk space
# the least recently pushed artifact is deleted in order to make room for
# the incoming artifact.
+@pytest.mark.xfail
@pytest.mark.datafiles(DATA_DIR)
def test_artifact_expires(cli, datafiles, tmpdir):
project = os.path.join(datafiles.dirname, datafiles.basename)
element_path = os.path.join(project, 'elements')
# Create an artifact share (remote artifact cache) in the tmpdir/artifactshare
- share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
-
- # Mock the os.statvfs() call to return a named tuple which emulates an
- # os.statvfs_result object
- statvfs_result = namedtuple('statvfs_result', 'f_blocks f_bfree f_bsize')
- os.statvfs = MagicMock(return_value=statvfs_result(f_blocks=int(10e9),
- f_bfree=(int(12e6) + int(2e9)),
- f_bsize=1))
-
- # Configure bst to push to the cache
- cli.configure({
- 'artifacts': {'url': share.repo, 'push': True},
- })
-
- # Create and build an element of 5 MB
- create_element_size('element1.bst', element_path, [], int(5e6)) # [] => no deps
- result = cli.run(project=project, args=['build', 'element1.bst'])
- result.assert_success()
-
- # Create and build an element of 5 MB
- create_element_size('element2.bst', element_path, [], int(5e6)) # [] => no deps
- result = cli.run(project=project, args=['build', 'element2.bst'])
- result.assert_success()
-
- # update the share
- share.update_summary()
-
- # check that element's 1 and 2 are cached both locally and remotely
- assert cli.get_element_state(project, 'element1.bst') == 'cached'
- assert_shared(cli, share, project, 'element1.bst')
- assert cli.get_element_state(project, 'element2.bst') == 'cached'
- assert_shared(cli, share, project, 'element2.bst')
-
- # update mocked available disk space now that two 5 MB artifacts have been added
- os.statvfs = MagicMock(return_value=statvfs_result(f_blocks=int(10e9),
- f_bfree=(int(2e6) + int(2e9)),
- f_bsize=1))
-
- # Create and build another element of 5 MB (This will exceed the free disk space available)
- create_element_size('element3.bst', element_path, [], int(5e6))
- result = cli.run(project=project, args=['build', 'element3.bst'])
- result.assert_success()
-
- # update the share
- share.update_summary()
-
- # Ensure it is cached both locally and remotely
- assert cli.get_element_state(project, 'element3.bst') == 'cached'
- assert_shared(cli, share, project, 'element3.bst')
-
- # Ensure element1 has been removed from the share
- assert_not_shared(cli, share, project, 'element1.bst')
- # Ensure that elemen2 remains
- assert_shared(cli, share, project, 'element2.bst')
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+
+ # Mock the os.statvfs() call to return a named tuple which emulates an
+ # os.statvfs_result object
+ statvfs_result = namedtuple('statvfs_result', 'f_blocks f_bfree f_bsize')
+ os.statvfs = MagicMock(return_value=statvfs_result(f_blocks=int(10e9),
+ f_bfree=(int(12e6) + int(2e9)),
+ f_bsize=1))
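+ # With f_bsize=1 these figures are in bytes: a ~10GB volume with
+ # 12MB free beyond the 2e9 offset (which appears to account for
+ # headroom the cache keeps in reserve), so two 5MB artifacts fit
+ # but a third will trigger expiry.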
+
+ # Configure bst to push to the cache
+ cli.configure({
+ 'artifacts': {'url': share.repo, 'push': True},
+ })
+
+ # Create and build an element of 5 MB
+ create_element_size('element1.bst', element_path, [], int(5e6)) # [] => no deps
+ result = cli.run(project=project, args=['build', 'element1.bst'])
+ result.assert_success()
+
+ # Create and build an element of 5 MB
+ create_element_size('element2.bst', element_path, [], int(5e6)) # [] => no deps
+ result = cli.run(project=project, args=['build', 'element2.bst'])
+ result.assert_success()
+
+ # Check that elements 1 and 2 are cached both locally and remotely
+ assert cli.get_element_state(project, 'element1.bst') == 'cached'
+ assert_shared(cli, share, project, 'element1.bst')
+ assert cli.get_element_state(project, 'element2.bst') == 'cached'
+ assert_shared(cli, share, project, 'element2.bst')
+
+ # update mocked available disk space now that two 5 MB artifacts have been added
+ os.statvfs = MagicMock(return_value=statvfs_result(f_blocks=int(10e9),
+ f_bfree=(int(2e6) + int(2e9)),
+ f_bsize=1))
+
+ # Create and build another element of 5 MB (this will exceed the available free disk space)
+ create_element_size('element3.bst', element_path, [], int(5e6))
+ result = cli.run(project=project, args=['build', 'element3.bst'])
+ result.assert_success()
+
+ # Ensure it is cached both locally and remotely
+ assert cli.get_element_state(project, 'element3.bst') == 'cached'
+ assert_shared(cli, share, project, 'element3.bst')
+
+ # Ensure element1 has been removed from the share
+ assert_not_shared(cli, share, project, 'element1.bst')
+ # Ensure that element2 remains
+ assert_shared(cli, share, project, 'element2.bst')
# Test that a large artifact, whose size exceeds the quota, is not pushed
# to the remote share
+@pytest.mark.xfail
@pytest.mark.datafiles(DATA_DIR)
def test_artifact_too_large(cli, datafiles, tmpdir):
project = os.path.join(datafiles.dirname, datafiles.basename)
element_path = os.path.join(project, 'elements')
# Create an artifact share (remote cache) in tmpdir/artifactshare
- share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
-
- # Mock a file system with 5 MB total space
- statvfs_result = namedtuple('statvfs_result', 'f_blocks f_bfree f_bsize')
- os.statvfs = MagicMock(return_value=statvfs_result(f_blocks=int(5e6) + int(2e9),
- f_bfree=(int(5e6) + int(2e9)),
- f_bsize=1))
-
- # Configure bst to push to the remote cache
- cli.configure({
- 'artifacts': {'url': share.repo, 'push': True},
- })
-
- # Create and push a 3MB element
- create_element_size('small_element.bst', element_path, [], int(3e6))
- result = cli.run(project=project, args=['build', 'small_element.bst'])
- result.assert_success()
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
- # Create and try to push a 6MB element.
- create_element_size('large_element.bst', element_path, [], int(6e6))
- result = cli.run(project=project, args=['build', 'large_element.bst'])
- result.assert_success()
+ # Mock a file system with 5 MB total space
+ statvfs_result = namedtuple('statvfs_result', 'f_blocks f_bfree f_bsize')
+ os.statvfs = MagicMock(return_value=statvfs_result(f_blocks=int(5e6) + int(2e9),
+ f_bfree=(int(5e6) + int(2e9)),
+ f_bsize=1))
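+ # With f_bsize=1 this models roughly 5MB of usable space beyond
+ # the 2e9 offset, so the 6MB element built below exceeds the
+ # quota and should not be pushed.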
+
+ # Configure bst to push to the remote cache
+ cli.configure({
+ 'artifacts': {'url': share.repo, 'push': True},
+ })
- # update the cache
- share.update_summary()
+ # Create and push a 3MB element
+ create_element_size('small_element.bst', element_path, [], int(3e6))
+ result = cli.run(project=project, args=['build', 'small_element.bst'])
+ result.assert_success()
- # Ensure that the small artifact is still in the share
- assert cli.get_element_state(project, 'small_element.bst') == 'cached'
- assert_shared(cli, share, project, 'small_element.bst')
+ # Create and try to push a 6MB element.
+ create_element_size('large_element.bst', element_path, [], int(6e6))
+ result = cli.run(project=project, args=['build', 'large_element.bst'])
+ result.assert_success()
- # Ensure that the artifact is cached locally but NOT remotely
- assert cli.get_element_state(project, 'large_element.bst') == 'cached'
- assert_not_shared(cli, share, project, 'large_element.bst')
+ # Ensure that the small artifact is still in the share
+ assert cli.get_element_state(project, 'small_element.bst') == 'cached'
+ assert_shared(cli, share, project, 'small_element.bst')
+
+ # Ensure that the artifact is cached locally but NOT remotely
+ assert cli.get_element_state(project, 'large_element.bst') == 'cached'
+ assert_not_shared(cli, share, project, 'large_element.bst')
# Test that when an element is pulled recently, it is not considered the LRU element.
@@ -321,64 +310,60 @@ def test_recently_pulled_artifact_does_not_expire(cli, datafiles, tmpdir):
element_path = os.path.join(project, 'elements')
# Create an artifact share (remote cache) in tmpdir/artifactshare
- share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
-
- # Mock a file system with 12 MB free disk space
- statvfs_result = namedtuple('statvfs_result', 'f_blocks f_bfree f_bsize')
- os.statvfs = MagicMock(return_value=statvfs_result(f_blocks=int(10e9) + int(2e9),
- f_bfree=(int(12e6) + int(2e9)),
- f_bsize=1))
-
- # Configure bst to push to the cache
- cli.configure({
- 'artifacts': {'url': share.repo, 'push': True},
- })
-
- # Create and build 2 elements, each of 5 MB.
- create_element_size('element1.bst', element_path, [], int(5e6))
- result = cli.run(project=project, args=['build', 'element1.bst'])
- result.assert_success()
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
- create_element_size('element2.bst', element_path, [], int(5e6))
- result = cli.run(project=project, args=['build', 'element2.bst'])
- result.assert_success()
+ # Mock a file system with 12 MB free disk space
+ statvfs_result = namedtuple('statvfs_result', 'f_blocks f_bfree f_bsize')
+ os.statvfs = MagicMock(return_value=statvfs_result(f_blocks=int(10e9) + int(2e9),
+ f_bfree=(int(12e6) + int(2e9)),
+ f_bsize=1))
- share.update_summary()
+ # Configure bst to push to the cache
+ cli.configure({
+ 'artifacts': {'url': share.repo, 'push': True},
+ })
- # Ensure they are cached locally
- assert cli.get_element_state(project, 'element1.bst') == 'cached'
- assert cli.get_element_state(project, 'element2.bst') == 'cached'
+ # Create and build 2 elements, each of 5 MB.
+ create_element_size('element1.bst', element_path, [], int(5e6))
+ result = cli.run(project=project, args=['build', 'element1.bst'])
+ result.assert_success()
- # Ensure that they have been pushed to the cache
- assert_shared(cli, share, project, 'element1.bst')
- assert_shared(cli, share, project, 'element2.bst')
+ create_element_size('element2.bst', element_path, [], int(5e6))
+ result = cli.run(project=project, args=['build', 'element2.bst'])
+ result.assert_success()
- # Remove element1 from the local cache
- cli.remove_artifact_from_cache(project, 'element1.bst')
- assert cli.get_element_state(project, 'element1.bst') != 'cached'
+ # Ensure they are cached locally
+ assert cli.get_element_state(project, 'element1.bst') == 'cached'
+ assert cli.get_element_state(project, 'element2.bst') == 'cached'
- # Pull the element1 from the remote cache (this should update its mtime)
- result = cli.run(project=project, args=['pull', 'element1.bst', '--remote',
- share.repo])
- result.assert_success()
+ # Ensure that they have been pushed to the cache
+ assert_shared(cli, share, project, 'element1.bst')
+ assert_shared(cli, share, project, 'element2.bst')
- # Ensure element1 is cached locally
- assert cli.get_element_state(project, 'element1.bst') == 'cached'
+ # Remove element1 from the local cache
+ cli.remove_artifact_from_cache(project, 'element1.bst')
+ assert cli.get_element_state(project, 'element1.bst') != 'cached'
- # Create and build the element3 (of 5 MB)
- create_element_size('element3.bst', element_path, [], int(5e6))
- result = cli.run(project=project, args=['build', 'element3.bst'])
- result.assert_success()
+ # Pull element1 from the remote cache (this should update its mtime)
+ result = cli.run(project=project, args=['pull', 'element1.bst', '--remote',
+ share.repo])
+ result.assert_success()
+
+ # Ensure element1 is cached locally
+ assert cli.get_element_state(project, 'element1.bst') == 'cached'
- share.update_summary()
+ # Create and build element3 (another 5 MB element)
+ create_element_size('element3.bst', element_path, [], int(5e6))
+ result = cli.run(project=project, args=['build', 'element3.bst'])
+ result.assert_success()
- # Make sure it's cached locally and remotely
- assert cli.get_element_state(project, 'element3.bst') == 'cached'
- assert_shared(cli, share, project, 'element3.bst')
+ # Make sure it's cached locally and remotely
+ assert cli.get_element_state(project, 'element3.bst') == 'cached'
+ assert_shared(cli, share, project, 'element3.bst')
- # Ensure that element2 was deleted from the share and element1 remains
- assert_not_shared(cli, share, project, 'element2.bst')
- assert_shared(cli, share, project, 'element1.bst')
+ # Ensure that element2 was deleted from the share and element1 remains
+ assert_not_shared(cli, share, project, 'element2.bst')
+ assert_shared(cli, share, project, 'element1.bst')
@pytest.mark.datafiles(DATA_DIR)
@@ -394,11 +379,11 @@ def test_push_cross_junction(cli, tmpdir, datafiles):
assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'cached'
- share = create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'))
- cli.configure({
- 'artifacts': {'url': share.repo, 'push': True},
- })
- result = cli.run(project=project, args=['push', 'junction.bst:import-etc.bst'])
+ with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+ cli.configure({
+ 'artifacts': {'url': share.repo, 'push': True},
+ })
+ result = cli.run(project=project, args=['push', 'junction.bst:import-etc.bst'])
- cache_key = cli.get_element_key(project, 'junction.bst:import-etc.bst')
- assert share.has_artifact('subtest', 'import-etc.bst', cache_key)
+ cache_key = cli.get_element_key(project, 'junction.bst:import-etc.bst')
+ assert share.has_artifact('subtest', 'import-etc.bst', cache_key)
diff --git a/tests/integration/workspace.py b/tests/integration/workspace.py
index 6eae1efc8..102d053fc 100644
--- a/tests/integration/workspace.py
+++ b/tests/integration/workspace.py
@@ -216,7 +216,6 @@ def test_updated_dependency_nested(cli, tmpdir, datafiles):
@pytest.mark.integration
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(not IS_LINUX, reason='Incremental builds are not supported by the unix platform')
def test_incremental_configure_commands_run_only_once(cli, tmpdir, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename)
workspace = os.path.join(cli.directory, 'workspace')
diff --git a/tests/testutils/artifactshare.py b/tests/testutils/artifactshare.py
index 8664c69d0..6b9117b48 100644
--- a/tests/testutils/artifactshare.py
+++ b/tests/testutils/artifactshare.py
@@ -2,10 +2,18 @@ import string
import pytest
import subprocess
import os
+import shutil
+import signal
-from buildstream import _yaml
+from contextlib import contextmanager
+from multiprocessing import Process, Queue
+import pytest_cov
-from .site import HAVE_OSTREE_CLI
+from buildstream import _yaml
+from buildstream._artifactcache.cascache import CASCache
+from buildstream._artifactcache.casserver import create_server
+from buildstream._context import Context
+from buildstream._exceptions import ArtifactError
# ArtifactShare()
@@ -20,11 +28,6 @@ class ArtifactShare():
def __init__(self, directory):
- # We need the ostree CLI for tests which use this
- #
- if not HAVE_OSTREE_CLI:
- pytest.skip("ostree cli is not available")
-
# The working directory for the artifact share (in case it
# needs to do something outside of its backend's storage folder).
#
@@ -35,34 +38,42 @@ class ArtifactShare():
# Unless this gets more complicated, just use this directly
# in tests as a remote artifact push/pull configuration
#
- self.repo = os.path.join(self.directory, 'repo')
+ self.repodir = os.path.join(self.directory, 'repo')
- os.makedirs(self.repo)
+ os.makedirs(self.repodir)
- self.init()
- self.update_summary()
+ context = Context()
+ context.artifactdir = self.repodir
- # init():
- #
- # Initializes the artifact share
- #
- # Returns:
- # (smth): A new ref corresponding to this commit, which can
- # be passed as the ref in the Repo.source_config() API.
- #
- def init(self):
- subprocess.call(['ostree', 'init',
- '--repo', self.repo,
- '--mode', 'archive-z2'])
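+ # This CASCache is rooted at the same repository the server
+ # subprocess serves, so has_artifact() can resolve refs directly
+ # on disk without going over the network.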
+ self.cas = CASCache(context)
+
+ q = Queue()
+
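+ # Run the server in a subprocess so it can be cleanly terminated
+ # (and its state discarded) when the share is closed.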
+ self.process = Process(target=self.run, args=(q,))
+ self.process.start()
- # update_summary():
+ # Retrieve port from server subprocess
+ port = q.get()
+
+ self.repo = 'http://localhost:{}'.format(port)
+
+ # run():
#
- # Ensure that the summary is up to date
+ # Run the artifact server.
#
- def update_summary(self):
- subprocess.call(['ostree', 'summary',
- '--update',
- '--repo', self.repo])
+ def run(self, q):
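+ # close() stops this subprocess with SIGTERM; registering the
+ # pytest-cov cleanup handler first ensures coverage data from
+ # the child is flushed before it exits.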
+ pytest_cov.embed.cleanup_on_sigterm()
+
+ server = create_server(self.repodir, enable_push=True)
+ port = server.add_insecure_port('localhost:0')
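+ # Binding to port 0 lets the OS pick a free port;
+ # add_insecure_port() returns the port that was actually bound.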
+
+ server.start()
+
+ # Send port to parent
+ q.put(port)
+
+ # Sleep until termination by signal
+ signal.pause()
# has_artifact():
#
@@ -77,8 +88,8 @@ class ArtifactShare():
# (bool): True if the artifact exists in the share, otherwise false.
def has_artifact(self, project_name, element_name, cache_key):
- # NOTE: This should be kept in line with our ostree
- # based artifact cache code, the below is the
+ # NOTE: This should be kept in line with our
+ # artifact cache code; the below is the
# same algo for creating an artifact reference
#
@@ -93,18 +104,31 @@ class ArtifactShare():
])
artifact_key = '{0}/{1}/{2}'.format(project_name, element_name, cache_key)
- if not subprocess.call(['ostree', 'rev-parse',
- '--repo', self.repo,
- artifact_key]):
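+ # resolve_ref() raises ArtifactError if the ref is unknown, so a
+ # successful lookup means the artifact is present in the share.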
+ try:
+ self.cas.resolve_ref(artifact_key)
return True
+ except ArtifactError:
+ return False
- return False
+ # close():
+ #
+ # Remove the artifact share.
+ #
+ def close(self):
+ self.process.terminate()
+ self.process.join()
+
+ shutil.rmtree(self.directory)
# create_artifact_share()
#
# Create an ArtifactShare for use in a test case
#
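+# A sketch of the intended usage (as seen throughout the tests):
+#
+#   with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+#       cli.configure({'artifacts': {'url': share.repo, 'push': True}})
+#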
+@contextmanager
def create_artifact_share(directory):
-
- return ArtifactShare(directory)
+ share = ArtifactShare(directory)
+ try:
+ yield share
+ finally:
+ share.close()
diff --git a/tests/testutils/runcli.py b/tests/testutils/runcli.py
index 658e38874..96d4ea457 100644
--- a/tests/testutils/runcli.py
+++ b/tests/testutils/runcli.py
@@ -19,8 +19,6 @@ import pytest
#
from _pytest.capture import MultiCapture, FDCapture
-from tests.testutils.site import IS_LINUX
-
# Import the main cli entrypoint
from buildstream._frontend import cli as bst_cli
from buildstream import _yaml
@@ -203,10 +201,7 @@ class Cli():
def remove_artifact_from_cache(self, project, element_name):
cache_dir = os.path.join(project, 'cache', 'artifacts')
- if IS_LINUX:
- cache_dir = os.path.join(cache_dir, 'ostree', 'refs', 'heads')
- else:
- cache_dir = os.path.join(cache_dir, 'tar')
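+ # With the CAS-based artifact cache, refs live under
+ # cas/refs/heads/<project>/<element>, mirroring the layout the
+ # ostree backend used.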
+ cache_dir = os.path.join(cache_dir, 'cas', 'refs', 'heads')
cache_dir = os.path.splitext(os.path.join(cache_dir, 'test', element_name))[0]
shutil.rmtree(cache_dir)