author     bst-marge-bot <marge-bot@buildstream.build>   2020-08-13 10:49:20 +0000
committer  bst-marge-bot <marge-bot@buildstream.build>   2020-08-13 10:49:20 +0000
commit     10e208d0c40ebacc43cc9da2ebaf73c10bbc37da (patch)
tree       cb0566c72640c292676ab8654d273666b088a88b
parent     ccdac3517f7a241aa9d3b5637dea6640a63667fd (diff)
parent     aea06a75c6d6ac404729bc48c5cdfb6aca7ae7ac (diff)
download   buildstream-10e208d0c40ebacc43cc9da2ebaf73c10bbc37da.tar.gz
Merge branch 'juerg/remote-asset' into 'master'
Use Remote Asset API for artifact and source caches

Closes #1273 and #1272

See merge request BuildStream/buildstream!1978
-rw-r--r--  NEWS | 8
-rw-r--r--  doc/source/arch_caches.rst | 13
-rw-r--r--  doc/source/arch_remote_execution.rst | 2
-rw-r--r--  doc/source/using_configuring_cache_server.rst | 6
-rw-r--r--  src/buildstream/_artifactcache.py | 259
-rw-r--r--  src/buildstream/_assetcache.py (renamed from src/buildstream/_basecache.py) | 254
-rw-r--r--  src/buildstream/_cas/casdprocessmanager.py | 25
-rw-r--r--  src/buildstream/_cas/casremote.py | 24
-rw-r--r--  src/buildstream/_cas/casserver.py | 268
-rw-r--r--  src/buildstream/_exceptions.py | 4
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/asset/__init__.py | 0
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/asset/v1/__init__.py | 0
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto | 445
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py | 666
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py | 324
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py | 1374
-rw-r--r--  src/buildstream/_protos/build/buildgrid/local_cas.proto | 48
-rw-r--r--  src/buildstream/_protos/build/buildgrid/local_cas_pb2.py | 179
-rw-r--r--  src/buildstream/_protos/build/buildgrid/local_cas_pb2_grpc.py | 667
-rw-r--r--  src/buildstream/_protos/buildstream/v2/artifact.proto | 25
-rw-r--r--  src/buildstream/_protos/buildstream/v2/artifact_pb2.py | 135
-rw-r--r--  src/buildstream/_protos/buildstream/v2/artifact_pb2_grpc.py | 66
-rw-r--r--  src/buildstream/_protos/buildstream/v2/buildstream.proto | 29
-rw-r--r--  src/buildstream/_protos/buildstream/v2/buildstream_pb2.py | 195
-rw-r--r--  src/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py | 244
-rw-r--r--  src/buildstream/_protos/buildstream/v2/source.proto | 31
-rw-r--r--  src/buildstream/_protos/buildstream/v2/source_pb2.py | 135
-rw-r--r--  src/buildstream/_protos/buildstream/v2/source_pb2_grpc.py | 67
-rw-r--r--  src/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py | 334
-rw-r--r--  src/buildstream/_protos/google/longrunning/operations_pb2_grpc.py | 310
-rw-r--r--  src/buildstream/_sourcecache.py | 129
-rw-r--r--  tests/artifactcache/artifactservice.py | 107
-rw-r--r--  tests/frontend/push.py | 15
-rw-r--r--  tests/testutils/artifactshare.py | 83
34 files changed, 3952 insertions, 2519 deletions
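
The changes below replace BuildStream's bespoke artifact and source gRPC services with the Bazel Remote Asset API. As orientation before the per-file diffs, here is a minimal client-side sketch of the new flow — resolving an artifact ref to a CAS digest with the Fetch service — using the URN template this merge introduces in src/buildstream/_artifactcache.py. The endpoint, artifact name, and the plain insecure channel are placeholders for illustration only.

import grpc

from buildstream._protos.build.bazel.remote.asset.v1 import (
    remote_asset_pb2,
    remote_asset_pb2_grpc,
)

# URN template added by this merge in _artifactcache.py
REMOTE_ASSET_ARTIFACT_URN_TEMPLATE = "urn:fdc:buildstream.build:2020:artifact:{}"


def resolve_artifact_digest(endpoint, artifact_name):
    # Placeholder transport: real deployments may use TLS and client auth.
    channel = grpc.insecure_channel(endpoint)
    fetch = remote_asset_pb2_grpc.FetchStub(channel)

    request = remote_asset_pb2.FetchBlobRequest()
    request.uris.append(REMOTE_ASSET_ARTIFACT_URN_TEMPLATE.format(artifact_name))

    try:
        response = fetch.FetchBlob(request)
    except grpc.RpcError as e:
        if e.code() == grpc.StatusCode.NOT_FOUND:
            return None
        raise

    # NOT_FOUND may also be reported inline in response.status rather than as
    # an RPC error; AssetRemote.fetch_blob() in the diff below handles both.
    return response.blob_digest
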
diff --git a/NEWS b/NEWS
index babcb259b..b0f8650db 100644
--- a/NEWS
+++ b/NEWS
@@ -18,6 +18,14 @@ CLI
o Add `bst source push` subcommand. This command pushes element sources to a
remote source cache.
+Artifacts
+---------
+
+ o BREAKING CHANGE: Use Remote Asset API for remote artifact and source caches.
+ Existing artifact servers are not compatible and need to be updated to the
+ latest version which will then allow them to be repopulated with new
+ artifacts.
+
Plugins
-------
diff --git a/doc/source/arch_caches.rst b/doc/source/arch_caches.rst
index c415cfc47..448a0a389 100644
--- a/doc/source/arch_caches.rst
+++ b/doc/source/arch_caches.rst
@@ -34,10 +34,10 @@ files digests. The digests point to locations in the CAS of relevant files and
directories, allowing BuildStream to query remote CAS servers for this
information.
-:ref:`bst-artifact-server <artifact_command_reference>` uses grpc to implement a
-remote API for an artifact service, that BuildStream then uses to query,
-retrieve and update artifact files, before using this information to download
-the files and other data from the remote CAS.
+:ref:`bst-artifact-server <artifact_command_reference>` uses grpc to implement
+the Remote Asset API for an artifact service, which BuildStream then uses to
+query, retrieve and update artifact references, before using this information to
+download the files and other data from the remote CAS.
Source caches
-------------
@@ -53,9 +53,8 @@ artifacts, as they just need to map a source key to a directory digest, with no
additional metadata.
Similar to artifacts, :ref:`bst-artifact-server <artifact_command_reference>`
-uses grpc to implements a 'reference service' API that allows BuildStream to
-query for these source digests, which can then be used to retrieve sources from
-a CAS.
+uses grpc to implement the Remote Asset API that allows BuildStream to query for
+these source digests, which can then be used to retrieve sources from a CAS.
.. note::
diff --git a/doc/source/arch_remote_execution.rst b/doc/source/arch_remote_execution.rst
index efefc84b5..9c8e9d354 100644
--- a/doc/source/arch_remote_execution.rst
+++ b/doc/source/arch_remote_execution.rst
@@ -8,7 +8,7 @@ The previous section :ref:`sandboxing` describes the two forms of local sandbox:
Artifact caches and other storage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-BuildStream can transmit the results of local builds to remote artifact caches and retrieve them later. The same mechanism is used for remote execution. The filesystem staged before building is stored in a local content-addressable store, which may be the same as the local artifact cache. The command to execute is also stored as an object in local CAS. Both the initial source filesystem and command are transmitted to remote storage specific to the remote execution service, and after the build is complete, the filesystem after build is retrieved from remote storage to the local CAS. The remote execution service uses the same communication protocol as artifact caches, and may use the same internal storage, but may not implement the extensions used by BuildStream to store full artifacts.
+BuildStream can transmit the results of local builds to remote artifact caches and retrieve them later. The same mechanism is used for remote execution. The filesystem staged before building is stored in a local content-addressable store, which may be the same as the local artifact cache. The command to execute is also stored as an object in local CAS. Both the initial source filesystem and command are transmitted to remote storage specific to the remote execution service, and after the build is complete, the filesystem after build is retrieved from remote storage to the local CAS. The remote execution service uses the same communication protocol as artifact caches, and may use the same internal storage, but may not implement the Remote Asset API used by BuildStream to store full artifacts.
.. image:: images/arch-remote-execution.svg
:align: center
diff --git a/doc/source/using_configuring_cache_server.rst b/doc/source/using_configuring_cache_server.rst
index d31a6661c..e5755a666 100644
--- a/doc/source/using_configuring_cache_server.rst
+++ b/doc/source/using_configuring_cache_server.rst
@@ -161,13 +161,13 @@ Instance with push and requiring client authentication:
.. note::
- BuildStream's artifact cache is an extension of `Google's Remote
- Execution CAS server
+ BuildStream's artifact cache uses `Bazel's Remote Execution CAS and Remote
+ Asset API
<https://github.com/bazelbuild/remote-apis/>`_.
Sometimes, when using Remote Execution, it is useful to run
BuildStream with just a basic CAS server, without using the
- artifact extensions, but BuildStream still needs to store these to
+ Remote Asset API, but BuildStream still needs to store these to
work correctly.
For this scenario, you can add the `--index-only` flag to the above
diff --git a/src/buildstream/_artifactcache.py b/src/buildstream/_artifactcache.py
index 46fc5fbfb..2a5f5faa6 100644
--- a/src/buildstream/_artifactcache.py
+++ b/src/buildstream/_artifactcache.py
@@ -1,6 +1,6 @@
#
# Copyright (C) 2017-2018 Codethink Limited
-# Copyright (C) 2019 Bloomberg Finance LP
+# Copyright (C) 2019-2020 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
@@ -21,112 +21,14 @@
import os
import grpc
-from ._basecache import BaseCache
+from ._assetcache import AssetCache
from ._cas.casremote import BlobNotFound
-from ._exceptions import ArtifactError, CASError, CacheError, CASRemoteError, RemoteError
-from ._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc, artifact_pb2, artifact_pb2_grpc
+from ._exceptions import ArtifactError, AssetCacheError, CASError, CASRemoteError
+from ._protos.buildstream.v2 import artifact_pb2
-from ._remote import BaseRemote
from . import utils
-
-# ArtifactRemote():
-#
-# Facilitates communication with the BuildStream-specific part of
-# artifact remotes.
-#
-class ArtifactRemote(BaseRemote):
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.artifact_service = None
-
- def close(self):
- self.artifact_service = None
- super().close()
-
- # _configure_protocols():
- #
- # Configure the protocols used by this remote as part of the
- # remote initialization; Note that this should only be used in
- # Remote.init(), and is expected to fail when called by itself.
- #
- def _configure_protocols(self):
- # Set up artifact stub
- self.artifact_service = artifact_pb2_grpc.ArtifactServiceStub(self.channel)
-
- # _check():
- #
- # Check if this remote provides everything required for the
- # particular kind of remote. This is expected to be called as part
- # of check()
- #
- # Raises:
- # RemoteError: If the upstream has a problem
- #
- def _check(self):
- capabilities_service = buildstream_pb2_grpc.CapabilitiesStub(self.channel)
-
- # Check whether the server supports newer proto based artifact.
- try:
- request = buildstream_pb2.GetCapabilitiesRequest()
- if self.instance_name:
- request.instance_name = self.instance_name
- response = capabilities_service.GetCapabilities(request)
- except grpc.RpcError as e:
- # Check if this remote has the artifact service
- if e.code() == grpc.StatusCode.UNIMPLEMENTED:
- raise RemoteError(
- "Configured remote does not have the BuildStream "
- "capabilities service. Please check remote configuration."
- )
- # Else raise exception with details
- raise RemoteError("Remote initialisation failed with status {}: {}".format(e.code().name, e.details()))
-
- if not response.artifact_capabilities:
- raise RemoteError("Configured remote does not support artifact service")
-
- if self.spec.push and not response.artifact_capabilities.allow_updates:
- raise RemoteError("Artifact server does not allow push")
-
- # get_artifact():
- #
- # Get an artifact proto for a given cache key from the remote.
- #
- # Args:
- # cache_key (str): The artifact cache key. NOTE: This "key"
- # is actually the ref/name and its name in
- # the protocol is inaccurate. You have been warned.
- #
- # Returns:
- # (Artifact): The artifact proto
- #
- # Raises:
- # grpc.RpcError: If someting goes wrong during the request.
- #
- def get_artifact(self, cache_key):
- artifact_request = artifact_pb2.GetArtifactRequest()
- artifact_request.cache_key = cache_key
-
- return self.artifact_service.GetArtifact(artifact_request)
-
- # update_artifact():
- #
- # Update an artifact with the given cache key on the remote with
- # the given proto.
- #
- # Args:
- # cache_key (str): The artifact cache key of the artifact to update.
- # artifact (ArtifactProto): The artifact proto to send.
- #
- # Raises:
- # grpc.RpcError: If someting goes wrong during the request.
- #
- def update_artifact(self, cache_key, artifact):
- update_request = artifact_pb2.UpdateArtifactRequest()
- update_request.cache_key = cache_key
- update_request.artifact.CopyFrom(artifact)
-
- self.artifact_service.UpdateArtifact(update_request)
+REMOTE_ASSET_ARTIFACT_URN_TEMPLATE = "urn:fdc:buildstream.build:2020:artifact:{}"
# An ArtifactCache manages artifacts.
@@ -134,12 +36,10 @@ class ArtifactRemote(BaseRemote):
# Args:
# context (Context): The BuildStream context
#
-class ArtifactCache(BaseCache):
+class ArtifactCache(AssetCache):
spec_name = "artifact_cache_specs"
- spec_error = ArtifactError
config_node_name = "artifacts"
- index_remote_class = ArtifactRemote
def __init__(self, context):
super().__init__(context)
@@ -202,7 +102,7 @@ class ArtifactCache(BaseCache):
def remove(self, ref):
try:
self._remove_ref(ref)
- except CacheError as e:
+ except AssetCacheError as e:
raise ArtifactError("{}".format(e)) from e
# push():
@@ -226,14 +126,18 @@ class ArtifactCache(BaseCache):
index_remotes = [r for r in self._index_remotes[project] if r.push]
storage_remotes = [r for r in self._storage_remotes[project] if r.push]
+ artifact_proto = artifact._get_proto()
+ artifact_digest = self.cas.add_object(buffer=artifact_proto.SerializeToString())
+
pushed = False
+
# First push our files to all storage remotes, so that they
# can perform file checks on their end
for remote in storage_remotes:
remote.init()
element.status("Pushing data from artifact {} -> {}".format(display_key, remote))
- if self._push_artifact_blobs(artifact, remote):
+ if self._push_artifact_blobs(artifact, artifact_digest, remote):
element.info("Pushed data from artifact {} -> {}".format(display_key, remote))
else:
element.info(
@@ -246,7 +150,7 @@ class ArtifactCache(BaseCache):
remote.init()
element.status("Pushing artifact {} -> {}".format(display_key, remote))
- if self._push_artifact_proto(element, artifact, remote):
+ if self._push_artifact_proto(element, artifact, artifact_digest, remote):
element.info("Pushed artifact {} -> {}".format(display_key, remote))
pushed = True
else:
@@ -269,10 +173,13 @@ class ArtifactCache(BaseCache):
# (bool): True if pull was successful, False if artifact was not available
#
def pull(self, element, key, *, pull_buildtrees=False):
- artifact = None
+ artifact_digest = None
display_key = key[: self.context.log_key_length]
project = element._get_project()
+ artifact_name = element.get_artifact_name(key=key)
+ uri = REMOTE_ASSET_ARTIFACT_URN_TEMPLATE.format(artifact_name)
+
errors = []
# Start by pulling our artifact proto, so that we know which
# blobs to pull
@@ -280,23 +187,24 @@ class ArtifactCache(BaseCache):
remote.init()
try:
element.status("Pulling artifact {} <- {}".format(display_key, remote))
- artifact = self._pull_artifact_proto(element, key, remote)
- if artifact:
+ response = remote.fetch_blob([uri])
+ if response:
+ artifact_digest = response.blob_digest
break
element.info("Remote ({}) does not have artifact {} cached".format(remote, display_key))
- except CASError as e:
+ except AssetCacheError as e:
element.warn("Could not pull from remote {}: {}".format(remote, e))
errors.append(e)
- if errors and not artifact:
+ if errors and not artifact_digest:
raise ArtifactError(
"Failed to pull artifact {}".format(display_key), detail="\n".join(str(e) for e in errors)
)
# If we don't have an artifact, we can't exactly pull our
# artifact
- if not artifact:
+ if not artifact_digest:
return False
errors = []
@@ -306,7 +214,7 @@ class ArtifactCache(BaseCache):
try:
element.status("Pulling data for artifact {} <- {}".format(display_key, remote))
- if self._pull_artifact_storage(element, artifact, remote, pull_buildtrees=pull_buildtrees):
+ if self._pull_artifact_storage(element, key, artifact_digest, remote, pull_buildtrees=pull_buildtrees):
element.info("Pulled artifact {} <- {}".format(display_key, remote))
return True
@@ -484,7 +392,7 @@ class ArtifactCache(BaseCache):
# ArtifactError: If we fail to push blobs (*unless* they're
# already there or we run out of space on the server).
#
- def _push_artifact_blobs(self, artifact, remote):
+ def _push_artifact_blobs(self, artifact, artifact_digest, remote):
artifact_proto = artifact._get_proto()
try:
@@ -497,7 +405,8 @@ class ArtifactCache(BaseCache):
except FileNotFoundError:
pass
- digests = []
+ digests = [artifact_digest]
+
if str(artifact_proto.public_data):
digests.append(artifact_proto.public_data)
@@ -526,7 +435,7 @@ class ArtifactCache(BaseCache):
# Args:
# element (Element): The element
# artifact (Artifact): The related artifact being pushed
- # remote (ArtifactRemote): Remote to push to
+ # remote (AssetRemote): Remote to push to
#
# Returns:
# (bool): Whether we pushed the artifact.
@@ -535,33 +444,46 @@ class ArtifactCache(BaseCache):
# ArtifactError: If the push fails for any reason except the
# artifact already existing.
#
- def _push_artifact_proto(self, element, artifact, remote):
+ def _push_artifact_proto(self, element, artifact, artifact_digest, remote):
artifact_proto = artifact._get_proto()
keys = list(utils._deduplicate([artifact_proto.strong_key, artifact_proto.weak_key]))
+ artifact_names = [element.get_artifact_name(key=key) for key in keys]
+ uris = [REMOTE_ASSET_ARTIFACT_URN_TEMPLATE.format(artifact_name) for artifact_name in artifact_names]
- pushed = False
+ try:
+ response = remote.fetch_blob(uris)
+ # Skip push if artifact is already on the server
+ if response and response.blob_digest == artifact_digest:
+ return False
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ raise ArtifactError(
+ "Error checking artifact cache with status {}: {}".format(e.code().name, e.details())
+ )
- for key in keys:
- try:
- remote_artifact = remote.get_artifact(element.get_artifact_name(key=key))
- # Skip push if artifact is already on the server
- if remote_artifact == artifact_proto:
- continue
- except grpc.RpcError as e:
- if e.code() != grpc.StatusCode.NOT_FOUND:
- raise ArtifactError(
- "Error checking artifact cache with status {}: {}".format(e.code().name, e.details())
- )
+ referenced_directories = []
+ if artifact_proto.files:
+ referenced_directories.append(artifact_proto.files)
+ if artifact_proto.buildtree:
+ referenced_directories.append(artifact_proto.buildtree)
+ if artifact_proto.sources:
+ referenced_directories.append(artifact_proto.sources)
- try:
- remote.update_artifact(element.get_artifact_name(key=key), artifact_proto)
- pushed = True
- except grpc.RpcError as e:
- raise ArtifactError("Failed to push artifact with status {}: {}".format(e.code().name, e.details()))
+ referenced_blobs = [log_file.digest for log_file in artifact_proto.logs]
- return pushed
+ try:
+ remote.push_blob(
+ uris,
+ artifact_digest,
+ references_blobs=referenced_blobs,
+ references_directories=referenced_directories,
+ )
+ except grpc.RpcError as e:
+ raise ArtifactError("Failed to push artifact with status {}: {}".format(e.code().name, e.details()))
+
+ return True
# _pull_artifact_storage():
#
@@ -580,7 +502,7 @@ class ArtifactCache(BaseCache):
# ArtifactError: If the pull failed for any reason except the
# blobs not existing on the server.
#
- def _pull_artifact_storage(self, element, artifact, remote, pull_buildtrees=False):
+ def _pull_artifact_storage(self, element, key, artifact_digest, remote, pull_buildtrees=False):
def __pull_digest(digest):
self.cas._fetch_directory(remote, digest)
required_blobs = self.cas.required_blobs_for_directory(digest)
@@ -588,7 +510,21 @@ class ArtifactCache(BaseCache):
if missing_blobs:
self.cas.fetch_blobs(remote, missing_blobs)
+ artifact_name = element.get_artifact_name(key=key)
+
try:
+ # Fetch and parse artifact proto
+ self.cas.fetch_blobs(remote, [artifact_digest])
+ artifact = artifact_pb2.Artifact()
+ with open(self.cas.objpath(artifact_digest), "rb") as f:
+ artifact.ParseFromString(f.read())
+
+ # Write the artifact proto to cache
+ artifact_path = os.path.join(self._basedir, artifact_name)
+ os.makedirs(os.path.dirname(artifact_path), exist_ok=True)
+ with utils.save_file_atomic(artifact_path, mode="wb") as f:
+ f.write(artifact.SerializeToString())
+
if str(artifact.files):
__pull_digest(artifact.files)
@@ -610,57 +546,22 @@ class ArtifactCache(BaseCache):
return True
- # _pull_artifact_proto():
- #
- # Pull an artifact proto from a remote server.
- #
- # Args:
- # element (Element): The element whose artifact to pull.
- # key (str): The specific key for the artifact to pull.
- # remote (ArtifactRemote): The remote to pull from.
- #
- # Returns:
- # (Artifact|None): The artifact proto, or None if the server
- # doesn't have it.
- #
- # Raises:
- # ArtifactError: If the pull fails.
- #
- def _pull_artifact_proto(self, element, key, remote):
- artifact_name = element.get_artifact_name(key=key)
-
- try:
- artifact = remote.get_artifact(artifact_name)
- except grpc.RpcError as e:
- if e.code() != grpc.StatusCode.NOT_FOUND:
- raise ArtifactError("Failed to pull artifact with status {}: {}".format(e.code().name, e.details()))
- return None
-
- # Write the artifact proto to cache
- artifact_path = os.path.join(self._basedir, artifact_name)
- os.makedirs(os.path.dirname(artifact_path), exist_ok=True)
- with utils.save_file_atomic(artifact_path, mode="wb") as f:
- f.write(artifact.SerializeToString())
-
- return artifact
-
# _query_remote()
#
# Args:
# ref (str): The artifact ref
- # remote (ArtifactRemote): The remote we want to check
+ # remote (AssetRemote): The remote we want to check
#
# Returns:
# (bool): True if the ref exists in the remote, False otherwise.
#
def _query_remote(self, ref, remote):
- request = artifact_pb2.GetArtifactRequest()
- request.cache_key = ref
+ uri = REMOTE_ASSET_ARTIFACT_URN_TEMPLATE.format(ref)
+
try:
- remote.artifact_service.GetArtifact(request)
+ response = remote.fetch_blob([uri])
+ return bool(response)
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.NOT_FOUND:
raise ArtifactError("Error when querying with status {}: {}".format(e.code().name, e.details()))
return False
-
- return True
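
The push direction is now symmetric: the serialized Artifact proto is stored as a CAS blob and PushBlob associates its digest with one URN per cache key, declaring the referenced directories and log blobs so the server keeps them alive. The helper below is a hypothetical illustration of the request that _push_artifact_proto() assembles through AssetRemote.push_blob(); only the field names from remote_asset.proto and artifact.proto are assumed.

from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2


def build_push_request(uris, artifact_digest, artifact_proto, instance_name=""):
    request = remote_asset_pb2.PushBlobRequest()
    if instance_name:
        request.instance_name = instance_name
    request.uris.extend(uris)
    request.blob_digest.CopyFrom(artifact_digest)

    # Directories referenced by the artifact: files, buildtree and sources.
    # Unset message fields stringify to "", hence the str() checks.
    referenced_directories = [
        d
        for d in (artifact_proto.files, artifact_proto.buildtree, artifact_proto.sources)
        if str(d)
    ]
    request.references_directories.extend(referenced_directories)

    # Log files are referenced as plain blobs rather than directories.
    request.references_blobs.extend([log_file.digest for log_file in artifact_proto.logs])

    return request
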
diff --git a/src/buildstream/_basecache.py b/src/buildstream/_assetcache.py
index 91eae6ae4..68f7fd732 100644
--- a/src/buildstream/_basecache.py
+++ b/src/buildstream/_assetcache.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2019 Bloomberg Finance LP
+# Copyright (C) 2019-2020 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
@@ -20,32 +20,251 @@ import os
from fnmatch import fnmatch
from itertools import chain
from typing import TYPE_CHECKING
+import grpc
from . import utils
from . import _yaml
from ._cas import CASRemote
from ._message import Message, MessageType
-from ._exceptions import LoadError, RemoteError, CacheError
-from ._remote import RemoteSpec, RemoteType
+from ._exceptions import AssetCacheError, LoadError, RemoteError
+from ._remote import BaseRemote, RemoteSpec, RemoteType
+from ._protos.build.bazel.remote.asset.v1 import remote_asset_pb2, remote_asset_pb2_grpc
+from ._protos.google.rpc import code_pb2
if TYPE_CHECKING:
from typing import Optional, Type
from ._exceptions import BstError
- from ._remote import BaseRemote
-# Base Cache for Caches to derive from
+class AssetRemote(BaseRemote):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.fetch_service = None
+ self.push_service = None
+
+ def close(self):
+ self.fetch_service = None
+ self.push_service = None
+ super().close()
+
+ def _configure_protocols(self):
+ # set up remote asset stubs
+ self.fetch_service = remote_asset_pb2_grpc.FetchStub(self.channel)
+ self.push_service = remote_asset_pb2_grpc.PushStub(self.channel)
+
+ # _check():
+ #
+ # Check if this remote provides everything required for the
+ # particular kind of remote. This is expected to be called as part
+ # of check()
+ #
+ # Raises:
+ # RemoteError: If the upstream has a problem
+ #
+ def _check(self):
+ request = remote_asset_pb2.FetchBlobRequest()
+ if self.instance_name:
+ request.instance_name = self.instance_name
+
+ try:
+ self.fetch_service.FetchBlob(request)
+ except grpc.RpcError as e:
+ if e.code() == grpc.StatusCode.INVALID_ARGUMENT:
+ # Expected error as the request doesn't specify any URIs.
+ pass
+ elif e.code() == grpc.StatusCode.UNIMPLEMENTED:
+ raise RemoteError(
+ "Configured remote does not implement the Remote Asset "
+ "Fetch service. Please check remote configuration."
+ )
+ else:
+ raise RemoteError("Remote initialisation failed with status {}: {}".format(e.code().name, e.details()))
+
+ if self.spec.push:
+ request = remote_asset_pb2.PushBlobRequest()
+ if self.instance_name:
+ request.instance_name = self.instance_name
+
+ try:
+ self.push_service.PushBlob(request)
+ except grpc.RpcError as e:
+ if e.code() == grpc.StatusCode.INVALID_ARGUMENT:
+ # Expected error as the request doesn't specify any URIs.
+ pass
+ elif e.code() == grpc.StatusCode.UNIMPLEMENTED:
+ raise RemoteError(
+ "Configured remote does not implement the Remote Asset "
+ "Push service. Please check remote configuration."
+ )
+ else:
+ raise RemoteError(
+ "Remote initialisation failed with status {}: {}".format(e.code().name, e.details())
+ )
+
+ # fetch_blob():
+ #
+ # Resolve URIs to a CAS blob digest.
+ #
+ # Args:
+ # uris (list of str): The URIs to resolve. Multiple URIs should represent
+ # the same content available at different locations.
+ # qualifiers (list of Qualifier): Optional qualifiers sub-specifying the
+ # content to fetch.
+ #
+ # Returns
+ # (FetchBlobResponse): The asset server response or None if the resource
+ # is not available.
+ #
+ # Raises:
+ # AssetCacheError: If the upstream has a problem
+ #
+ def fetch_blob(self, uris, *, qualifiers=None):
+ request = remote_asset_pb2.FetchBlobRequest()
+ if self.instance_name:
+ request.instance_name = self.instance_name
+ request.uris.extend(uris)
+ if qualifiers:
+ request.qualifiers.extend(qualifiers)
+
+ try:
+ response = self.fetch_service.FetchBlob(request)
+ except grpc.RpcError as e:
+ if e.code() == grpc.StatusCode.NOT_FOUND:
+ return None
+
+ raise AssetCacheError("FetchBlob failed with status {}: {}".format(e.code().name, e.details())) from e
+
+ if response.status.code == code_pb2.NOT_FOUND:
+ return None
+
+ if response.status.code != code_pb2.OK:
+ raise AssetCacheError("FetchBlob failed with response status {}".format(response.status.code))
+
+ return response
+
+ # fetch_directory():
+ #
+ # Resolve URIs to a CAS Directory digest.
+ #
+ # Args:
+ # uris (list of str): The URIs to resolve. Multiple URIs should represent
+ # the same content available at different locations.
+ # qualifiers (list of Qualifier): Optional qualifiers sub-specifying the
+ # content to fetch.
+ #
+ # Returns
+ # (FetchDirectoryResponse): The asset server response or None if the resource
+ # is not available.
+ #
+ # Raises:
+ # AssetCacheError: If the upstream has a problem
+ #
+ def fetch_directory(self, uris, *, qualifiers=None):
+ request = remote_asset_pb2.FetchDirectoryRequest()
+ if self.instance_name:
+ request.instance_name = self.instance_name
+ request.uris.extend(uris)
+ if qualifiers:
+ request.qualifiers.extend(qualifiers)
+
+ try:
+ response = self.fetch_service.FetchDirectory(request)
+ except grpc.RpcError as e:
+ if e.code() == grpc.StatusCode.NOT_FOUND:
+ return None
+
+ raise AssetCacheError("FetchDirectory failed with status {}: {}".format(e.code().name, e.details())) from e
+
+ if response.status.code == code_pb2.NOT_FOUND:
+ return None
+
+ if response.status.code != code_pb2.OK:
+ raise AssetCacheError("FetchDirectory failed with response status {}".format(response.status.code))
+
+ return response
+
+ # push_blob():
+ #
+ # Associate a CAS blob digest to URIs.
+ #
+ # Args:
+ # uris (list of str): The URIs to associate with the blob digest.
+ # blob_digest (Digest): The CAS blob to associate.
+ # qualifiers (list of Qualifier): Optional qualifiers sub-specifying the
+ # content that is being pushed.
+ # references_blobs (list of Digest): Referenced blobs that need to not expire
+ # before expiration of this association.
+ # references_directories (list of Digest): Referenced directories that need to not expire
+ # before expiration of this association.
+ #
+ # Raises:
+ # AssetCacheError: If the upstream has a problem
+ #
+ def push_blob(self, uris, blob_digest, *, qualifiers=None, references_blobs=None, references_directories=None):
+ request = remote_asset_pb2.PushBlobRequest()
+ if self.instance_name:
+ request.instance_name = self.instance_name
+ request.uris.extend(uris)
+ request.blob_digest.CopyFrom(blob_digest)
+ if qualifiers:
+ request.qualifiers.extend(qualifiers)
+ if references_blobs:
+ request.references_blobs.extend(references_blobs)
+ if references_directories:
+ request.references_directories.extend(references_directories)
+
+ try:
+ self.push_service.PushBlob(request)
+ except grpc.RpcError as e:
+ raise AssetCacheError("PushBlob failed with status {}: {}".format(e.code().name, e.details())) from e
+
+ # push_directory():
+ #
+ # Associate a CAS Directory digest to URIs.
+ #
+ # Args:
+ # uris (list of str): The URIs to associate with the blob digest.
+ # directory_digest (Digest): The CAS Directory to associate.
+ # qualifiers (list of Qualifier): Optional qualifiers sub-specifying the
+ # content that is being pushed.
+ # references_blobs (list of Digest): Referenced blobs that need to not expire
+ # before expiration of this association.
+ # references_directories (list of Digest): Referenced directories that need to not expire
+ # before expiration of this association.
+ #
+ # Raises:
+ # AssetCacheError: If the upstream has a problem
+ #
+ def push_directory(
+ self, uris, directory_digest, *, qualifiers=None, references_blobs=None, references_directories=None
+ ):
+ request = remote_asset_pb2.PushDirectoryRequest()
+ if self.instance_name:
+ request.instance_name = self.instance_name
+ request.uris.extend(uris)
+ request.root_directory_digest.CopyFrom(directory_digest)
+ if qualifiers:
+ request.qualifiers.extend(qualifiers)
+ if references_blobs:
+ request.references_blobs.extend(references_blobs)
+ if references_directories:
+ request.references_directories.extend(references_directories)
+
+ try:
+ self.push_service.PushDirectory(request)
+ except grpc.RpcError as e:
+ raise AssetCacheError("PushDirectory failed with status {}: {}".format(e.code().name, e.details())) from e
+
+
+# Base Asset Cache for Caches to derive from
#
-class BaseCache:
+class AssetCache:
# None of these should ever be called in the base class, but this appeases
# pylint to some degree
spec_name = None # type: str
- spec_error = None # type: Type[BstError]
config_node_name = None # type: str
- index_remote_class = None # type: Type[BaseRemote]
- storage_remote_class = CASRemote # type: Type[BaseRemote]
def __init__(self, context):
self.context = context
@@ -76,7 +295,7 @@ class BaseCache:
# release_resources():
#
- # Release resources used by BaseCache.
+ # Release resources used by AssetCache.
#
def release_resources(self):
self.close_grpc_channels()
@@ -274,12 +493,11 @@ class BaseCache:
# are accessible.
#
# Args:
- # on_failure (Callable[[self.remote_class,Exception],None]):
+ # on_failure (Callable[[Remote,Exception],None]):
# What to do when a remote doesn't respond.
#
# Returns:
- # (Dict[RemoteSpec, self.remote_class], Dict[RemoteSpec,
- # self.remote_class]) -
+ # (Dict[RemoteSpec, AssetRemote], Dict[RemoteSpec, CASRemote]) -
# The created remote instances, index first, storage last.
#
def _create_remote_instances(self, *, on_failure=None):
@@ -347,10 +565,10 @@ class BaseCache:
index = None
storage = None
if remote_spec.type in [RemoteType.INDEX, RemoteType.ALL]:
- index = self.index_remote_class(remote_spec) # pylint: disable=not-callable
+ index = AssetRemote(remote_spec) # pylint: disable=not-callable
index.check()
if remote_spec.type in [RemoteType.STORAGE, RemoteType.ALL]:
- storage = self.storage_remote_class(remote_spec, self.cas)
+ storage = CASRemote(remote_spec, self.cas)
storage.check()
return (index, storage)
@@ -431,13 +649,13 @@ class BaseCache:
# ref (str): The ref to remove
#
# Raises:
- # (CASCacheError): If the ref didnt exist, or a system error
+ # (AssetCacheError): If the ref didn't exist, or a system error
# occurred while removing it
#
def _remove_ref(self, ref):
try:
utils._remove_path_with_parents(self._basedir, ref)
except FileNotFoundError as e:
- raise CacheError("Could not find ref '{}'".format(ref)) from e
+ raise AssetCacheError("Could not find ref '{}'".format(ref)) from e
except OSError as e:
- raise CacheError("System error while removing ref '{}': {}".format(ref, e)) from e
+ raise AssetCacheError("System error while removing ref '{}': {}".format(ref, e)) from e
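
A short usage sketch for the AssetRemote defined above: check whether the URNs already resolve to the digest we hold, and push only if they do not, mirroring _push_artifact_proto() in _artifactcache.py. The remote, uris, digest and reference lists are assumed to be supplied by the caller.

def ensure_pushed(remote, uris, digest, *, blobs=None, directories=None):
    # fetch_blob() returns None when the server reports NOT_FOUND.
    response = remote.fetch_blob(uris)
    if response and response.blob_digest == digest:
        return False  # already associated with these URIs

    remote.push_blob(
        uris,
        digest,
        references_blobs=blobs or [],
        references_directories=directories or [],
    )
    return True
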
diff --git a/src/buildstream/_cas/casdprocessmanager.py b/src/buildstream/_cas/casdprocessmanager.py
index 637c4e0b1..32e4cce63 100644
--- a/src/buildstream/_cas/casdprocessmanager.py
+++ b/src/buildstream/_cas/casdprocessmanager.py
@@ -29,6 +29,7 @@ import psutil
import grpc
+from .._protos.build.bazel.remote.asset.v1 import remote_asset_pb2_grpc
from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2_grpc
from .._protos.build.buildgrid import local_cas_pb2_grpc
from .._protos.google.bytestream import bytestream_pb2_grpc
@@ -236,6 +237,8 @@ class CASDChannel:
self._bytestream = None
self._casd_cas = None
self._local_cas = None
+ self._asset_fetch = None
+ self._asset_push = None
self._casd_pid = casd_pid
def _establish_connection(self):
@@ -264,6 +267,8 @@ class CASDChannel:
self._bytestream = bytestream_pb2_grpc.ByteStreamStub(self._casd_channel)
self._casd_cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self._casd_channel)
self._local_cas = local_cas_pb2_grpc.LocalContentAddressableStorageStub(self._casd_channel)
+ self._asset_fetch = remote_asset_pb2_grpc.FetchStub(self._casd_channel)
+ self._asset_push = remote_asset_pb2_grpc.PushStub(self._casd_channel)
# get_cas():
#
@@ -288,6 +293,24 @@ class CASDChannel:
self._establish_connection()
return self._bytestream
+ # get_asset_fetch():
+ #
+ # Return Remote Asset Fetch stub for buildbox-casd channel.
+ #
+ def get_asset_fetch(self):
+ if self._casd_channel is None:
+ self._establish_connection()
+ return self._asset_fetch
+
+ # get_asset_push():
+ #
+ # Return Remote Asset Push stub for buildbox-casd channel.
+ #
+ def get_asset_push(self):
+ if self._casd_channel is None:
+ self._establish_connection()
+ return self._asset_push
+
# is_closed():
#
# Return whether this connection is closed or not.
@@ -302,6 +325,8 @@ class CASDChannel:
def close(self):
if self.is_closed():
return
+ self._asset_push = None
+ self._asset_fetch = None
self._local_cas = None
self._casd_cas = None
self._bytestream = None
diff --git a/src/buildstream/_cas/casremote.py b/src/buildstream/_cas/casremote.py
index 93f4e500c..656c08a36 100644
--- a/src/buildstream/_cas/casremote.py
+++ b/src/buildstream/_cas/casremote.py
@@ -15,6 +15,8 @@
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
+import grpc
+
from .._protos.google.rpc import code_pb2
from .._protos.build.buildgrid import local_cas_pb2
@@ -54,17 +56,25 @@ class CASRemote(BaseRemote):
#
def _configure_protocols(self):
local_cas = self.cascache.get_local_cas()
- request = local_cas_pb2.GetInstanceNameForRemoteRequest()
- request.url = self.spec.url
+ request = local_cas_pb2.GetInstanceNameForRemotesRequest()
+ cas_endpoint = request.content_addressable_storage
+ cas_endpoint.url = self.spec.url
if self.spec.instance_name:
- request.instance_name = self.spec.instance_name
+ cas_endpoint.instance_name = self.spec.instance_name
if self.server_cert:
- request.server_cert = self.server_cert
+ cas_endpoint.server_cert = self.server_cert
if self.client_key:
- request.client_key = self.client_key
+ cas_endpoint.client_key = self.client_key
if self.client_cert:
- request.client_cert = self.client_cert
- response = local_cas.GetInstanceNameForRemote(request)
+ cas_endpoint.client_cert = self.client_cert
+ try:
+ response = local_cas.GetInstanceNameForRemotes(request)
+ except grpc.RpcError as e:
+ if e.code() == grpc.StatusCode.UNIMPLEMENTED:
+ raise CASRemoteError(
+ "Unsupported buildbox-casd version: GetInstanceNameForRemotes unimplemented"
+ ) from e
+ raise
self.local_cas_instance_name = response.instance_name
# push_message():
diff --git a/src/buildstream/_cas/casserver.py b/src/buildstream/_cas/casserver.py
index 71d7d9071..013fb07dd 100644
--- a/src/buildstream/_cas/casserver.py
+++ b/src/buildstream/_cas/casserver.py
@@ -1,5 +1,6 @@
#
# Copyright (C) 2018 Codethink Limited
+# Copyright (C) 2020 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
@@ -26,22 +27,17 @@ import signal
import sys
import grpc
-from google.protobuf.message import DecodeError
import click
+from .._protos.build.bazel.remote.asset.v1 import remote_asset_pb2_grpc
from .._protos.build.bazel.remote.execution.v2 import (
remote_execution_pb2,
remote_execution_pb2_grpc,
)
from .._protos.google.bytestream import bytestream_pb2_grpc
-from .._protos.build.buildgrid import local_cas_pb2
from .._protos.buildstream.v2 import (
buildstream_pb2,
buildstream_pb2_grpc,
- artifact_pb2,
- artifact_pb2_grpc,
- source_pb2,
- source_pb2_grpc,
)
# Note: We'd ideally like to avoid imports from the core codebase as
@@ -115,7 +111,6 @@ def create_server(repo, *, enable_push, quota, index_only, log_level=LogLevel.Le
try:
root = os.path.abspath(repo)
- sourcedir = os.path.join(root, "source_protos")
# Use max_workers default from Python 3.5+
max_workers = (os.cpu_count() or 1) * 5
@@ -132,23 +127,16 @@ def create_server(repo, *, enable_push, quota, index_only, log_level=LogLevel.Le
remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(_CapabilitiesServicer(), server)
+ # Remote Asset API
+ remote_asset_pb2_grpc.add_FetchServicer_to_server(_FetchServicer(casd_channel), server)
+ if enable_push:
+ remote_asset_pb2_grpc.add_PushServicer_to_server(_PushServicer(casd_channel), server)
+
+ # BuildStream protocols
buildstream_pb2_grpc.add_ReferenceStorageServicer_to_server(
_ReferenceStorageServicer(casd_channel, root, enable_push=enable_push), server
)
- artifact_pb2_grpc.add_ArtifactServiceServicer_to_server(
- _ArtifactServicer(casd_channel, root, update_cas=not index_only), server
- )
-
- source_pb2_grpc.add_SourceServiceServicer_to_server(_SourceServicer(sourcedir), server)
-
- # Create up reference storage and artifact capabilities
- artifact_capabilities = buildstream_pb2.ArtifactCapabilities(allow_updates=enable_push)
- source_capabilities = buildstream_pb2.SourceCapabilities(allow_updates=enable_push)
- buildstream_pb2_grpc.add_CapabilitiesServicer_to_server(
- _BuildStreamCapabilitiesServicer(artifact_capabilities, source_capabilities), server
- )
-
yield server
finally:
@@ -295,6 +283,48 @@ class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
return response
+class _FetchServicer(remote_asset_pb2_grpc.FetchServicer):
+ def __init__(self, casd):
+ super().__init__()
+ self.fetch = casd.get_asset_fetch()
+ self.logger = logging.getLogger("buildstream._cas.casserver")
+
+ def FetchBlob(self, request, context):
+ self.logger.debug("FetchBlob '%s'", request.uris)
+ try:
+ return self.fetch.FetchBlob(request)
+ except grpc.RpcError as err:
+ context.abort(err.code(), err.details())
+
+ def FetchDirectory(self, request, context):
+ self.logger.debug("FetchDirectory '%s'", request.uris)
+ try:
+ return self.fetch.FetchDirectory(request)
+ except grpc.RpcError as err:
+ context.abort(err.code(), err.details())
+
+
+class _PushServicer(remote_asset_pb2_grpc.PushServicer):
+ def __init__(self, casd):
+ super().__init__()
+ self.push = casd.get_asset_push()
+ self.logger = logging.getLogger("buildstream._cas.casserver")
+
+ def PushBlob(self, request, context):
+ self.logger.debug("PushBlob '%s'", request.uris)
+ try:
+ return self.push.PushBlob(request)
+ except grpc.RpcError as err:
+ context.abort(err.code(), err.details())
+
+ def PushDirectory(self, request, context):
+ self.logger.debug("PushDirectory '%s'", request.uris)
+ try:
+ return self.push.PushDirectory(request)
+ except grpc.RpcError as err:
+ context.abort(err.code(), err.details())
+
+
class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
def __init__(self, casd, cas_root, *, enable_push):
super().__init__()
@@ -393,201 +423,3 @@ class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
response.allow_updates = self.enable_push
return response
-
-
-class _ArtifactServicer(artifact_pb2_grpc.ArtifactServiceServicer):
- def __init__(self, casd, root, *, update_cas=True):
- super().__init__()
- self.cas = casd.get_cas()
- self.local_cas = casd.get_local_cas()
- self.root = root
- self.artifactdir = os.path.join(root, "artifacts", "refs")
- self.update_cas = update_cas
- self.logger = logging.getLogger("buildstream._cas.casserver")
-
- # object_path():
- #
- # Get the path to an object's file.
- #
- # Args:
- # digest - The digest of the object.
- #
- # Returns:
- # str - The path to the object's file.
- #
- def object_path(self, digest) -> str:
- return os.path.join(self.root, "cas", "objects", digest.hash[:2], digest.hash[2:])
-
- # resolve_digest():
- #
- # Read the directory corresponding to a digest.
- #
- # Args:
- # digest - The digest corresponding to a directory.
- #
- # Returns:
- # remote_execution_pb2.Directory - The directory.
- #
- # Raises:
- # FileNotFoundError - If the digest object doesn't exist.
- def resolve_digest(self, digest):
- directory = remote_execution_pb2.Directory()
- with open(self.object_path(digest), "rb") as f:
- directory.ParseFromString(f.read())
- return directory
-
- def GetArtifact(self, request, context):
- self.logger.info("'%s'", request.cache_key)
- artifact_path = os.path.join(self.artifactdir, request.cache_key)
- if not os.path.exists(artifact_path):
- context.abort(grpc.StatusCode.NOT_FOUND, "Artifact proto not found")
-
- artifact = artifact_pb2.Artifact()
- with open(artifact_path, "rb") as f:
- artifact.ParseFromString(f.read())
-
- os.utime(artifact_path)
-
- # Artifact-only servers will not have blobs on their system,
- # so we can't reasonably update their mtimes. Instead, we exit
- # early, and let the CAS server deal with its blobs.
- #
- # FIXME: We could try to run FindMissingBlobs on the other
- # server. This is tricky to do from here, of course,
- # because we don't know who the other server is, but
- # the client could be smart about it - but this might
- # make things slower.
- #
- # It needs some more thought...
- if not self.update_cas:
- return artifact
-
- # Now update mtimes of files present.
- try:
-
- if str(artifact.files):
- request = local_cas_pb2.FetchTreeRequest()
- request.root_digest.CopyFrom(artifact.files)
- request.fetch_file_blobs = True
- self.local_cas.FetchTree(request)
-
- if str(artifact.buildtree):
- try:
- request = local_cas_pb2.FetchTreeRequest()
- request.root_digest.CopyFrom(artifact.buildtree)
- request.fetch_file_blobs = True
- self.local_cas.FetchTree(request)
- except grpc.RpcError as err:
- # buildtrees might not be there
- if err.code() != grpc.StatusCode.NOT_FOUND:
- raise
-
- if str(artifact.public_data):
- request = remote_execution_pb2.FindMissingBlobsRequest()
- d = request.blob_digests.add()
- d.CopyFrom(artifact.public_data)
- self.cas.FindMissingBlobs(request)
-
- request = remote_execution_pb2.FindMissingBlobsRequest()
- for log_file in artifact.logs:
- d = request.blob_digests.add()
- d.CopyFrom(log_file.digest)
- self.cas.FindMissingBlobs(request)
-
- except grpc.RpcError as err:
- if err.code() == grpc.StatusCode.NOT_FOUND:
- os.unlink(artifact_path)
- context.abort(grpc.StatusCode.NOT_FOUND, "Artifact files incomplete")
- else:
- context.abort(grpc.StatusCode.NOT_FOUND, "Artifact files not valid")
-
- return artifact
-
- def UpdateArtifact(self, request, context):
- self.logger.info("'%s' -> '%s'", request.artifact, request.cache_key)
- artifact = request.artifact
-
- if self.update_cas:
- # Check that the files specified are in the CAS
- if str(artifact.files):
- self._check_directory("files", artifact.files, context)
-
- # Unset protocol buffers don't evaluated to False but do return empty
- # strings, hence str()
- if str(artifact.public_data):
- self._check_file("public data", artifact.public_data, context)
-
- for log_file in artifact.logs:
- self._check_file("log digest", log_file.digest, context)
-
- # Add the artifact proto to the cas
- artifact_path = os.path.join(self.artifactdir, request.cache_key)
- os.makedirs(os.path.dirname(artifact_path), exist_ok=True)
- with save_file_atomic(artifact_path, mode="wb") as f:
- f.write(artifact.SerializeToString())
-
- return artifact
-
- def _check_directory(self, name, digest, context):
- try:
- self.resolve_digest(digest)
- except FileNotFoundError:
- self.logger.warning("Artifact %s specified but no files found", name)
- context.abort(grpc.StatusCode.FAILED_PRECONDITION, "Artifact {} specified but no files found".format(name))
- except DecodeError:
- self.logger.warning("Artifact %s specified but directory not found", name)
- context.abort(
- grpc.StatusCode.FAILED_PRECONDITION, "Artifact {} specified but directory not found".format(name)
- )
-
- def _check_file(self, name, digest, context):
- if not os.path.exists(self.object_path(digest)):
- context.abort(grpc.StatusCode.FAILED_PRECONDITION, "Artifact {} specified but not found".format(name))
-
-
-class _BuildStreamCapabilitiesServicer(buildstream_pb2_grpc.CapabilitiesServicer):
- def __init__(self, artifact_capabilities, source_capabilities):
- self.artifact_capabilities = artifact_capabilities
- self.source_capabilities = source_capabilities
-
- def GetCapabilities(self, request, context):
- response = buildstream_pb2.ServerCapabilities()
- response.artifact_capabilities.CopyFrom(self.artifact_capabilities)
- response.source_capabilities.CopyFrom(self.source_capabilities)
- return response
-
-
-class _SourceServicer(source_pb2_grpc.SourceServiceServicer):
- def __init__(self, sourcedir):
- self.sourcedir = sourcedir
- self.logger = logging.getLogger("buildstream._cas.casserver")
-
- def GetSource(self, request, context):
- self.logger.info("'%s'", request.cache_key)
- try:
- source_proto = self._get_source(request.cache_key)
- except FileNotFoundError:
- context.abort(grpc.StatusCode.NOT_FOUND, "Source not found")
- except DecodeError:
- context.abort(grpc.StatusCode.NOT_FOUND, "Sources gives invalid directory")
-
- return source_proto
-
- def UpdateSource(self, request, context):
- self.logger.info("'%s' -> '%s'", request.source, request.cache_key)
- self._set_source(request.cache_key, request.source)
- return request.source
-
- def _get_source(self, cache_key):
- path = os.path.join(self.sourcedir, cache_key)
- source_proto = source_pb2.Source()
- with open(path, "r+b") as f:
- source_proto.ParseFromString(f.read())
- os.utime(path)
- return source_proto
-
- def _set_source(self, cache_key, source_proto):
- path = os.path.join(self.sourcedir, cache_key)
- os.makedirs(os.path.dirname(path), exist_ok=True)
- with save_file_atomic(path, "w+b") as f:
- f.write(source_proto.SerializeToString())
diff --git a/src/buildstream/_exceptions.py b/src/buildstream/_exceptions.py
index 755da3a9d..80008b884 100644
--- a/src/buildstream/_exceptions.py
+++ b/src/buildstream/_exceptions.py
@@ -171,11 +171,11 @@ class SandboxError(BstError):
super().__init__(message, detail=detail, domain=ErrorDomain.SANDBOX, reason=reason)
-# CacheError
+# AssetCacheError
#
# Raised when errors are encountered in either type of cache
#
-class CacheError(BstError):
+class AssetCacheError(BstError):
def __init__(self, message, detail=None, reason=None):
super().__init__(message, detail=detail, domain=ErrorDomain.SANDBOX, reason=reason)
diff --git a/src/buildstream/_protos/build/bazel/remote/asset/__init__.py b/src/buildstream/_protos/build/bazel/remote/asset/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/asset/__init__.py
diff --git a/src/buildstream/_protos/build/bazel/remote/asset/v1/__init__.py b/src/buildstream/_protos/build/bazel/remote/asset/v1/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/asset/v1/__init__.py
diff --git a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto
new file mode 100644
index 000000000..60be76411
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto
@@ -0,0 +1,445 @@
+// Copyright 2020 The Bazel Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package build.bazel.remote.asset.v1;
+
+import "build/bazel/remote/execution/v2/remote_execution.proto";
+import "google/api/annotations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Build.Bazel.Remote.Asset.v1";
+option go_package = "remoteasset";
+option java_multiple_files = true;
+option java_outer_classname = "RemoteAssetProto";
+option java_package = "build.bazel.remote.asset.v1";
+option objc_class_prefix = "RA";
+
+// The Remote Asset API provides a mapping from a URI and Qualifiers to
+// Digests.
+//
+// Multiple URIs may be used to refer to the same content. For example, the
+// same tarball may exist at multiple mirrors and thus be retrievable from
+// multiple URLs. When URLs are used, these should refer to actual content as
+// Fetch service implementations may choose to fetch the content directly
+// from the origin. For example, the HEAD of a git repository's active branch
+// can be referred to as:
+//
+// uri: https://github.com/bazelbuild/remote-apis.git
+//
+// URNs may be used to strongly identify content, for instance by using the
+// uuid namespace identifier: urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6.
+// This is most applicable to named content that is Push'd, where the URN
+// serves as an agreed-upon key, but carries no other inherent meaning.
+//
+// Service implementations may choose to support only URLs, only URNs for
+// Push'd content, only other URIs for which the server and client agree upon
+// semantics of, or any mixture of the above.
+
+// Qualifiers are used to disambiguate or sub-select content that shares a URI.
+// This may include specifying a particular commit or branch, in the case of
+// URIs referencing a repository; they could also be used to specify a
+// particular subdirectory of a repository or tarball. Qualifiers may also be
+// used to ensure content matches what the client expects, even when there is
+// no ambiguity to be had - for example, a qualifier specifying a checksum
+// value.
+//
+// In cases where the semantics of the request are not immediately clear from
+// the URL and/or qualifiers - e.g. dictated by URL scheme - it is recommended
+// to use an additional qualifier to remove the ambiguity. The `resource_type`
+// qualifier is recommended for this purpose.
+//
+// Qualifiers may be supplied in any order.
+message Qualifier {
+ // The "name" of the qualifier, for example "resource_type".
+ // No separation is made between 'standard' and 'nonstandard'
+ // qualifiers, in accordance with https://tools.ietf.org/html/rfc6648,
+ // however implementers *SHOULD* take care to avoid ambiguity.
+ string name = 1;
+
+ // The "value" of the qualifier. Semantics will be dictated by the name.
+ string value = 2;
+}
+
+// The Fetch service resolves or fetches assets referenced by URI and
+// Qualifiers, returning a Digest for the content in
+// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service Fetch {
+ // Resolve or fetch referenced assets, making them available to the caller and
+ // other consumers in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ //
+ // Servers *MAY* fetch content that they do not already have cached, for any
+ // URLs they support.
+ //
+ // Servers *SHOULD* ensure that referenced files are present in the CAS at the
+ // time of the response, and (if supported) that they will remain available
+ // for a reasonable period of time. The TTLs of the referenced blobs *SHOULD*
+ // be increased if necessary and applicable.
+ // In the event that a client receives a reference to content that is no
+ // longer present, it *MAY* re-issue the request with
+ // `oldest_content_accepted` set to a more recent timestamp than the original
+ // attempt, to induce a re-fetch from origin.
+ //
+ // Servers *MAY* cache fetched content and reuse it for subsequent requests,
+ // subject to `oldest_content_accepted`.
+ //
+ // Servers *MAY* support the complementary [Push][build.bazel.remote.asset.v1.Push]
+ // API and allow content to be directly inserted for use in future fetch
+ // responses.
+ //
+ // Servers *MUST* ensure Fetch'd content matches all the specified
+ // qualifiers except in the case of previously Push'd resources, for which
+ // the server *MAY* trust the pushing client to have set the qualifiers
+ // correctly, without validation.
+ //
+ // Servers not implementing the complementary [Push][build.bazel.remote.asset.v1.Push]
+ // API *MUST* reject requests containing qualifiers it does not support.
+ //
+ // Servers *MAY* transform assets as part of the fetch. For example a
+ // tarball fetched by [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]
+ // might be unpacked, or a Git repository
+ // fetched by [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]
+ // might be passed through `git-archive`.
+ //
+ // Errors handling the requested assets will be returned as gRPC Status errors
+ // here; errors outside the server's control will be returned inline in the
+ // `status` field of the response (see comment there for details).
+ // The possible RPC errors include:
+ // * `INVALID_ARGUMENT`: One or more arguments were invalid, such as a
+ // qualifier that is not supported by the server.
+ // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
+ // perform the requested operation. The client may retry after a delay.
+ // * `UNAVAILABLE`: Due to a transient condition the operation could not be
+ // completed. The client should retry.
+ // * `INTERNAL`: An internal error occurred while performing the operation.
+ // The client should retry.
+ // * `DEADLINE_EXCEEDED`: The fetch could not be completed within the given
+ // RPC deadline. The client should retry for at least as long as the value
+ // provided in `timeout` field of the request.
+ //
+ // In the case of unsupported qualifiers, the server *SHOULD* additionally
+ // send a [BadRequest][google.rpc.BadRequest] error detail where, for each
+ // unsupported qualifier, there is a `FieldViolation` with a `field` of
+ // `qualifiers.name` and a `description` of `"{qualifier}" not supported`
+ // indicating the name of the unsupported qualifier.
+ rpc FetchBlob(FetchBlobRequest) returns (FetchBlobResponse) {
+ option (google.api.http) = { post: "/v1/{instance_name=**}/assets:fetchBlob" body: "*" };
+ }
+ rpc FetchDirectory(FetchDirectoryRequest) returns (FetchDirectoryResponse) {
+ option (google.api.http) = { post: "/v1/{instance_name=**}/assets:fetchDirectory" body: "*" };
+ }
+}
+
+// A request message for
+// [Fetch.FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob].
+message FetchBlobRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The timeout for the underlying fetch, if content needs to be retrieved from
+ // origin.
+ //
+ // If unset, the server *MAY* apply an implementation-defined timeout.
+ //
+ // If set, and the user-provided timeout exceeds the RPC deadline, the server
+ // *SHOULD* keep the fetch going after the RPC completes, to be made
+ // available for future Fetch calls. The server may also enforce (via clamping
+ // and/or an INVALID_ARGUMENT error) implementation-defined minimum and
+ // maximum timeout values.
+ //
+ // If this timeout is exceeded on an attempt to retrieve content from origin
+ // the client will receive DEADLINE_EXCEEDED in [FetchBlobResponse.status].
+ google.protobuf.Duration timeout = 2;
+
+ // The oldest content the client is willing to accept, as measured from the
+ // time it was Push'd or when the underlying retrieval from origin was
+ // started.
+ // Upon retries of Fetch requests that cannot be completed within a single
+ // RPC, clients *SHOULD* provide the same value for subsequent requests as the
+ // original, to simplify combining the request with the previous attempt.
+ //
+ // If unset, the client *SHOULD* accept content of any age.
+ google.protobuf.Timestamp oldest_content_accepted = 3;
+
+ // The URI(s) of the content to fetch. These may be resources that the server
+ // can directly fetch from origin, in which case multiple URIs *SHOULD*
+ // represent the same content available at different locations (such as an
+ // origin and secondary mirrors). These may also be URIs for content known to
+ // the server through other mechanisms, e.g. pushed via the [Push][build.bazel.remote.asset.v1.Push]
+ // service.
+ //
+ // Clients *MUST* supply at least one URI. Servers *MAY* match any one of the
+ // supplied URIs.
+ repeated string uris = 4;
+
+ // Qualifiers sub-specifying the content to fetch - see comments on
+ // [Qualifier][build.bazel.remote.asset.v1.Qualifier].
+ // The same qualifiers apply to all URIs.
+ //
+ // Specified qualifier names *MUST* be unique.
+ repeated Qualifier qualifiers = 5;
+}
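
To illustrate the request fields above, a sketch of populating the well-known-type and repeated fields from Python. The qualifier name shown is purely illustrative, and servers may reject qualifiers they do not support.

    from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2

    request = remote_asset_pb2.FetchBlobRequest()
    request.uris.append("https://example.com/archive.tar.gz")

    # Give the origin fetch up to five minutes (google.protobuf.Duration).
    request.timeout.FromSeconds(300)

    # Accept only content retrieved or pushed from now on, which induces a
    # re-fetch from origin (google.protobuf.Timestamp).
    request.oldest_content_accepted.GetCurrentTime()

    # Qualifier names must be unique; this one is only an example.
    request.qualifiers.add(name="resource_type", value="application/x-tar")
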
+
+// A response message for
+// [Fetch.FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob].
+message FetchBlobResponse {
+ // If the status has a code other than `OK`, it indicates that the operation
+ // was unable to be completed for reasons outside the servers' control.
+ // The possible fetch errors include:
+ // * `DEADLINE_EXCEEDED`: The operation could not be completed within the
+ // specified timeout.
+ // * `NOT_FOUND`: The requested asset was not found at the specified location.
+ // * `PERMISSION_DENIED`: The request was rejected by a remote server, or
+ // requested an asset from a disallowed origin.
+ // * `ABORTED`: The operation could not be completed, typically due to a
+ // failed consistency check.
+ google.rpc.Status status = 1;
+
+ // The uri from the request that resulted in a successful retrieval, or from
+ // which the error indicated in `status` was obtained.
+ string uri = 2;
+
+ // Any qualifiers known to the server and of interest to clients.
+ repeated Qualifier qualifiers = 3;
+
+ // A minimum timestamp the content is expected to be available through.
+ // Servers *MAY* omit this field, if not known with confidence.
+ google.protobuf.Timestamp expires_at = 4;
+
+ // The result of the fetch, if the status had code `OK`.
+ // The digest of the file's contents, available for download through the CAS.
+ build.bazel.remote.execution.v2.Digest blob_digest = 5;
+}
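
A sketch of how a client might interpret a FetchBlobResponse, distinguishing inline fetch errors reported in `status` from the success case; the numeric values are the standard google.rpc.Code constants.

    def handle_fetch_blob_response(response):
        # google.rpc.Code values: 0 = OK, 4 = DEADLINE_EXCEEDED, 5 = NOT_FOUND.
        if response.status.code != 0:
            raise RuntimeError(
                "fetch of {} failed: {}".format(response.uri, response.status.message)
            )

        # On success the blob is in CAS and can be downloaded by digest.
        digest = response.blob_digest
        return digest.hash, digest.size_bytes
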
+
+// A request message for
+// [Fetch.FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory].
+message FetchDirectoryRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The timeout for the underlying fetch, if content needs to be retrieved from
+ // origin. This value is allowed to exceed the RPC deadline, in which case the
+ // server *SHOULD* keep the fetch going after the RPC completes, to be made
+ // available for future Fetch calls.
+ //
+ // If this timeout is exceeded on an attempt to retrieve content from origin
+ // the client will receive DEADLINE_EXCEEDED in [FetchDirectoryResponse.status].
+ google.protobuf.Duration timeout = 2;
+
+ // The oldest content the client is willing to accept, as measured from the
+ // time it was Push'd or when the underlying retrieval from origin was
+ // started.
+ // Upon retries of Fetch requests that cannot be completed within a single
+ // RPC, clients *SHOULD* provide the same value for subsequent requests as the
+ // original, to simplify combining the request with the previous attempt.
+ //
+ // If unset, the client *SHOULD* accept content of any age.
+ google.protobuf.Timestamp oldest_content_accepted = 3;
+
+ // The URI(s) of the content to fetch. These may be resources that the server
+ // can directly fetch from origin, in which case multiple URIs *SHOULD*
+ // represent the same content available at different locations (such as an
+ // origin and secondary mirrors). These may also be URIs for content known to
+ // the server through other mechanisms, e.g. pushed via the [Push][build.bazel.remote.asset.v1.Push]
+ // service.
+ //
+ // Clients *MUST* supply at least one URI. Servers *MAY* match any one of the
+ // supplied URIs.
+ repeated string uris = 4;
+
+ // Qualifiers sub-specifying the content to fetch - see comments on
+ // [Qualifier][build.bazel.remote.asset.v1.Qualifier].
+ // The same qualifiers apply to all URIs.
+ //
+ // Specified qualifier names *MUST* be unique.
+ repeated Qualifier qualifiers = 5;
+}
+
+// A response message for
+// [Fetch.FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory].
+message FetchDirectoryResponse {
+ // If the status has a code other than `OK`, it indicates that the operation
+ // was unable to be completed for reasons outside the servers' control.
+ // The possible fetch errors include:
+ // * `DEADLINE_EXCEEDED`: The operation could not be completed within the
+ // specified timeout.
+ // * `NOT_FOUND`: The requested asset was not found at the specified location.
+ // * `PERMISSION_DENIED`: The request was rejected by a remote server, or
+ // requested an asset from a disallowed origin.
+ // * `ABORTED`: The operation could not be completed, typically due to a
+ // failed consistency check.
+ google.rpc.Status status = 1;
+
+ // The uri from the request that resulted in a successful retrieval, or from
+ // which the error indicated in `status` was obtained.
+ string uri = 2;
+
+ // Any qualifiers known to the server and of interest to clients.
+ repeated Qualifier qualifiers = 3;
+
+ // A minimum timestamp the content is expected to be available through.
+ // Servers *MAY* omit this field, if not known with confidence.
+ google.protobuf.Timestamp expires_at = 4;
+
+ // The result of the fetch, if the status had code `OK`.
+ // The root digest of a directory tree, suitable for fetching via
+ // [ContentAddressableStorage.GetTree].
+ build.bazel.remote.execution.v2.Digest root_directory_digest = 5;
+}
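
FetchDirectory mirrors FetchBlob but resolves to a directory tree rather than a single blob. A brief sketch, again with a placeholder endpoint and URI, showing that only the result field differs:

    import grpc

    from buildstream._protos.build.bazel.remote.asset.v1 import (
        remote_asset_pb2,
        remote_asset_pb2_grpc,
    )

    stub = remote_asset_pb2_grpc.FetchStub(grpc.insecure_channel("localhost:50052"))

    request = remote_asset_pb2.FetchDirectoryRequest(
        uris=["https://example.com/archive.tar.gz"],
    )
    response = stub.FetchDirectory(request)

    if response.status.code == 0:  # google.rpc.Code.OK
        # Root of the fetched tree, retrievable via ContentAddressableStorage.GetTree.
        root = response.root_directory_digest
        print(root.hash, root.size_bytes)
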
+
+// The Push service is complementary to the Fetch, and allows for
+// associating contents of URLs to be returned in future Fetch API calls.
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service Push {
+ // These APIs associate the identifying information of a resource, as
+ // indicated by URI and optionally Qualifiers, with content available in the
+ // CAS. For example, associating a repository url and a commit id with a
+ // Directory Digest.
+ //
+ // Servers *SHOULD* only allow trusted clients to associate content, and *MAY*
+ // only allow certain URIs to be pushed.
+ //
+ // Clients *MUST* ensure associated content is available in CAS prior to
+ // pushing.
+ //
+ // Clients *MUST* ensure the Qualifiers listed correctly match the contents,
+ // and Servers *MAY* trust these values without validation.
+ // Fetch servers *MAY* require exact match of all qualifiers when returning
+ // content previously pushed, or allow fetching content with only a subset of
+ // the qualifiers specified on Push.
+ //
+ // Clients can specify expiration information that the server *SHOULD*
+ // respect. Subsequent requests can be used to alter the expiration time.
+ //
+ // A minimal compliant Fetch implementation may support only Push'd content
+ // and return `NOT_FOUND` for any resource that was not pushed first.
+ // Alternatively, a compliant implementation may choose to not support Push
+ // and only return resources that can be Fetch'd from origin.
+ //
+ // Errors will be returned as gRPC Status errors.
+ // The possible RPC errors include:
+ // * `INVALID_ARGUMENT`: One or more arguments to the RPC were invalid.
+ // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
+ // perform the requested operation. The client may retry after a delay.
+ // * `UNAVAILABLE`: Due to a transient condition the operation could not be
+ // completed. The client should retry.
+ // * `INTERNAL`: An internal error occurred while performing the operation.
+ // The client should retry.
+ rpc PushBlob(PushBlobRequest) returns (PushBlobResponse) {
+ option (google.api.http) = { post: "/v1/{instance_name=**}/assets:pushBlob" body: "*" };
+ }
+
+ rpc PushDirectory(PushDirectoryRequest) returns (PushDirectoryResponse) {
+ option (google.api.http) = { post: "/v1/{instance_name=**}/assets:pushDirectory" body: "*" };
+ }
+}
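
A client-side sketch of the Push flow described above: the blob must already be present in CAS (uploading it is out of scope here), after which PushBlob associates it with one or more URIs. The digest values and address are placeholders.

    import grpc

    from buildstream._protos.build.bazel.remote.asset.v1 import (
        remote_asset_pb2,
        remote_asset_pb2_grpc,
    )
    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    channel = grpc.insecure_channel("localhost:50052")
    push = remote_asset_pb2_grpc.PushStub(channel)

    # Digest of a blob previously uploaded to CAS (placeholder values).
    blob = remote_execution_pb2.Digest(hash="0123abcd", size_bytes=4096)

    request = remote_asset_pb2.PushBlobRequest(
        uris=["https://example.com/archive.tar.gz"],
        blob_digest=blob,
    )
    push.PushBlob(request)  # returns an empty PushBlobResponse on success
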
+
+// A request message for
+// [Push.PushBlob][build.bazel.remote.asset.v1.Push.PushBlob].
+message PushBlobRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The URI(s) of the content to associate. If multiple URIs are specified, the
+ // pushed content will be available to fetch by specifying any of them.
+ repeated string uris = 2;
+
+ // Qualifiers sub-specifying the content that is being pushed - see comments
+ // on [Qualifier][build.bazel.remote.asset.v1.Qualifier].
+ // The same qualifiers apply to all URIs.
+ repeated Qualifier qualifiers = 3;
+
+ // A time after which this content should stop being returned via [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob].
+ // Servers *MAY* expire content early, e.g. due to storage pressure.
+ google.protobuf.Timestamp expire_at = 4;
+
+ // The blob to associate.
+ build.bazel.remote.execution.v2.Digest blob_digest = 5;
+
+ // Referenced blobs or directories that need to not expire before expiration
+ // of this association, in addition to `blob_digest` itself.
+ // These fields are hints - clients *MAY* omit them, and servers *SHOULD*
+ // respect them, at the risk of increased incidents of Fetch responses
+ // indirectly referencing unavailable blobs.
+ repeated build.bazel.remote.execution.v2.Digest references_blobs = 6;
+ repeated build.bazel.remote.execution.v2.Digest references_directories = 7;
+}
+
+// A response message for
+// [Push.PushBlob][build.bazel.remote.asset.v1.Push.PushBlob].
+message PushBlobResponse { /* empty */ }
+
+// A request message for
+// [Push.PushDirectory][build.bazel.remote.asset.v1.Push.PushDirectory].
+message PushDirectoryRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The URI(s) of the content to associate. If multiple URIs are specified, the
+ // pushed content will be available to fetch by specifying any of them.
+ repeated string uris = 2;
+
+ // Qualifiers sub-specifying the content that is being pushed - see comments
+ // on [Qualifier][build.bazel.remote.asset.v1.Qualifier].
+ // The same qualifiers apply to all URIs.
+ repeated Qualifier qualifiers = 3;
+
+ // A time after which this content should stop being returned via
+ // [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory].
+ // Servers *MAY* expire content early, e.g. due to storage pressure.
+ google.protobuf.Timestamp expire_at = 4;
+
+ // The directory to associate.
+ build.bazel.remote.execution.v2.Digest root_directory_digest = 5;
+
+ // Referenced blobs or directories that need to not expire before expiration
+ // of this association, in addition to `root_directory_digest` itself.
+ // These fields are hints - clients *MAY* omit them, and servers *SHOULD*
+ // respect them, at the risk of increased incidents of Fetch responses
+ // indirectly referencing unavailable blobs.
+ repeated build.bazel.remote.execution.v2.Digest references_blobs = 6;
+ repeated build.bazel.remote.execution.v2.Digest references_directories = 7;
+}
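
To illustrate the expiry and reference hints above, a sketch of a PushDirectoryRequest that asks the server to retain the association for a week and names a blob the directory tree depends on; all digest values are placeholders.

    from datetime import datetime, timedelta

    from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2
    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    request = remote_asset_pb2.PushDirectoryRequest(
        uris=["https://example.com/archive.tar.gz"],
        root_directory_digest=remote_execution_pb2.Digest(hash="aa11", size_bytes=512),
    )

    # Ask the server to keep the association for a week (it *MAY* expire it earlier).
    request.expire_at.FromDatetime(datetime.utcnow() + timedelta(days=7))

    # Hint at referenced content so the server can pin it alongside the root digest.
    request.references_blobs.add(hash="bb22", size_bytes=2048)
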
+
+// A response message for
+// [Push.PushDirectory][build.bazel.remote.asset.v1.Push.PushDirectory].
+message PushDirectoryResponse { /* empty */ }
diff --git a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py
new file mode 100644
index 000000000..c40df6434
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py
@@ -0,0 +1,666 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: build/bazel/remote/asset/v1/remote_asset.proto
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2
+from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='build/bazel/remote/asset/v1/remote_asset.proto',
+ package='build.bazel.remote.asset.v1',
+ syntax='proto3',
+ serialized_options=b'\n\033build.bazel.remote.asset.v1B\020RemoteAssetProtoP\001Z\013remoteasset\242\002\002RA\252\002\033Build.Bazel.Remote.Asset.v1',
+ serialized_pb=b'\n.build/bazel/remote/asset/v1/remote_asset.proto\x12\x1b\x62uild.bazel.remote.asset.v1\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"(\n\tQualifier\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xdc\x01\n\x10\x46\x65tchBlobRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12*\n\x07timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12;\n\x17oldest_content_accepted\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04uris\x18\x04 \x03(\t\x12:\n\nqualifiers\x18\x05 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\"\xee\x01\n\x11\x46\x65tchBlobResponse\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12.\n\nexpires_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12<\n\x0b\x62lob_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xe1\x01\n\x15\x46\x65tchDirectoryRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12*\n\x07timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12;\n\x17oldest_content_accepted\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04uris\x18\x04 \x03(\t\x12:\n\nqualifiers\x18\x05 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\"\xfd\x01\n\x16\x46\x65tchDirectoryResponse\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12.\n\nexpires_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x46\n\x15root_directory_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xeb\x02\n\x0fPushBlobRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04uris\x18\x02 \x03(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12-\n\texpire_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12<\n\x0b\x62lob_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x41\n\x10references_blobs\x18\x06 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12G\n\x16references_directories\x18\x07 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x12\n\x10PushBlobResponse\"\xfa\x02\n\x14PushDirectoryRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04uris\x18\x02 \x03(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12-\n\texpire_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x46\n\x15root_directory_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x41\n\x10references_blobs\x18\x06 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12G\n\x16references_directories\x18\x07 
\x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x17\n\x15PushDirectoryResponse2\xdd\x02\n\x05\x46\x65tch\x12\x9e\x01\n\tFetchBlob\x12-.build.bazel.remote.asset.v1.FetchBlobRequest\x1a..build.bazel.remote.asset.v1.FetchBlobResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1/{instance_name=**}/assets:fetchBlob:\x01*\x12\xb2\x01\n\x0e\x46\x65tchDirectory\x12\x32.build.bazel.remote.asset.v1.FetchDirectoryRequest\x1a\x33.build.bazel.remote.asset.v1.FetchDirectoryResponse\"7\x82\xd3\xe4\x93\x02\x31\",/v1/{instance_name=**}/assets:fetchDirectory:\x01*2\xd4\x02\n\x04Push\x12\x9a\x01\n\x08PushBlob\x12,.build.bazel.remote.asset.v1.PushBlobRequest\x1a-.build.bazel.remote.asset.v1.PushBlobResponse\"1\x82\xd3\xe4\x93\x02+\"&/v1/{instance_name=**}/assets:pushBlob:\x01*\x12\xae\x01\n\rPushDirectory\x12\x31.build.bazel.remote.asset.v1.PushDirectoryRequest\x1a\x32.build.bazel.remote.asset.v1.PushDirectoryResponse\"6\x82\xd3\xe4\x93\x02\x30\"+/v1/{instance_name=**}/assets:pushDirectory:\x01*Ba\n\x1b\x62uild.bazel.remote.asset.v1B\x10RemoteAssetProtoP\x01Z\x0bremoteasset\xa2\x02\x02RA\xaa\x02\x1b\x42uild.Bazel.Remote.Asset.v1b\x06proto3'
+ ,
+ dependencies=[build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
+
+
+
+
+_QUALIFIER = _descriptor.Descriptor(
+ name='Qualifier',
+ full_name='build.bazel.remote.asset.v1.Qualifier',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.asset.v1.Qualifier.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='build.bazel.remote.asset.v1.Qualifier.value', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=255,
+ serialized_end=295,
+)
+
+
+_FETCHBLOBREQUEST = _descriptor.Descriptor(
+ name='FetchBlobRequest',
+ full_name='build.bazel.remote.asset.v1.FetchBlobRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.asset.v1.FetchBlobRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='timeout', full_name='build.bazel.remote.asset.v1.FetchBlobRequest.timeout', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='oldest_content_accepted', full_name='build.bazel.remote.asset.v1.FetchBlobRequest.oldest_content_accepted', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='uris', full_name='build.bazel.remote.asset.v1.FetchBlobRequest.uris', index=3,
+ number=4, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='qualifiers', full_name='build.bazel.remote.asset.v1.FetchBlobRequest.qualifiers', index=4,
+ number=5, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=298,
+ serialized_end=518,
+)
+
+
+_FETCHBLOBRESPONSE = _descriptor.Descriptor(
+ name='FetchBlobResponse',
+ full_name='build.bazel.remote.asset.v1.FetchBlobResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='status', full_name='build.bazel.remote.asset.v1.FetchBlobResponse.status', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='uri', full_name='build.bazel.remote.asset.v1.FetchBlobResponse.uri', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='qualifiers', full_name='build.bazel.remote.asset.v1.FetchBlobResponse.qualifiers', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='expires_at', full_name='build.bazel.remote.asset.v1.FetchBlobResponse.expires_at', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='blob_digest', full_name='build.bazel.remote.asset.v1.FetchBlobResponse.blob_digest', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=521,
+ serialized_end=759,
+)
+
+
+_FETCHDIRECTORYREQUEST = _descriptor.Descriptor(
+ name='FetchDirectoryRequest',
+ full_name='build.bazel.remote.asset.v1.FetchDirectoryRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.asset.v1.FetchDirectoryRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='timeout', full_name='build.bazel.remote.asset.v1.FetchDirectoryRequest.timeout', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='oldest_content_accepted', full_name='build.bazel.remote.asset.v1.FetchDirectoryRequest.oldest_content_accepted', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='uris', full_name='build.bazel.remote.asset.v1.FetchDirectoryRequest.uris', index=3,
+ number=4, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='qualifiers', full_name='build.bazel.remote.asset.v1.FetchDirectoryRequest.qualifiers', index=4,
+ number=5, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=762,
+ serialized_end=987,
+)
+
+
+_FETCHDIRECTORYRESPONSE = _descriptor.Descriptor(
+ name='FetchDirectoryResponse',
+ full_name='build.bazel.remote.asset.v1.FetchDirectoryResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='status', full_name='build.bazel.remote.asset.v1.FetchDirectoryResponse.status', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='uri', full_name='build.bazel.remote.asset.v1.FetchDirectoryResponse.uri', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='qualifiers', full_name='build.bazel.remote.asset.v1.FetchDirectoryResponse.qualifiers', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='expires_at', full_name='build.bazel.remote.asset.v1.FetchDirectoryResponse.expires_at', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='root_directory_digest', full_name='build.bazel.remote.asset.v1.FetchDirectoryResponse.root_directory_digest', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=990,
+ serialized_end=1243,
+)
+
+
+_PUSHBLOBREQUEST = _descriptor.Descriptor(
+ name='PushBlobRequest',
+ full_name='build.bazel.remote.asset.v1.PushBlobRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.asset.v1.PushBlobRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='uris', full_name='build.bazel.remote.asset.v1.PushBlobRequest.uris', index=1,
+ number=2, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='qualifiers', full_name='build.bazel.remote.asset.v1.PushBlobRequest.qualifiers', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='expire_at', full_name='build.bazel.remote.asset.v1.PushBlobRequest.expire_at', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='blob_digest', full_name='build.bazel.remote.asset.v1.PushBlobRequest.blob_digest', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='references_blobs', full_name='build.bazel.remote.asset.v1.PushBlobRequest.references_blobs', index=5,
+ number=6, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='references_directories', full_name='build.bazel.remote.asset.v1.PushBlobRequest.references_directories', index=6,
+ number=7, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1246,
+ serialized_end=1609,
+)
+
+
+_PUSHBLOBRESPONSE = _descriptor.Descriptor(
+ name='PushBlobResponse',
+ full_name='build.bazel.remote.asset.v1.PushBlobResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1611,
+ serialized_end=1629,
+)
+
+
+_PUSHDIRECTORYREQUEST = _descriptor.Descriptor(
+ name='PushDirectoryRequest',
+ full_name='build.bazel.remote.asset.v1.PushDirectoryRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.asset.v1.PushDirectoryRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='uris', full_name='build.bazel.remote.asset.v1.PushDirectoryRequest.uris', index=1,
+ number=2, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='qualifiers', full_name='build.bazel.remote.asset.v1.PushDirectoryRequest.qualifiers', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='expire_at', full_name='build.bazel.remote.asset.v1.PushDirectoryRequest.expire_at', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='root_directory_digest', full_name='build.bazel.remote.asset.v1.PushDirectoryRequest.root_directory_digest', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='references_blobs', full_name='build.bazel.remote.asset.v1.PushDirectoryRequest.references_blobs', index=5,
+ number=6, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='references_directories', full_name='build.bazel.remote.asset.v1.PushDirectoryRequest.references_directories', index=6,
+ number=7, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1632,
+ serialized_end=2010,
+)
+
+
+_PUSHDIRECTORYRESPONSE = _descriptor.Descriptor(
+ name='PushDirectoryResponse',
+ full_name='build.bazel.remote.asset.v1.PushDirectoryResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2012,
+ serialized_end=2035,
+)
+
+_FETCHBLOBREQUEST.fields_by_name['timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
+_FETCHBLOBREQUEST.fields_by_name['oldest_content_accepted'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_FETCHBLOBREQUEST.fields_by_name['qualifiers'].message_type = _QUALIFIER
+_FETCHBLOBRESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_FETCHBLOBRESPONSE.fields_by_name['qualifiers'].message_type = _QUALIFIER
+_FETCHBLOBRESPONSE.fields_by_name['expires_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_FETCHBLOBRESPONSE.fields_by_name['blob_digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_FETCHDIRECTORYREQUEST.fields_by_name['timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
+_FETCHDIRECTORYREQUEST.fields_by_name['oldest_content_accepted'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_FETCHDIRECTORYREQUEST.fields_by_name['qualifiers'].message_type = _QUALIFIER
+_FETCHDIRECTORYRESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_FETCHDIRECTORYRESPONSE.fields_by_name['qualifiers'].message_type = _QUALIFIER
+_FETCHDIRECTORYRESPONSE.fields_by_name['expires_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_FETCHDIRECTORYRESPONSE.fields_by_name['root_directory_digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_PUSHBLOBREQUEST.fields_by_name['qualifiers'].message_type = _QUALIFIER
+_PUSHBLOBREQUEST.fields_by_name['expire_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_PUSHBLOBREQUEST.fields_by_name['blob_digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_PUSHBLOBREQUEST.fields_by_name['references_blobs'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_PUSHBLOBREQUEST.fields_by_name['references_directories'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_PUSHDIRECTORYREQUEST.fields_by_name['qualifiers'].message_type = _QUALIFIER
+_PUSHDIRECTORYREQUEST.fields_by_name['expire_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_PUSHDIRECTORYREQUEST.fields_by_name['root_directory_digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_PUSHDIRECTORYREQUEST.fields_by_name['references_blobs'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_PUSHDIRECTORYREQUEST.fields_by_name['references_directories'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+DESCRIPTOR.message_types_by_name['Qualifier'] = _QUALIFIER
+DESCRIPTOR.message_types_by_name['FetchBlobRequest'] = _FETCHBLOBREQUEST
+DESCRIPTOR.message_types_by_name['FetchBlobResponse'] = _FETCHBLOBRESPONSE
+DESCRIPTOR.message_types_by_name['FetchDirectoryRequest'] = _FETCHDIRECTORYREQUEST
+DESCRIPTOR.message_types_by_name['FetchDirectoryResponse'] = _FETCHDIRECTORYRESPONSE
+DESCRIPTOR.message_types_by_name['PushBlobRequest'] = _PUSHBLOBREQUEST
+DESCRIPTOR.message_types_by_name['PushBlobResponse'] = _PUSHBLOBRESPONSE
+DESCRIPTOR.message_types_by_name['PushDirectoryRequest'] = _PUSHDIRECTORYREQUEST
+DESCRIPTOR.message_types_by_name['PushDirectoryResponse'] = _PUSHDIRECTORYRESPONSE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Qualifier = _reflection.GeneratedProtocolMessageType('Qualifier', (_message.Message,), {
+ 'DESCRIPTOR' : _QUALIFIER,
+ '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.Qualifier)
+ })
+_sym_db.RegisterMessage(Qualifier)
+
+FetchBlobRequest = _reflection.GeneratedProtocolMessageType('FetchBlobRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _FETCHBLOBREQUEST,
+ '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchBlobRequest)
+ })
+_sym_db.RegisterMessage(FetchBlobRequest)
+
+FetchBlobResponse = _reflection.GeneratedProtocolMessageType('FetchBlobResponse', (_message.Message,), {
+ 'DESCRIPTOR' : _FETCHBLOBRESPONSE,
+ '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchBlobResponse)
+ })
+_sym_db.RegisterMessage(FetchBlobResponse)
+
+FetchDirectoryRequest = _reflection.GeneratedProtocolMessageType('FetchDirectoryRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _FETCHDIRECTORYREQUEST,
+ '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchDirectoryRequest)
+ })
+_sym_db.RegisterMessage(FetchDirectoryRequest)
+
+FetchDirectoryResponse = _reflection.GeneratedProtocolMessageType('FetchDirectoryResponse', (_message.Message,), {
+ 'DESCRIPTOR' : _FETCHDIRECTORYRESPONSE,
+ '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchDirectoryResponse)
+ })
+_sym_db.RegisterMessage(FetchDirectoryResponse)
+
+PushBlobRequest = _reflection.GeneratedProtocolMessageType('PushBlobRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _PUSHBLOBREQUEST,
+ '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushBlobRequest)
+ })
+_sym_db.RegisterMessage(PushBlobRequest)
+
+PushBlobResponse = _reflection.GeneratedProtocolMessageType('PushBlobResponse', (_message.Message,), {
+ 'DESCRIPTOR' : _PUSHBLOBRESPONSE,
+ '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushBlobResponse)
+ })
+_sym_db.RegisterMessage(PushBlobResponse)
+
+PushDirectoryRequest = _reflection.GeneratedProtocolMessageType('PushDirectoryRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _PUSHDIRECTORYREQUEST,
+ '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushDirectoryRequest)
+ })
+_sym_db.RegisterMessage(PushDirectoryRequest)
+
+PushDirectoryResponse = _reflection.GeneratedProtocolMessageType('PushDirectoryResponse', (_message.Message,), {
+ 'DESCRIPTOR' : _PUSHDIRECTORYRESPONSE,
+ '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushDirectoryResponse)
+ })
+_sym_db.RegisterMessage(PushDirectoryResponse)
+
+
+DESCRIPTOR._options = None
+
+_FETCH = _descriptor.ServiceDescriptor(
+ name='Fetch',
+ full_name='build.bazel.remote.asset.v1.Fetch',
+ file=DESCRIPTOR,
+ index=0,
+ serialized_options=None,
+ serialized_start=2038,
+ serialized_end=2387,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='FetchBlob',
+ full_name='build.bazel.remote.asset.v1.Fetch.FetchBlob',
+ index=0,
+ containing_service=None,
+ input_type=_FETCHBLOBREQUEST,
+ output_type=_FETCHBLOBRESPONSE,
+ serialized_options=b'\202\323\344\223\002,\"\'/v1/{instance_name=**}/assets:fetchBlob:\001*',
+ ),
+ _descriptor.MethodDescriptor(
+ name='FetchDirectory',
+ full_name='build.bazel.remote.asset.v1.Fetch.FetchDirectory',
+ index=1,
+ containing_service=None,
+ input_type=_FETCHDIRECTORYREQUEST,
+ output_type=_FETCHDIRECTORYRESPONSE,
+ serialized_options=b'\202\323\344\223\0021\",/v1/{instance_name=**}/assets:fetchDirectory:\001*',
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_FETCH)
+
+DESCRIPTOR.services_by_name['Fetch'] = _FETCH
+
+
+_PUSH = _descriptor.ServiceDescriptor(
+ name='Push',
+ full_name='build.bazel.remote.asset.v1.Push',
+ file=DESCRIPTOR,
+ index=1,
+ serialized_options=None,
+ serialized_start=2390,
+ serialized_end=2730,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='PushBlob',
+ full_name='build.bazel.remote.asset.v1.Push.PushBlob',
+ index=0,
+ containing_service=None,
+ input_type=_PUSHBLOBREQUEST,
+ output_type=_PUSHBLOBRESPONSE,
+ serialized_options=b'\202\323\344\223\002+\"&/v1/{instance_name=**}/assets:pushBlob:\001*',
+ ),
+ _descriptor.MethodDescriptor(
+ name='PushDirectory',
+ full_name='build.bazel.remote.asset.v1.Push.PushDirectory',
+ index=1,
+ containing_service=None,
+ input_type=_PUSHDIRECTORYREQUEST,
+ output_type=_PUSHDIRECTORYRESPONSE,
+ serialized_options=b'\202\323\344\223\0020\"+/v1/{instance_name=**}/assets:pushDirectory:\001*',
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_PUSH)
+
+DESCRIPTOR.services_by_name['Push'] = _PUSH
+
+# @@protoc_insertion_point(module_scope)
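
The module above only registers descriptors and message classes; as a quick sanity check of what it exposes, a sketch showing that the generated classes behave like ordinary protobuf messages (construction, serialization, parsing). The qualifier and URI are illustrative.

    from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2

    qualifier = remote_asset_pb2.Qualifier(name="resource_type", value="application/x-tar")
    request = remote_asset_pb2.FetchBlobRequest(
        uris=["https://example.com/a.tar"],
        qualifiers=[qualifier],
    )

    data = request.SerializeToString()
    parsed = remote_asset_pb2.FetchBlobRequest.FromString(data)
    assert parsed.qualifiers[0].name == "resource_type"
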
diff --git a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py
new file mode 100644
index 000000000..38d31a2a1
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py
@@ -0,0 +1,324 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2 as build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2
+
+
+class FetchStub(object):
+ """The Fetch service resolves or fetches assets referenced by URI and
+ Qualifiers, returning a Digest for the content in
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.FetchBlob = channel.unary_unary(
+ '/build.bazel.remote.asset.v1.Fetch/FetchBlob',
+ request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.FromString,
+ )
+ self.FetchDirectory = channel.unary_unary(
+ '/build.bazel.remote.asset.v1.Fetch/FetchDirectory',
+ request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.FromString,
+ )
+
+
+class FetchServicer(object):
+ """The Fetch service resolves or fetches assets referenced by URI and
+ Qualifiers, returning a Digest for the content in
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def FetchBlob(self, request, context):
+ """Resolve or fetch referenced assets, making them available to the caller and
+ other consumers in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+
+ Servers *MAY* fetch content that they do not already have cached, for any
+ URLs they support.
+
+ Servers *SHOULD* ensure that referenced files are present in the CAS at the
+ time of the response, and (if supported) that they will remain available
+ for a reasonable period of time. The TTLs of the referenced blobs *SHOULD*
+ be increased if necessary and applicable.
+ In the event that a client receives a reference to content that is no
+ longer present, it *MAY* re-issue the request with
+ `oldest_content_accepted` set to a more recent timestamp than the original
+ attempt, to induce a re-fetch from origin.
+
+ Servers *MAY* cache fetched content and reuse it for subsequent requests,
+ subject to `oldest_content_accepted`.
+
+ Servers *MAY* support the complementary [Push][build.bazel.remote.asset.v1.Push]
+ API and allow content to be directly inserted for use in future fetch
+ responses.
+
+ Servers *MUST* ensure Fetch'd content matches all the specified
+ qualifiers except in the case of previously Push'd resources, for which
+ the server *MAY* trust the pushing client to have set the qualifiers
+ correctly, without validation.
+
+ Servers not implementing the complementary [Push][build.bazel.remote.asset.v1.Push]
+ API *MUST* reject requests containing qualifiers they do not support.
+
+ Servers *MAY* transform assets as part of the fetch. For example, a
+ tarball fetched by [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]
+ might be unpacked, or a Git repository
+ fetched by [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]
+ might be passed through `git-archive`.
+
+ Errors handling the requested assets will be returned as gRPC Status errors
+ here; errors outside the server's control will be returned inline in the
+ `status` field of the response (see comment there for details).
+ The possible RPC errors include:
+ * `INVALID_ARGUMENT`: One or more arguments were invalid, such as a
+ qualifier that is not supported by the server.
+ * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
+ perform the requested operation. The client may retry after a delay.
+ * `UNAVAILABLE`: Due to a transient condition the operation could not be
+ completed. The client should retry.
+ * `INTERNAL`: An internal error occurred while performing the operation.
+ The client should retry.
+ * `DEADLINE_EXCEEDED`: The fetch could not be completed within the given
+ RPC deadline. The client should retry for at least as long as the value
+ provided in `timeout` field of the request.
+
+ In the case of unsupported qualifiers, the server *SHOULD* additionally
+ send a [BadRequest][google.rpc.BadRequest] error detail where, for each
+ unsupported qualifier, there is a `FieldViolation` with a `field` of
+ `qualifiers.name` and a `description` of `"{qualifier}" not supported`
+ indicating the name of the unsupported qualifier.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def FetchDirectory(self, request, context):
+ """Missing associated documentation comment in .proto file"""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_FetchServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'FetchBlob': grpc.unary_unary_rpc_method_handler(
+ servicer.FetchBlob,
+ request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.SerializeToString,
+ ),
+ 'FetchDirectory': grpc.unary_unary_rpc_method_handler(
+ servicer.FetchDirectory,
+ request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.asset.v1.Fetch', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
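
A minimal sketch of wiring a Fetch servicer into a gRPC server with the helper above; the in-memory index and port are assumptions for illustration, not part of this change. Methods that are not overridden (here, FetchDirectory) fall back to the UNIMPLEMENTED responses defined in FetchServicer.

    from concurrent import futures

    import grpc

    from buildstream._protos.build.bazel.remote.asset.v1 import (
        remote_asset_pb2,
        remote_asset_pb2_grpc,
    )


    class InMemoryFetch(remote_asset_pb2_grpc.FetchServicer):
        def __init__(self, index):
            self._index = index  # hypothetical mapping: uri -> Digest

        def FetchBlob(self, request, context):
            response = remote_asset_pb2.FetchBlobResponse()
            for uri in request.uris:
                digest = self._index.get(uri)
                if digest is not None:
                    response.uri = uri
                    response.blob_digest.CopyFrom(digest)
                    return response
            response.status.code = 5  # google.rpc.Code.NOT_FOUND
            return response


    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    remote_asset_pb2_grpc.add_FetchServicer_to_server(InMemoryFetch({}), server)
    server.add_insecure_port("[::]:50052")
    server.start()
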
+
+
+ # This class is part of an EXPERIMENTAL API.
+class Fetch(object):
+ """The Fetch service resolves or fetches assets referenced by URI and
+ Qualifiers, returning a Digest for the content in
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ @staticmethod
+ def FetchBlob(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Fetch/FetchBlob',
+ build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def FetchDirectory(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Fetch/FetchDirectory',
+ build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+
+class PushStub(object):
+ """The Push service is complementary to the Fetch, and allows for
+ associating contents of URLs to be returned in future Fetch API calls.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.PushBlob = channel.unary_unary(
+ '/build.bazel.remote.asset.v1.Push/PushBlob',
+ request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.FromString,
+ )
+ self.PushDirectory = channel.unary_unary(
+ '/build.bazel.remote.asset.v1.Push/PushDirectory',
+ request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.FromString,
+ )
+
+
+class PushServicer(object):
+ """The Push service is complementary to the Fetch, and allows for
+ associating contents of URLs to be returned in future Fetch API calls.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def PushBlob(self, request, context):
+ """These APIs associate the identifying information of a resource, as
+ indicated by URI and optionally Qualifiers, with content available in the
+ CAS. For example, associating a repository url and a commit id with a
+ Directory Digest.
+
+ Servers *SHOULD* only allow trusted clients to associate content, and *MAY*
+ only allow certain URIs to be pushed.
+
+ Clients *MUST* ensure associated content is available in CAS prior to
+ pushing.
+
+ Clients *MUST* ensure the Qualifiers listed correctly match the contents,
+ and Servers *MAY* trust these values without validation.
+ Fetch servers *MAY* require exact match of all qualifiers when returning
+ content previously pushed, or allow fetching content with only a subset of
+ the qualifiers specified on Push.
+
+ Clients can specify expiration information that the server *SHOULD*
+ respect. Subsequent requests can be used to alter the expiration time.
+
+ A minimal compliant Fetch implementation may support only Push'd content
+ and return `NOT_FOUND` for any resource that was not pushed first.
+ Alternatively, a compliant implementation may choose to not support Push
+ and only return resources that can be Fetch'd from origin.
+
+ Errors will be returned as gRPC Status errors.
+ The possible RPC errors include:
+ * `INVALID_ARGUMENT`: One or more arguments to the RPC were invalid.
+ * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
+ perform the requested operation. The client may retry after a delay.
+ * `UNAVAILABLE`: Due to a transient condition the operation could not be
+ completed. The client should retry.
+ * `INTERNAL`: An internal error occurred while performing the operation.
+ The client should retry.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def PushDirectory(self, request, context):
+ """Missing associated documentation comment in .proto file"""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_PushServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'PushBlob': grpc.unary_unary_rpc_method_handler(
+ servicer.PushBlob,
+ request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.SerializeToString,
+ ),
+ 'PushDirectory': grpc.unary_unary_rpc_method_handler(
+ servicer.PushDirectory,
+ request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.asset.v1.Push', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class Push(object):
+ """The Push service is complementary to the Fetch, and allows for
+ associating contents of URLs to be returned in future Fetch API calls.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ @staticmethod
+ def PushBlob(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Push/PushBlob',
+ build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def PushDirectory(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Push/PushDirectory',
+ build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
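
These regenerated stubs are the client-side entry points used to associate asset URIs with CAS digests. Below is a minimal, illustrative sketch (not code from this merge) of how a client could exercise the new Push and Fetch services over a plain gRPC channel; the server address, instance name, URI and digest values are placeholders, and the message field names (`uris`, `root_directory_digest`) follow the upstream remote_asset.proto.

import grpc

from buildstream._protos.build.bazel.remote.asset.v1 import (
    remote_asset_pb2,
    remote_asset_pb2_grpc,
)
from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

channel = grpc.insecure_channel("localhost:50052")

# Placeholder digest of a Directory message assumed to already be present
# in CAS; Push only associates URIs with existing content.
directory_digest = remote_execution_pb2.Digest(hash="0" * 64, size_bytes=142)
uri = "https://example.invalid/artifacts/element/cache-key"

push = remote_asset_pb2_grpc.PushStub(channel)
push.PushDirectory(
    remote_asset_pb2.PushDirectoryRequest(
        instance_name="",
        uris=[uri],
        root_directory_digest=directory_digest,
    )
)

fetch = remote_asset_pb2_grpc.FetchStub(channel)
response = fetch.FetchDirectory(
    remote_asset_pb2.FetchDirectoryRequest(instance_name="", uris=[uri])
)
print(response.root_directory_digest.hash)

BuildStream itself drives these RPCs through its internal cache classes; the sketch is only meant to show the shape of the Push/Fetch round trip.
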
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
index 33ca1c6aa..19e3d337a 100644
--- a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
@@ -6,621 +6,887 @@ from buildstream._protos.google.longrunning import operations_pb2 as google_dot_
class ExecutionStub(object):
- """The Remote Execution API is used to execute an
- [Action][build.bazel.remote.execution.v2.Action] on the remote
- workers.
-
- As with other services in the Remote Execution API, any call may return an
- error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
- information about when the client should retry the request; clients SHOULD
- respect the information provided.
- """
+ """The Remote Execution API is used to execute an
+ [Action][build.bazel.remote.execution.v2.Action] on the remote
+ workers.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
- def __init__(self, channel):
- """Constructor.
+ def __init__(self, channel):
+ """Constructor.
- Args:
- channel: A grpc.Channel.
- """
- self.Execute = channel.unary_stream(
- '/build.bazel.remote.execution.v2.Execution/Execute',
- request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.SerializeToString,
- response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
- )
- self.WaitExecution = channel.unary_stream(
- '/build.bazel.remote.execution.v2.Execution/WaitExecution',
- request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.SerializeToString,
- response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
- )
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.Execute = channel.unary_stream(
+ '/build.bazel.remote.execution.v2.Execution/Execute',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
+ self.WaitExecution = channel.unary_stream(
+ '/build.bazel.remote.execution.v2.Execution/WaitExecution',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
class ExecutionServicer(object):
- """The Remote Execution API is used to execute an
- [Action][build.bazel.remote.execution.v2.Action] on the remote
- workers.
-
- As with other services in the Remote Execution API, any call may return an
- error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
- information about when the client should retry the request; clients SHOULD
- respect the information provided.
- """
-
- def Execute(self, request, context):
- """Execute an action remotely.
-
- In order to execute an action, the client must first upload all of the
- inputs, the
- [Command][build.bazel.remote.execution.v2.Command] to run, and the
- [Action][build.bazel.remote.execution.v2.Action] into the
- [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
- It then calls `Execute` with an `action_digest` referring to them. The
- server will run the action and eventually return the result.
-
- The input `Action`'s fields MUST meet the various canonicalization
- requirements specified in the documentation for their types so that it has
- the same digest as other logically equivalent `Action`s. The server MAY
- enforce the requirements and return errors if a non-canonical input is
- received. It MAY also proceed without verifying some or all of the
- requirements, such as for performance reasons. If the server does not
- verify the requirement, then it will treat the `Action` as distinct from
- another logically equivalent action if they hash differently.
-
- Returns a stream of
- [google.longrunning.Operation][google.longrunning.Operation] messages
- describing the resulting execution, with eventual `response`
- [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The
- `metadata` on the operation is of type
- [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata].
-
- If the client remains connected after the first response is returned after
- the server, then updates are streamed as if the client had called
- [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]
- until the execution completes or the request reaches an error. The
- operation can also be queried using [Operations
- API][google.longrunning.Operations.GetOperation].
-
- The server NEED NOT implement other methods or functionality of the
- Operations API.
-
- Errors discovered during creation of the `Operation` will be reported
- as gRPC Status errors, while errors that occurred while running the
- action will be reported in the `status` field of the `ExecuteResponse`. The
- server MUST NOT set the `error` field of the `Operation` proto.
- The possible errors include:
-
- * `INVALID_ARGUMENT`: One or more arguments are invalid.
- * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
- action requested, such as a missing input or command or no worker being
- available. The client may be able to fix the errors and retry.
- * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
- the action.
- * `UNAVAILABLE`: Due to a transient condition, such as all workers being
- occupied (and the server does not support a queue), the action could not
- be started. The client should retry.
- * `INTERNAL`: An internal error occurred in the execution engine or the
- worker.
- * `DEADLINE_EXCEEDED`: The execution timed out.
- * `CANCELLED`: The operation was cancelled by the client. This status is
- only possible if the server implements the Operations API CancelOperation
- method, and it was called for the current execution.
-
- In the case of a missing input or command, the server SHOULD additionally
- send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
- where, for each requested blob not present in the CAS, there is a
- `Violation` with a `type` of `MISSING` and a `subject` of
- `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+ """The Remote Execution API is used to execute an
+ [Action][build.bazel.remote.execution.v2.Action] on the remote
+ workers.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
"""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def WaitExecution(self, request, context):
- """Wait for an execution operation to complete. When the client initially
- makes the request, the server immediately responds with the current status
- of the execution. The server will leave the request stream open until the
- operation completes, and then respond with the completed operation. The
- server MAY choose to stream additional updates as execution progresses,
- such as to provide an update as to the state of the execution.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+
+ def Execute(self, request, context):
+ """Execute an action remotely.
+
+ In order to execute an action, the client must first upload all of the
+ inputs, the
+ [Command][build.bazel.remote.execution.v2.Command] to run, and the
+ [Action][build.bazel.remote.execution.v2.Action] into the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ It then calls `Execute` with an `action_digest` referring to them. The
+ server will run the action and eventually return the result.
+
+ The input `Action`'s fields MUST meet the various canonicalization
+ requirements specified in the documentation for their types so that it has
+ the same digest as other logically equivalent `Action`s. The server MAY
+ enforce the requirements and return errors if a non-canonical input is
+ received. It MAY also proceed without verifying some or all of the
+ requirements, such as for performance reasons. If the server does not
+ verify the requirement, then it will treat the `Action` as distinct from
+ another logically equivalent action if they hash differently.
+
+ Returns a stream of
+ [google.longrunning.Operation][google.longrunning.Operation] messages
+ describing the resulting execution, with eventual `response`
+ [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The
+ `metadata` on the operation is of type
+ [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata].
+
+ If the client remains connected after the first response is returned after
+ the server, then updates are streamed as if the client had called
+ [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]
+ until the execution completes or the request reaches an error. The
+ operation can also be queried using [Operations
+ API][google.longrunning.Operations.GetOperation].
+
+ The server NEED NOT implement other methods or functionality of the
+ Operations API.
+
+ Errors discovered during creation of the `Operation` will be reported
+ as gRPC Status errors, while errors that occurred while running the
+ action will be reported in the `status` field of the `ExecuteResponse`. The
+ server MUST NOT set the `error` field of the `Operation` proto.
+ The possible errors include:
+
+ * `INVALID_ARGUMENT`: One or more arguments are invalid.
+ * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
+ action requested, such as a missing input or command or no worker being
+ available. The client may be able to fix the errors and retry.
+ * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
+ the action.
+ * `UNAVAILABLE`: Due to a transient condition, such as all workers being
+ occupied (and the server does not support a queue), the action could not
+ be started. The client should retry.
+ * `INTERNAL`: An internal error occurred in the execution engine or the
+ worker.
+ * `DEADLINE_EXCEEDED`: The execution timed out.
+ * `CANCELLED`: The operation was cancelled by the client. This status is
+ only possible if the server implements the Operations API CancelOperation
+ method, and it was called for the current execution.
+
+ In the case of a missing input or command, the server SHOULD additionally
+ send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
+ where, for each requested blob not present in the CAS, there is a
+ `Violation` with a `type` of `MISSING` and a `subject` of
+ `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def WaitExecution(self, request, context):
+ """Wait for an execution operation to complete. When the client initially
+ makes the request, the server immediately responds with the current status
+ of the execution. The server will leave the request stream open until the
+ operation completes, and then respond with the completed operation. The
+ server MAY choose to stream additional updates as execution progresses,
+ such as to provide an update as to the state of the execution.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
def add_ExecutionServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'Execute': grpc.unary_stream_rpc_method_handler(
- servicer.Execute,
- request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.FromString,
- response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
- ),
- 'WaitExecution': grpc.unary_stream_rpc_method_handler(
- servicer.WaitExecution,
- request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.FromString,
- response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'build.bazel.remote.execution.v2.Execution', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
+ rpc_method_handlers = {
+ 'Execute': grpc.unary_stream_rpc_method_handler(
+ servicer.Execute,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ 'WaitExecution': grpc.unary_stream_rpc_method_handler(
+ servicer.WaitExecution,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.Execution', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class Execution(object):
+ """The Remote Execution API is used to execute an
+ [Action][build.bazel.remote.execution.v2.Action] on the remote
+ workers.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ @staticmethod
+ def Execute(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_stream(request, target, '/build.bazel.remote.execution.v2.Execution/Execute',
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def WaitExecution(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_stream(request, target, '/build.bazel.remote.execution.v2.Execution/WaitExecution',
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
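
For context on the Execute flow described in the docstrings above, here is a minimal sketch (not part of this change) of submitting an already-uploaded Action and draining the resulting Operation stream; the address and digest are placeholders.

import grpc

from buildstream._protos.build.bazel.remote.execution.v2 import (
    remote_execution_pb2,
    remote_execution_pb2_grpc,
)

channel = grpc.insecure_channel("localhost:50051")
execution = remote_execution_pb2_grpc.ExecutionStub(channel)

request = remote_execution_pb2.ExecuteRequest(
    instance_name="",
    # Placeholder digest of an Action already uploaded to CAS.
    action_digest=remote_execution_pb2.Digest(hash="0" * 64, size_bytes=142),
    skip_cache_lookup=False,
)

operation = None
for operation in execution.Execute(request):
    pass  # Each message is a google.longrunning Operation status update.

if operation is not None and operation.done:
    execute_response = remote_execution_pb2.ExecuteResponse()
    operation.response.Unpack(execute_response)
    print(execute_response.result.exit_code)
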
class ActionCacheStub(object):
- """The action cache API is used to query whether a given action has already been
- performed and, if so, retrieve its result. Unlike the
- [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
- which addresses blobs by their own content, the action cache addresses the
- [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
- digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
- which produced them.
-
- The lifetime of entries in the action cache is implementation-specific, but
- the server SHOULD assume that more recently used entries are more likely to
- be used again.
-
- As with other services in the Remote Execution API, any call may return an
- error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
- information about when the client should retry the request; clients SHOULD
- respect the information provided.
- """
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
+ """The action cache API is used to query whether a given action has already been
+ performed and, if so, retrieve its result. Unlike the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
+ which addresses blobs by their own content, the action cache addresses the
+ [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
+ digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
+ which produced them.
+
+ The lifetime of entries in the action cache is implementation-specific, but
+ the server SHOULD assume that more recently used entries are more likely to
+ be used again.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
"""
- self.GetActionResult = channel.unary_unary(
- '/build.bazel.remote.execution.v2.ActionCache/GetActionResult',
- request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.SerializeToString,
- response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
- )
- self.UpdateActionResult = channel.unary_unary(
- '/build.bazel.remote.execution.v2.ActionCache/UpdateActionResult',
- request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.SerializeToString,
- response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
- )
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetActionResult = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ActionCache/GetActionResult',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
+ )
+ self.UpdateActionResult = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ActionCache/UpdateActionResult',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
+ )
class ActionCacheServicer(object):
- """The action cache API is used to query whether a given action has already been
- performed and, if so, retrieve its result. Unlike the
- [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
- which addresses blobs by their own content, the action cache addresses the
- [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
- digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
- which produced them.
-
- The lifetime of entries in the action cache is implementation-specific, but
- the server SHOULD assume that more recently used entries are more likely to
- be used again.
-
- As with other services in the Remote Execution API, any call may return an
- error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
- information about when the client should retry the request; clients SHOULD
- respect the information provided.
- """
-
- def GetActionResult(self, request, context):
- """Retrieve a cached execution result.
-
- Implementations SHOULD ensure that any blobs referenced from the
- [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
- are available at the time of returning the
- [ActionResult][build.bazel.remote.execution.v2.ActionResult] and will be
- for some period of time afterwards. The TTLs of the referenced blobs SHOULD be increased
- if necessary and applicable.
-
- Errors:
-
- * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+ """The action cache API is used to query whether a given action has already been
+ performed and, if so, retrieve its result. Unlike the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
+ which addresses blobs by their own content, the action cache addresses the
+ [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
+ digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
+ which produced them.
+
+ The lifetime of entries in the action cache is implementation-specific, but
+ the server SHOULD assume that more recently used entries are more likely to
+ be used again.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
"""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def UpdateActionResult(self, request, context):
- """Upload a new execution result.
-
- In order to allow the server to perform access control based on the type of
- action, and to assist with client debugging, the client MUST first upload
- the [Action][build.bazel.remote.execution.v2.Execution] that produced the
- result, along with its
- [Command][build.bazel.remote.execution.v2.Command], into the
- `ContentAddressableStorage`.
-
- Errors:
-
- * `INVALID_ARGUMENT`: One or more arguments are invalid.
- * `FAILED_PRECONDITION`: One or more errors occurred in updating the
- action result, such as a missing command or action.
- * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
- entry to the cache.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+
+ def GetActionResult(self, request, context):
+ """Retrieve a cached execution result.
+
+ Implementations SHOULD ensure that any blobs referenced from the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+ are available at the time of returning the
+ [ActionResult][build.bazel.remote.execution.v2.ActionResult] and will be
+ for some period of time afterwards. The TTLs of the referenced blobs SHOULD be increased
+ if necessary and applicable.
+
+ Errors:
+
+ * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def UpdateActionResult(self, request, context):
+ """Upload a new execution result.
+
+ In order to allow the server to perform access control based on the type of
+ action, and to assist with client debugging, the client MUST first upload
+ the [Action][build.bazel.remote.execution.v2.Execution] that produced the
+ result, along with its
+ [Command][build.bazel.remote.execution.v2.Command], into the
+ `ContentAddressableStorage`.
+
+ Errors:
+
+ * `INVALID_ARGUMENT`: One or more arguments are invalid.
+ * `FAILED_PRECONDITION`: One or more errors occurred in updating the
+ action result, such as a missing command or action.
+ * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+ entry to the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
def add_ActionCacheServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'GetActionResult': grpc.unary_unary_rpc_method_handler(
- servicer.GetActionResult,
- request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.FromString,
- response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString,
- ),
- 'UpdateActionResult': grpc.unary_unary_rpc_method_handler(
- servicer.UpdateActionResult,
- request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.FromString,
- response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'build.bazel.remote.execution.v2.ActionCache', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
+ rpc_method_handlers = {
+ 'GetActionResult': grpc.unary_unary_rpc_method_handler(
+ servicer.GetActionResult,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString,
+ ),
+ 'UpdateActionResult': grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateActionResult,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.ActionCache', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class ActionCache(object):
+ """The action cache API is used to query whether a given action has already been
+ performed and, if so, retrieve its result. Unlike the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
+ which addresses blobs by their own content, the action cache addresses the
+ [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
+ digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
+ which produced them.
+
+ The lifetime of entries in the action cache is implementation-specific, but
+ the server SHOULD assume that more recently used entries are more likely to
+ be used again.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ @staticmethod
+ def GetActionResult(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ActionCache/GetActionResult',
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def UpdateActionResult(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ActionCache/UpdateActionResult',
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
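
A corresponding sketch for the action cache described above: probe for a cached result and treat `NOT_FOUND` as a cache miss. The address and digest are placeholders; this is illustrative only, not code from this merge.

import grpc

from buildstream._protos.build.bazel.remote.execution.v2 import (
    remote_execution_pb2,
    remote_execution_pb2_grpc,
)

channel = grpc.insecure_channel("localhost:50051")
action_cache = remote_execution_pb2_grpc.ActionCacheStub(channel)

request = remote_execution_pb2.GetActionResultRequest(
    instance_name="",
    # Placeholder digest of the encoded Action whose result is looked up.
    action_digest=remote_execution_pb2.Digest(hash="0" * 64, size_bytes=142),
)

try:
    action_result = action_cache.GetActionResult(request)
    print("cache hit, exit code:", action_result.exit_code)
except grpc.RpcError as error:
    if error.code() == grpc.StatusCode.NOT_FOUND:
        print("cache miss")
    else:
        raise
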
class ContentAddressableStorageStub(object):
- """The CAS (content-addressable storage) is used to store the inputs to and
- outputs from the execution service. Each piece of content is addressed by the
- digest of its binary data.
-
- Most of the binary data stored in the CAS is opaque to the execution engine,
- and is only used as a communication medium. In order to build an
- [Action][build.bazel.remote.execution.v2.Action],
- however, the client will need to also upload the
- [Command][build.bazel.remote.execution.v2.Command] and input root
- [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
- The Command and Directory messages must be marshalled to wire format and then
- uploaded under the hash as with any other piece of content. In practice, the
- input root directory is likely to refer to other Directories in its
- hierarchy, which must also each be uploaded on their own.
-
- For small file uploads the client should group them together and call
- [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
- For large uploads, the client must use the
- [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
- `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
- where `instance_name` is as described in the next paragraph, `uuid` is a
- version 4 UUID generated by the client, and `hash` and `size` are the
- [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
- `uuid` is used only to avoid collisions when multiple clients try to upload
- the same file (or the same client tries to upload the file multiple times at
- once on different threads), so the client MAY reuse the `uuid` for uploading
- different blobs. The `resource_name` may optionally have a trailing filename
- (or other metadata) for a client to use if it is storing URLs, as in
- `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
- after the `size` is ignored.
-
- A single server MAY support multiple instances of the execution system, each
- with their own workers, storage, cache, etc. The exact relationship between
- instances is up to the server. If the server does, then the `instance_name`
- is an identifier, possibly containing multiple path segments, used to
- distinguish between the various instances on the server, in a manner defined
- by the server. For servers which do not support multiple instances, then the
- `instance_name` is the empty path and the leading slash is omitted, so that
- the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
- To simplify parsing, a path segment cannot equal any of the following
- keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
- `capabilities`.
-
- When attempting an upload, if another client has already completed the upload
- (which may occur in the middle of a single upload if another client uploads
- the same blob concurrently), the request will terminate immediately with
- a response whose `committed_size` is the full size of the uploaded file
- (regardless of how much data was transmitted by the client). If the client
- completes the upload but the
- [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
- `INVALID_ARGUMENT` error will be returned. In either case, the client should
- not attempt to retry the upload.
-
- For downloading blobs, the client must use the
- [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
- a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
- `instance_name` is the instance name (see above), and `hash` and `size` are
- the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
-
- The lifetime of entries in the CAS is implementation specific, but it SHOULD
- be long enough to allow for newly-added and recently looked-up entries to be
- used in subsequent calls (e.g. to
- [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
-
- Servers MUST behave as though empty blobs are always available, even if they
- have not been uploaded. Clients MAY optimize away the uploading or
- downloading of empty blobs.
-
- As with other services in the Remote Execution API, any call may return an
- error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
- information about when the client should retry the request; clients SHOULD
- respect the information provided.
- """
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
+ """The CAS (content-addressable storage) is used to store the inputs to and
+ outputs from the execution service. Each piece of content is addressed by the
+ digest of its binary data.
+
+ Most of the binary data stored in the CAS is opaque to the execution engine,
+ and is only used as a communication medium. In order to build an
+ [Action][build.bazel.remote.execution.v2.Action],
+ however, the client will need to also upload the
+ [Command][build.bazel.remote.execution.v2.Command] and input root
+ [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
+ The Command and Directory messages must be marshalled to wire format and then
+ uploaded under the hash as with any other piece of content. In practice, the
+ input root directory is likely to refer to other Directories in its
+ hierarchy, which must also each be uploaded on their own.
+
+ For small file uploads the client should group them together and call
+ [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+ For large uploads, the client must use the
+ [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+ `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+ where `instance_name` is as described in the next paragraph, `uuid` is a
+ version 4 UUID generated by the client, and `hash` and `size` are the
+ [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
+ `uuid` is used only to avoid collisions when multiple clients try to upload
+ the same file (or the same client tries to upload the file multiple times at
+ once on different threads), so the client MAY reuse the `uuid` for uploading
+ different blobs. The `resource_name` may optionally have a trailing filename
+ (or other metadata) for a client to use if it is storing URLs, as in
+ `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+ after the `size` is ignored.
+
+ A single server MAY support multiple instances of the execution system, each
+ with their own workers, storage, cache, etc. The exact relationship between
+ instances is up to the server. If the server does, then the `instance_name`
+ is an identifier, possibly containing multiple path segments, used to
+ distinguish between the various instances on the server, in a manner defined
+ by the server. For servers which do not support multiple instances, then the
+ `instance_name` is the empty path and the leading slash is omitted, so that
+ the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+ To simplify parsing, a path segment cannot equal any of the following
+ keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
+ `capabilities`.
+
+ When attempting an upload, if another client has already completed the upload
+ (which may occur in the middle of a single upload if another client uploads
+ the same blob concurrently), the request will terminate immediately with
+ a response whose `committed_size` is the full size of the uploaded file
+ (regardless of how much data was transmitted by the client). If the client
+ completes the upload but the
+ [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
+ `INVALID_ARGUMENT` error will be returned. In either case, the client should
+ not attempt to retry the upload.
+
+ For downloading blobs, the client must use the
+ [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+ a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+ `instance_name` is the instance name (see above), and `hash` and `size` are
+ the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+
+ The lifetime of entries in the CAS is implementation specific, but it SHOULD
+ be long enough to allow for newly-added and recently looked-up entries to be
+ used in subsequent calls (e.g. to
+ [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
+
+ Servers MUST behave as though empty blobs are always available, even if they
+ have not been uploaded. Clients MAY optimize away the uploading or
+ downloading of empty blobs.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
"""
- self.FindMissingBlobs = channel.unary_unary(
- '/build.bazel.remote.execution.v2.ContentAddressableStorage/FindMissingBlobs',
- request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.SerializeToString,
- response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.FromString,
- )
- self.BatchUpdateBlobs = channel.unary_unary(
- '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchUpdateBlobs',
- request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.SerializeToString,
- response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.FromString,
- )
- self.BatchReadBlobs = channel.unary_unary(
- '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchReadBlobs',
- request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.SerializeToString,
- response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.FromString,
- )
- self.GetTree = channel.unary_stream(
- '/build.bazel.remote.execution.v2.ContentAddressableStorage/GetTree',
- request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.SerializeToString,
- response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.FromString,
- )
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.FindMissingBlobs = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/FindMissingBlobs',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.FromString,
+ )
+ self.BatchUpdateBlobs = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchUpdateBlobs',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.FromString,
+ )
+ self.BatchReadBlobs = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchReadBlobs',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.FromString,
+ )
+ self.GetTree = channel.unary_stream(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/GetTree',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.FromString,
+ )
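
For the small-blob CAS path described in the docstrings above (FindMissingBlobs followed by BatchUpdateBlobs), a minimal sketch that uploads a blob only if the server reports it missing; the address is a placeholder and SHA-256 is assumed as the digest function.

import hashlib

import grpc

from buildstream._protos.build.bazel.remote.execution.v2 import (
    remote_execution_pb2,
    remote_execution_pb2_grpc,
)

channel = grpc.insecure_channel("localhost:50051")
cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(channel)

data = b"hello, cas\n"
digest = remote_execution_pb2.Digest(
    hash=hashlib.sha256(data).hexdigest(), size_bytes=len(data)
)

missing = cas.FindMissingBlobs(
    remote_execution_pb2.FindMissingBlobsRequest(
        instance_name="", blob_digests=[digest]
    )
)
missing_hashes = {d.hash for d in missing.missing_blob_digests}

if digest.hash in missing_hashes:
    response = cas.BatchUpdateBlobs(
        remote_execution_pb2.BatchUpdateBlobsRequest(
            instance_name="",
            requests=[
                remote_execution_pb2.BatchUpdateBlobsRequest.Request(
                    digest=digest, data=data
                )
            ],
        )
    )
    for entry in response.responses:
        print(entry.digest.hash, entry.status.code)
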
class ContentAddressableStorageServicer(object):
- """The CAS (content-addressable storage) is used to store the inputs to and
- outputs from the execution service. Each piece of content is addressed by the
- digest of its binary data.
-
- Most of the binary data stored in the CAS is opaque to the execution engine,
- and is only used as a communication medium. In order to build an
- [Action][build.bazel.remote.execution.v2.Action],
- however, the client will need to also upload the
- [Command][build.bazel.remote.execution.v2.Command] and input root
- [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
- The Command and Directory messages must be marshalled to wire format and then
- uploaded under the hash as with any other piece of content. In practice, the
- input root directory is likely to refer to other Directories in its
- hierarchy, which must also each be uploaded on their own.
-
- For small file uploads the client should group them together and call
- [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
- For large uploads, the client must use the
- [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
- `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
- where `instance_name` is as described in the next paragraph, `uuid` is a
- version 4 UUID generated by the client, and `hash` and `size` are the
- [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
- `uuid` is used only to avoid collisions when multiple clients try to upload
- the same file (or the same client tries to upload the file multiple times at
- once on different threads), so the client MAY reuse the `uuid` for uploading
- different blobs. The `resource_name` may optionally have a trailing filename
- (or other metadata) for a client to use if it is storing URLs, as in
- `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
- after the `size` is ignored.
-
- A single server MAY support multiple instances of the execution system, each
- with their own workers, storage, cache, etc. The exact relationship between
- instances is up to the server. If the server does, then the `instance_name`
- is an identifier, possibly containing multiple path segments, used to
- distinguish between the various instances on the server, in a manner defined
- by the server. For servers which do not support multiple instances, then the
- `instance_name` is the empty path and the leading slash is omitted, so that
- the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
- To simplify parsing, a path segment cannot equal any of the following
- keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
- `capabilities`.
-
- When attempting an upload, if another client has already completed the upload
- (which may occur in the middle of a single upload if another client uploads
- the same blob concurrently), the request will terminate immediately with
- a response whose `committed_size` is the full size of the uploaded file
- (regardless of how much data was transmitted by the client). If the client
- completes the upload but the
- [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
- `INVALID_ARGUMENT` error will be returned. In either case, the client should
- not attempt to retry the upload.
-
- For downloading blobs, the client must use the
- [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
- a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
- `instance_name` is the instance name (see above), and `hash` and `size` are
- the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
-
- The lifetime of entries in the CAS is implementation specific, but it SHOULD
- be long enough to allow for newly-added and recently looked-up entries to be
- used in subsequent calls (e.g. to
- [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
-
- Servers MUST behave as though empty blobs are always available, even if they
- have not been uploaded. Clients MAY optimize away the uploading or
- downloading of empty blobs.
-
- As with other services in the Remote Execution API, any call may return an
- error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
- information about when the client should retry the request; clients SHOULD
- respect the information provided.
- """
-
- def FindMissingBlobs(self, request, context):
- """Determine if blobs are present in the CAS.
-
- Clients can use this API before uploading blobs to determine which ones are
- already present in the CAS and do not need to be uploaded again.
-
- Servers SHOULD increase the TTLs of the referenced blobs if necessary and
- applicable.
-
- There are no method-specific errors.
+ """The CAS (content-addressable storage) is used to store the inputs to and
+ outputs from the execution service. Each piece of content is addressed by the
+ digest of its binary data.
+
+ Most of the binary data stored in the CAS is opaque to the execution engine,
+ and is only used as a communication medium. In order to build an
+ [Action][build.bazel.remote.execution.v2.Action],
+ however, the client will need to also upload the
+ [Command][build.bazel.remote.execution.v2.Command] and input root
+ [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
+ The Command and Directory messages must be marshalled to wire format and then
+ uploaded under the hash as with any other piece of content. In practice, the
+ input root directory is likely to refer to other Directories in its
+ hierarchy, which must also each be uploaded on their own.
+
+ For small file uploads the client should group them together and call
+ [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+ For large uploads, the client must use the
+ [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+ `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+ where `instance_name` is as described in the next paragraph, `uuid` is a
+ version 4 UUID generated by the client, and `hash` and `size` are the
+ [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
+ `uuid` is used only to avoid collisions when multiple clients try to upload
+ the same file (or the same client tries to upload the file multiple times at
+ once on different threads), so the client MAY reuse the `uuid` for uploading
+ different blobs. The `resource_name` may optionally have a trailing filename
+ (or other metadata) for a client to use if it is storing URLs, as in
+ `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+ after the `size` is ignored.
+
+ A single server MAY support multiple instances of the execution system, each
+ with their own workers, storage, cache, etc. The exact relationship between
+ instances is up to the server. If the server does, then the `instance_name`
+ is an identifier, possibly containing multiple path segments, used to
+ distinguish between the various instances on the server, in a manner defined
+ by the server. For servers which do not support multiple instances, then the
+ `instance_name` is the empty path and the leading slash is omitted, so that
+ the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+ To simplify parsing, a path segment cannot equal any of the following
+ keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
+ `capabilities`.
+
+ When attempting an upload, if another client has already completed the upload
+ (which may occur in the middle of a single upload if another client uploads
+ the same blob concurrently), the request will terminate immediately with
+ a response whose `committed_size` is the full size of the uploaded file
+ (regardless of how much data was transmitted by the client). If the client
+ completes the upload but the
+ [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
+ `INVALID_ARGUMENT` error will be returned. In either case, the client should
+ not attempt to retry the upload.
+
+ For downloading blobs, the client must use the
+ [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+ a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+ `instance_name` is the instance name (see above), and `hash` and `size` are
+ the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+
+ The lifetime of entries in the CAS is implementation specific, but it SHOULD
+ be long enough to allow for newly-added and recently looked-up entries to be
+ used in subsequent calls (e.g. to
+ [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
+
+ Servers MUST behave as though empty blobs are always available, even if they
+ have not been uploaded. Clients MAY optimize away the uploading or
+ downloading of empty blobs.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
"""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
- def BatchUpdateBlobs(self, request, context):
- """Upload many blobs at once.
+ def FindMissingBlobs(self, request, context):
+ """Determine if blobs are present in the CAS.
- The server may enforce a limit of the combined total size of blobs
- to be uploaded using this API. This limit may be obtained using the
- [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
- Requests exceeding the limit should either be split into smaller
- chunks or uploaded using the
- [ByteStream API][google.bytestream.ByteStream], as appropriate.
+ Clients can use this API before uploading blobs to determine which ones are
+ already present in the CAS and do not need to be uploaded again.
- This request is equivalent to calling a Bytestream `Write` request
- on each individual blob, in parallel. The requests may succeed or fail
- independently.
+ Servers SHOULD increase the TTLs of the referenced blobs if necessary and
+ applicable.
- Errors:
+ There are no method-specific errors.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
- * `INVALID_ARGUMENT`: The client attempted to upload more than the
- server supported limit.
+ def BatchUpdateBlobs(self, request, context):
+ """Upload many blobs at once.
- Individual requests may return the following errors, additionally:
+ The server may enforce a limit of the combined total size of blobs
+ to be uploaded using this API. This limit may be obtained using the
+ [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
+ Requests exceeding the limit should either be split into smaller
+ chunks or uploaded using the
+ [ByteStream API][google.bytestream.ByteStream], as appropriate.
- * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
- * `INVALID_ARGUMENT`: The
- [Digest][build.bazel.remote.execution.v2.Digest] does not match the
- provided data.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+ This request is equivalent to calling a Bytestream `Write` request
+ on each individual blob, in parallel. The requests may succeed or fail
+ independently.
- def BatchReadBlobs(self, request, context):
- """Download many blobs at once.
+ Errors:
- The server may enforce a limit of the combined total size of blobs
- to be downloaded using this API. This limit may be obtained using the
- [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
- Requests exceeding the limit should either be split into smaller
- chunks or downloaded using the
- [ByteStream API][google.bytestream.ByteStream], as appropriate.
+ * `INVALID_ARGUMENT`: The client attempted to upload more than the
+ server supported limit.
- This request is equivalent to calling a Bytestream `Read` request
- on each individual blob, in parallel. The requests may succeed or fail
- independently.
+ Individual requests may return the following errors, additionally:
- Errors:
+ * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
+ * `INVALID_ARGUMENT`: The
+ [Digest][build.bazel.remote.execution.v2.Digest] does not match the
+ provided data.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
- * `INVALID_ARGUMENT`: The client attempted to read more than the
- server supported limit.
+ def BatchReadBlobs(self, request, context):
+ """Download many blobs at once.
- Every error on individual read will be returned in the corresponding digest
- status.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+ The server may enforce a limit of the combined total size of blobs
+ to be downloaded using this API. This limit may be obtained using the
+ [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
+ Requests exceeding the limit should either be split into smaller
+ chunks or downloaded using the
+ [ByteStream API][google.bytestream.ByteStream], as appropriate.
- def GetTree(self, request, context):
- """Fetch the entire directory tree rooted at a node.
+ This request is equivalent to calling a Bytestream `Read` request
+ on each individual blob, in parallel. The requests may succeed or fail
+ independently.
- This request must be targeted at a
- [Directory][build.bazel.remote.execution.v2.Directory] stored in the
- [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
- (CAS). The server will enumerate the `Directory` tree recursively and
- return every node descended from the root.
+ Errors:
- The GetTreeRequest.page_token parameter can be used to skip ahead in
- the stream (e.g. when retrying a partially completed and aborted request),
- by setting it to a value taken from GetTreeResponse.next_page_token of the
- last successfully processed GetTreeResponse).
+ * `INVALID_ARGUMENT`: The client attempted to read more than the
+ server supported limit.
- The exact traversal order is unspecified and, unless retrieving subsequent
- pages from an earlier request, is not guaranteed to be stable across
- multiple invocations of `GetTree`.
+ Every error on individual read will be returned in the corresponding digest
+ status.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
- If part of the tree is missing from the CAS, the server will return the
- portion present and omit the rest.
+ def GetTree(self, request, context):
+ """Fetch the entire directory tree rooted at a node.
- Errors:
+ This request must be targeted at a
+ [Directory][build.bazel.remote.execution.v2.Directory] stored in the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+ (CAS). The server will enumerate the `Directory` tree recursively and
+ return every node descended from the root.
- * `NOT_FOUND`: The requested tree root is not present in the CAS.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+ The GetTreeRequest.page_token parameter can be used to skip ahead in
+ the stream (e.g. when retrying a partially completed and aborted request),
+ by setting it to a value taken from GetTreeResponse.next_page_token of the
+ last successfully processed GetTreeResponse).
+
+ The exact traversal order is unspecified and, unless retrieving subsequent
+ pages from an earlier request, is not guaranteed to be stable across
+ multiple invocations of `GetTree`.
+
+ If part of the tree is missing from the CAS, the server will return the
+ portion present and omit the rest.
+
+ Errors:
+
+ * `NOT_FOUND`: The requested tree root is not present in the CAS.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
def add_ContentAddressableStorageServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'FindMissingBlobs': grpc.unary_unary_rpc_method_handler(
- servicer.FindMissingBlobs,
- request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.FromString,
- response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.SerializeToString,
- ),
- 'BatchUpdateBlobs': grpc.unary_unary_rpc_method_handler(
- servicer.BatchUpdateBlobs,
- request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.FromString,
- response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.SerializeToString,
- ),
- 'BatchReadBlobs': grpc.unary_unary_rpc_method_handler(
- servicer.BatchReadBlobs,
- request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.FromString,
- response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.SerializeToString,
- ),
- 'GetTree': grpc.unary_stream_rpc_method_handler(
- servicer.GetTree,
- request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.FromString,
- response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'build.bazel.remote.execution.v2.ContentAddressableStorage', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
+ rpc_method_handlers = {
+ 'FindMissingBlobs': grpc.unary_unary_rpc_method_handler(
+ servicer.FindMissingBlobs,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.SerializeToString,
+ ),
+ 'BatchUpdateBlobs': grpc.unary_unary_rpc_method_handler(
+ servicer.BatchUpdateBlobs,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.SerializeToString,
+ ),
+ 'BatchReadBlobs': grpc.unary_unary_rpc_method_handler(
+ servicer.BatchReadBlobs,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.SerializeToString,
+ ),
+ 'GetTree': grpc.unary_stream_rpc_method_handler(
+ servicer.GetTree,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.ContentAddressableStorage', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class ContentAddressableStorage(object):
+ """The CAS (content-addressable storage) is used to store the inputs to and
+ outputs from the execution service. Each piece of content is addressed by the
+ digest of its binary data.
+
+ Most of the binary data stored in the CAS is opaque to the execution engine,
+ and is only used as a communication medium. In order to build an
+ [Action][build.bazel.remote.execution.v2.Action],
+ however, the client will need to also upload the
+ [Command][build.bazel.remote.execution.v2.Command] and input root
+ [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
+ The Command and Directory messages must be marshalled to wire format and then
+ uploaded under the hash as with any other piece of content. In practice, the
+ input root directory is likely to refer to other Directories in its
+ hierarchy, which must also each be uploaded on their own.
+
+ For small file uploads the client should group them together and call
+ [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+ For large uploads, the client must use the
+ [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+ `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+ where `instance_name` is as described in the next paragraph, `uuid` is a
+ version 4 UUID generated by the client, and `hash` and `size` are the
+ [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
+ `uuid` is used only to avoid collisions when multiple clients try to upload
+ the same file (or the same client tries to upload the file multiple times at
+ once on different threads), so the client MAY reuse the `uuid` for uploading
+ different blobs. The `resource_name` may optionally have a trailing filename
+ (or other metadata) for a client to use if it is storing URLs, as in
+ `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+ after the `size` is ignored.
+
+ A single server MAY support multiple instances of the execution system, each
+ with their own workers, storage, cache, etc. The exact relationship between
+ instances is up to the server. If the server does, then the `instance_name`
+ is an identifier, possibly containing multiple path segments, used to
+ distinguish between the various instances on the server, in a manner defined
+ by the server. For servers which do not support multiple instances, then the
+ `instance_name` is the empty path and the leading slash is omitted, so that
+ the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+ To simplify parsing, a path segment cannot equal any of the following
+ keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
+ `capabilities`.
+
+ When attempting an upload, if another client has already completed the upload
+ (which may occur in the middle of a single upload if another client uploads
+ the same blob concurrently), the request will terminate immediately with
+ a response whose `committed_size` is the full size of the uploaded file
+ (regardless of how much data was transmitted by the client). If the client
+ completes the upload but the
+ [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
+ `INVALID_ARGUMENT` error will be returned. In either case, the client should
+ not attempt to retry the upload.
+
+ For downloading blobs, the client must use the
+ [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+ a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+ `instance_name` is the instance name (see above), and `hash` and `size` are
+ the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+
+ The lifetime of entries in the CAS is implementation specific, but it SHOULD
+ be long enough to allow for newly-added and recently looked-up entries to be
+ used in subsequent calls (e.g. to
+ [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
+
+ Servers MUST behave as though empty blobs are always available, even if they
+ have not been uploaded. Clients MAY optimize away the uploading or
+ downloading of empty blobs.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ @staticmethod
+ def FindMissingBlobs(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/FindMissingBlobs',
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def BatchUpdateBlobs(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchUpdateBlobs',
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def BatchReadBlobs(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchReadBlobs',
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def GetTree(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_stream(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/GetTree',
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
class CapabilitiesStub(object):
- """The Capabilities service may be used by remote execution clients to query
- various server properties, in order to self-configure or return meaningful
- error messages.
+ """The Capabilities service may be used by remote execution clients to query
+ various server properties, in order to self-configure or return meaningful
+ error messages.
- The query may include a particular `instance_name`, in which case the values
- returned will pertain to that instance.
- """
+ The query may include a particular `instance_name`, in which case the values
+ returned will pertain to that instance.
+ """
- def __init__(self, channel):
- """Constructor.
+ def __init__(self, channel):
+ """Constructor.
- Args:
- channel: A grpc.Channel.
- """
- self.GetCapabilities = channel.unary_unary(
- '/build.bazel.remote.execution.v2.Capabilities/GetCapabilities',
- request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.SerializeToString,
- response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.FromString,
- )
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetCapabilities = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.Capabilities/GetCapabilities',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.FromString,
+ )
class CapabilitiesServicer(object):
- """The Capabilities service may be used by remote execution clients to query
- various server properties, in order to self-configure or return meaningful
- error messages.
-
- The query may include a particular `instance_name`, in which case the values
- returned will pertain to that instance.
- """
-
- def GetCapabilities(self, request, context):
- """GetCapabilities returns the server capabilities configuration of the
- remote endpoint.
- Only the capabilities of the services supported by the endpoint will
- be returned:
- * Execution + CAS + Action Cache endpoints should return both
- CacheCapabilities and ExecutionCapabilities.
- * Execution only endpoints should return ExecutionCapabilities.
- * CAS + Action Cache only endpoints should return CacheCapabilities.
+ """The Capabilities service may be used by remote execution clients to query
+ various server properties, in order to self-configure or return meaningful
+ error messages.
+
+ The query may include a particular `instance_name`, in which case the values
+ returned will pertain to that instance.
"""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+
+ def GetCapabilities(self, request, context):
+ """GetCapabilities returns the server capabilities configuration of the
+ remote endpoint.
+ Only the capabilities of the services supported by the endpoint will
+ be returned:
+ * Execution + CAS + Action Cache endpoints should return both
+ CacheCapabilities and ExecutionCapabilities.
+ * Execution only endpoints should return ExecutionCapabilities.
+ * CAS + Action Cache only endpoints should return CacheCapabilities.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
def add_CapabilitiesServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'GetCapabilities': grpc.unary_unary_rpc_method_handler(
- servicer.GetCapabilities,
- request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.FromString,
- response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'build.bazel.remote.execution.v2.Capabilities', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
+ rpc_method_handlers = {
+ 'GetCapabilities': grpc.unary_unary_rpc_method_handler(
+ servicer.GetCapabilities,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.Capabilities', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class Capabilities(object):
+ """The Capabilities service may be used by remote execution clients to query
+ various server properties, in order to self-configure or return meaningful
+ error messages.
+
+ The query may include a particular `instance_name`, in which case the values
+ returned will pertain to that instance.
+ """
+
+ @staticmethod
+ def GetCapabilities(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.Capabilities/GetCapabilities',
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.SerializeToString,
+ build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
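
The regenerated module keeps the conventional stub classes alongside the new experimental static helpers. As a minimal usage sketch (not part of this patch; the endpoint address is a hypothetical placeholder), a client could query FindMissingBlobs through the stub and build the ByteStream upload resource name described in the class comment roughly as follows:

import uuid

import grpc

from buildstream._protos.build.bazel.remote.execution.v2 import (
    remote_execution_pb2,
    remote_execution_pb2_grpc,
)

# Hypothetical CAS endpoint; single-instance servers use an empty instance name.
channel = grpc.insecure_channel("localhost:50051")
instance_name = ""

cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(channel)

# Digest of the empty blob (sha256 of b""), which servers must treat as always present.
digest = remote_execution_pb2.Digest(
    hash="e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
    size_bytes=0,
)

request = remote_execution_pb2.FindMissingBlobsRequest(
    instance_name=instance_name,
    blob_digests=[digest],
)
response = cas.FindMissingBlobs(request)
print(response.missing_blob_digests)  # digests that would still need uploading

# ByteStream upload resource name as described in the class comment:
# {instance_name}/uploads/{uuid}/blobs/{hash}/{size}
# (the leading segment is dropped when the instance name is empty)
resource_name = "/".join(
    filter(None, [
        instance_name,
        "uploads",
        str(uuid.uuid4()),
        "blobs",
        digest.hash,
        str(digest.size_bytes),
    ])
)
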
diff --git a/src/buildstream/_protos/build/buildgrid/local_cas.proto b/src/buildstream/_protos/build/buildgrid/local_cas.proto
index 4aa9244ff..378033e0a 100644
--- a/src/buildstream/_protos/build/buildgrid/local_cas.proto
+++ b/src/buildstream/_protos/build/buildgrid/local_cas.proto
@@ -115,8 +115,17 @@ service LocalContentAddressableStorage {
//
// This returns a string that can be used as instance_name to access the
// specified endpoint in further requests.
+ //
+ // DEPRECATED: Use `content_addressable_storage` in `GetInstanceNameForRemotes()`
+ // instead.
rpc GetInstanceNameForRemote(GetInstanceNameForRemoteRequest) returns (GetInstanceNameForRemoteResponse) {}
+ // Configure remote endpoints.
+ //
+ // This returns a string that can be used as instance_name to access the
+ // specified endpoints in further requests.
+ rpc GetInstanceNameForRemotes(GetInstanceNameForRemotesRequest) returns (GetInstanceNameForRemotesResponse) {}
+
// Query total space used by the local cache.
rpc GetLocalDiskUsage(GetLocalDiskUsageRequest) returns (GetLocalDiskUsageResponse) {}
}
@@ -387,6 +396,45 @@ message GetInstanceNameForRemoteResponse {
string instance_name = 1;
}
+message Remote {
+ // The URL for the remote server.
+ string url = 1;
+
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 2;
+
+ // PEM-encoded public server certificate for https connections to the remote
+ // server.
+ bytes server_cert = 3;
+
+ // PEM-encoded private client key for https with certificate-based client
+ // authentication. If this is specified, `client_cert` must be specified
+ // as well.
+ bytes client_key = 4;
+
+ // PEM-encoded public client certificate for https with certificate-based
+ // client authentication. If this is specified, `client_key` must be
+ // specified as well.
+ bytes client_cert = 5;
+}
+
+// A request message for
+// [LocalContentAddressableStorage.GetInstanceNameForRemotes][build.buildgrid.v2.LocalContentAddressableStorage.GetInstanceNameForRemotes].
+message GetInstanceNameForRemotesRequest {
+ Remote content_addressable_storage = 1;
+ Remote remote_asset = 2;
+}
+
+// A response message for
+// [LocalContentAddressableStorage.GetInstanceNameForRemotes][build.buildgrid.v2.LocalContentAddressableStorage.GetInstanceNameForRemotes].
+message GetInstanceNameForRemotesResponse {
+ string instance_name = 1;
+}
+
// A request message for
// [LocalContentAddressableStorage.GetLocalDiskUsage][build.buildgrid.v2.LocalContentAddressableStorage.GetLocalDiskUsage].
message GetLocalDiskUsageRequest {
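
The new GetInstanceNameForRemotes RPC lets a client configure the CAS and Remote Asset endpoints of the local cache daemon in one call and get back a single instance name to use for subsequent requests. A rough client-side sketch (the socket path and remote URL are hypothetical, and the certificate fields of Remote are left unset):

import grpc

from buildstream._protos.build.buildgrid import local_cas_pb2, local_cas_pb2_grpc

# Hypothetical buildbox-casd socket; the real path comes from the local
# cache configuration.
channel = grpc.insecure_channel("unix:/tmp/casd/casd.sock")
local_cas = local_cas_pb2_grpc.LocalContentAddressableStorageStub(channel)

remote = local_cas_pb2.Remote(url="https://cache.example.com:11001")  # hypothetical server
request = local_cas_pb2.GetInstanceNameForRemotesRequest(
    content_addressable_storage=remote,
    remote_asset=remote,
)
response = local_cas.GetInstanceNameForRemotes(request)

# Use the returned name as instance_name in later CAS / Remote Asset
# requests proxied through the local cache.
print(response.instance_name)
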
diff --git a/src/buildstream/_protos/build/buildgrid/local_cas_pb2.py b/src/buildstream/_protos/build/buildgrid/local_cas_pb2.py
index e2b48159c..0ac5a770c 100644
--- a/src/buildstream/_protos/build/buildgrid/local_cas_pb2.py
+++ b/src/buildstream/_protos/build/buildgrid/local_cas_pb2.py
@@ -20,7 +20,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
package='build.buildgrid',
syntax='proto3',
serialized_options=None,
- serialized_pb=b'\n\x1f\x62uild/buildgrid/local_cas.proto\x12\x0f\x62uild.buildgrid\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x17google/rpc/status.proto\"p\n\x18\x46\x65tchMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xcc\x01\n\x19\x46\x65tchMissingBlobsResponse\x12\x46\n\tresponses\x18\x01 \x03(\x0b\x32\x33.build.buildgrid.FetchMissingBlobsResponse.Response\x1ag\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"q\n\x19UploadMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xce\x01\n\x1aUploadMissingBlobsResponse\x12G\n\tresponses\x18\x01 \x03(\x0b\x32\x34.build.buildgrid.UploadMissingBlobsResponse.Response\x1ag\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\x81\x01\n\x10\x46\x65tchTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x18\n\x10\x66\x65tch_file_blobs\x18\x03 \x01(\x08\"\x13\n\x11\x46\x65tchTreeResponse\"h\n\x11UploadTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x14\n\x12UploadTreeResponse\"u\n\x10StageTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04path\x18\x03 \x01(\t\"!\n\x11StageTreeResponse\x12\x0c\n\x04path\x18\x01 \x01(\t\"n\n\x12\x43\x61ptureTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x03(\t\x12\x1a\n\x12\x62ypass_local_cache\x18\x03 \x01(\x08\x12\x17\n\x0fnode_properties\x18\x04 \x03(\t\"\xd3\x01\n\x13\x43\x61ptureTreeResponse\x12@\n\tresponses\x18\x01 \x03(\x0b\x32-.build.buildgrid.CaptureTreeResponse.Response\x1az\n\x08Response\x12\x0c\n\x04path\x18\x01 \x01(\t\x12<\n\x0btree_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\"o\n\x13\x43\x61ptureFilesRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x03(\t\x12\x1a\n\x12\x62ypass_local_cache\x18\x03 \x01(\x08\x12\x17\n\x0fnode_properties\x18\x04 \x03(\t\"\xb8\x02\n\x14\x43\x61ptureFilesResponse\x12\x41\n\tresponses\x18\x01 \x03(\x0b\x32..build.buildgrid.CaptureFilesResponse.Response\x1a\xdc\x01\n\x08Response\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12\x15\n\ris_executable\x18\x04 \x01(\x08\x12H\n\x0fnode_properties\x18\x06 \x01(\x0b\x32/.build.bazel.remote.execution.v2.NodePropertiesJ\x04\x08\x05\x10\x06\"\x83\x01\n\x1fGetInstanceNameForRemoteRequest\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x15\n\rinstance_name\x18\x02 \x01(\t\x12\x13\n\x0bserver_cert\x18\x03 \x01(\x0c\x12\x12\n\nclient_key\x18\x04 \x01(\x0c\x12\x13\n\x0b\x63lient_cert\x18\x05 \x01(\x0c\"9\n GetInstanceNameForRemoteResponse\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\x1a\n\x18GetLocalDiskUsageRequest\"D\n\x19GetLocalDiskUsageResponse\x12\x12\n\nsize_bytes\x18\x01 \x01(\x03\x12\x13\n\x0bquota_bytes\x18\x02 
\x01(\x03\x32\xb5\x07\n\x1eLocalContentAddressableStorage\x12l\n\x11\x46\x65tchMissingBlobs\x12).build.buildgrid.FetchMissingBlobsRequest\x1a*.build.buildgrid.FetchMissingBlobsResponse\"\x00\x12o\n\x12UploadMissingBlobs\x12*.build.buildgrid.UploadMissingBlobsRequest\x1a+.build.buildgrid.UploadMissingBlobsResponse\"\x00\x12T\n\tFetchTree\x12!.build.buildgrid.FetchTreeRequest\x1a\".build.buildgrid.FetchTreeResponse\"\x00\x12W\n\nUploadTree\x12\".build.buildgrid.UploadTreeRequest\x1a#.build.buildgrid.UploadTreeResponse\"\x00\x12X\n\tStageTree\x12!.build.buildgrid.StageTreeRequest\x1a\".build.buildgrid.StageTreeResponse\"\x00(\x01\x30\x01\x12Z\n\x0b\x43\x61ptureTree\x12#.build.buildgrid.CaptureTreeRequest\x1a$.build.buildgrid.CaptureTreeResponse\"\x00\x12]\n\x0c\x43\x61ptureFiles\x12$.build.buildgrid.CaptureFilesRequest\x1a%.build.buildgrid.CaptureFilesResponse\"\x00\x12\x81\x01\n\x18GetInstanceNameForRemote\x12\x30.build.buildgrid.GetInstanceNameForRemoteRequest\x1a\x31.build.buildgrid.GetInstanceNameForRemoteResponse\"\x00\x12l\n\x11GetLocalDiskUsage\x12).build.buildgrid.GetLocalDiskUsageRequest\x1a*.build.buildgrid.GetLocalDiskUsageResponse\"\x00\x62\x06proto3'
+ serialized_pb=b'\n\x1f\x62uild/buildgrid/local_cas.proto\x12\x0f\x62uild.buildgrid\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x17google/rpc/status.proto\"p\n\x18\x46\x65tchMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xcc\x01\n\x19\x46\x65tchMissingBlobsResponse\x12\x46\n\tresponses\x18\x01 \x03(\x0b\x32\x33.build.buildgrid.FetchMissingBlobsResponse.Response\x1ag\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"q\n\x19UploadMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xce\x01\n\x1aUploadMissingBlobsResponse\x12G\n\tresponses\x18\x01 \x03(\x0b\x32\x34.build.buildgrid.UploadMissingBlobsResponse.Response\x1ag\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\x81\x01\n\x10\x46\x65tchTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x18\n\x10\x66\x65tch_file_blobs\x18\x03 \x01(\x08\"\x13\n\x11\x46\x65tchTreeResponse\"h\n\x11UploadTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x14\n\x12UploadTreeResponse\"u\n\x10StageTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04path\x18\x03 \x01(\t\"!\n\x11StageTreeResponse\x12\x0c\n\x04path\x18\x01 \x01(\t\"n\n\x12\x43\x61ptureTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x03(\t\x12\x1a\n\x12\x62ypass_local_cache\x18\x03 \x01(\x08\x12\x17\n\x0fnode_properties\x18\x04 \x03(\t\"\xd3\x01\n\x13\x43\x61ptureTreeResponse\x12@\n\tresponses\x18\x01 \x03(\x0b\x32-.build.buildgrid.CaptureTreeResponse.Response\x1az\n\x08Response\x12\x0c\n\x04path\x18\x01 \x01(\t\x12<\n\x0btree_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\"o\n\x13\x43\x61ptureFilesRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x03(\t\x12\x1a\n\x12\x62ypass_local_cache\x18\x03 \x01(\x08\x12\x17\n\x0fnode_properties\x18\x04 \x03(\t\"\xb8\x02\n\x14\x43\x61ptureFilesResponse\x12\x41\n\tresponses\x18\x01 \x03(\x0b\x32..build.buildgrid.CaptureFilesResponse.Response\x1a\xdc\x01\n\x08Response\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12\x15\n\ris_executable\x18\x04 \x01(\x08\x12H\n\x0fnode_properties\x18\x06 \x01(\x0b\x32/.build.bazel.remote.execution.v2.NodePropertiesJ\x04\x08\x05\x10\x06\"\x83\x01\n\x1fGetInstanceNameForRemoteRequest\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x15\n\rinstance_name\x18\x02 \x01(\t\x12\x13\n\x0bserver_cert\x18\x03 \x01(\x0c\x12\x12\n\nclient_key\x18\x04 \x01(\x0c\x12\x13\n\x0b\x63lient_cert\x18\x05 \x01(\x0c\"9\n GetInstanceNameForRemoteResponse\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"j\n\x06Remote\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x15\n\rinstance_name\x18\x02 \x01(\t\x12\x13\n\x0bserver_cert\x18\x03 \x01(\x0c\x12\x12\n\nclient_key\x18\x04 
\x01(\x0c\x12\x13\n\x0b\x63lient_cert\x18\x05 \x01(\x0c\"\x8f\x01\n GetInstanceNameForRemotesRequest\x12<\n\x1b\x63ontent_addressable_storage\x18\x01 \x01(\x0b\x32\x17.build.buildgrid.Remote\x12-\n\x0cremote_asset\x18\x02 \x01(\x0b\x32\x17.build.buildgrid.Remote\":\n!GetInstanceNameForRemotesResponse\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\x1a\n\x18GetLocalDiskUsageRequest\"D\n\x19GetLocalDiskUsageResponse\x12\x12\n\nsize_bytes\x18\x01 \x01(\x03\x12\x13\n\x0bquota_bytes\x18\x02 \x01(\x03\x32\xbc\x08\n\x1eLocalContentAddressableStorage\x12l\n\x11\x46\x65tchMissingBlobs\x12).build.buildgrid.FetchMissingBlobsRequest\x1a*.build.buildgrid.FetchMissingBlobsResponse\"\x00\x12o\n\x12UploadMissingBlobs\x12*.build.buildgrid.UploadMissingBlobsRequest\x1a+.build.buildgrid.UploadMissingBlobsResponse\"\x00\x12T\n\tFetchTree\x12!.build.buildgrid.FetchTreeRequest\x1a\".build.buildgrid.FetchTreeResponse\"\x00\x12W\n\nUploadTree\x12\".build.buildgrid.UploadTreeRequest\x1a#.build.buildgrid.UploadTreeResponse\"\x00\x12X\n\tStageTree\x12!.build.buildgrid.StageTreeRequest\x1a\".build.buildgrid.StageTreeResponse\"\x00(\x01\x30\x01\x12Z\n\x0b\x43\x61ptureTree\x12#.build.buildgrid.CaptureTreeRequest\x1a$.build.buildgrid.CaptureTreeResponse\"\x00\x12]\n\x0c\x43\x61ptureFiles\x12$.build.buildgrid.CaptureFilesRequest\x1a%.build.buildgrid.CaptureFilesResponse\"\x00\x12\x81\x01\n\x18GetInstanceNameForRemote\x12\x30.build.buildgrid.GetInstanceNameForRemoteRequest\x1a\x31.build.buildgrid.GetInstanceNameForRemoteResponse\"\x00\x12\x84\x01\n\x19GetInstanceNameForRemotes\x12\x31.build.buildgrid.GetInstanceNameForRemotesRequest\x1a\x32.build.buildgrid.GetInstanceNameForRemotesResponse\"\x00\x12l\n\x11GetLocalDiskUsage\x12).build.buildgrid.GetLocalDiskUsageRequest\x1a*.build.buildgrid.GetLocalDiskUsageResponse\"\x00\x62\x06proto3'
,
dependencies=[build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
@@ -804,6 +804,134 @@ _GETINSTANCENAMEFORREMOTERESPONSE = _descriptor.Descriptor(
)
+_REMOTE = _descriptor.Descriptor(
+ name='Remote',
+ full_name='build.buildgrid.Remote',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='url', full_name='build.buildgrid.Remote.url', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.buildgrid.Remote.instance_name', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='server_cert', full_name='build.buildgrid.Remote.server_cert', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='client_key', full_name='build.buildgrid.Remote.client_key', index=3,
+ number=4, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='client_cert', full_name='build.buildgrid.Remote.client_cert', index=4,
+ number=5, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2160,
+ serialized_end=2266,
+)
+
+
+_GETINSTANCENAMEFORREMOTESREQUEST = _descriptor.Descriptor(
+ name='GetInstanceNameForRemotesRequest',
+ full_name='build.buildgrid.GetInstanceNameForRemotesRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='content_addressable_storage', full_name='build.buildgrid.GetInstanceNameForRemotesRequest.content_addressable_storage', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='remote_asset', full_name='build.buildgrid.GetInstanceNameForRemotesRequest.remote_asset', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2269,
+ serialized_end=2412,
+)
+
+
+_GETINSTANCENAMEFORREMOTESRESPONSE = _descriptor.Descriptor(
+ name='GetInstanceNameForRemotesResponse',
+ full_name='build.buildgrid.GetInstanceNameForRemotesResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.buildgrid.GetInstanceNameForRemotesResponse.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2414,
+ serialized_end=2472,
+)
+
+
_GETLOCALDISKUSAGEREQUEST = _descriptor.Descriptor(
name='GetLocalDiskUsageRequest',
full_name='build.buildgrid.GetLocalDiskUsageRequest',
@@ -823,8 +951,8 @@ _GETLOCALDISKUSAGEREQUEST = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=2160,
- serialized_end=2186,
+ serialized_start=2474,
+ serialized_end=2500,
)
@@ -861,8 +989,8 @@ _GETLOCALDISKUSAGERESPONSE = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
- serialized_start=2188,
- serialized_end=2256,
+ serialized_start=2502,
+ serialized_end=2570,
)
_FETCHMISSINGBLOBSREQUEST.fields_by_name['blob_digests'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
@@ -887,6 +1015,8 @@ _CAPTUREFILESRESPONSE_RESPONSE.fields_by_name['status'].message_type = google_do
_CAPTUREFILESRESPONSE_RESPONSE.fields_by_name['node_properties'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._NODEPROPERTIES
_CAPTUREFILESRESPONSE_RESPONSE.containing_type = _CAPTUREFILESRESPONSE
_CAPTUREFILESRESPONSE.fields_by_name['responses'].message_type = _CAPTUREFILESRESPONSE_RESPONSE
+_GETINSTANCENAMEFORREMOTESREQUEST.fields_by_name['content_addressable_storage'].message_type = _REMOTE
+_GETINSTANCENAMEFORREMOTESREQUEST.fields_by_name['remote_asset'].message_type = _REMOTE
DESCRIPTOR.message_types_by_name['FetchMissingBlobsRequest'] = _FETCHMISSINGBLOBSREQUEST
DESCRIPTOR.message_types_by_name['FetchMissingBlobsResponse'] = _FETCHMISSINGBLOBSRESPONSE
DESCRIPTOR.message_types_by_name['UploadMissingBlobsRequest'] = _UPLOADMISSINGBLOBSREQUEST
@@ -903,6 +1033,9 @@ DESCRIPTOR.message_types_by_name['CaptureFilesRequest'] = _CAPTUREFILESREQUEST
DESCRIPTOR.message_types_by_name['CaptureFilesResponse'] = _CAPTUREFILESRESPONSE
DESCRIPTOR.message_types_by_name['GetInstanceNameForRemoteRequest'] = _GETINSTANCENAMEFORREMOTEREQUEST
DESCRIPTOR.message_types_by_name['GetInstanceNameForRemoteResponse'] = _GETINSTANCENAMEFORREMOTERESPONSE
+DESCRIPTOR.message_types_by_name['Remote'] = _REMOTE
+DESCRIPTOR.message_types_by_name['GetInstanceNameForRemotesRequest'] = _GETINSTANCENAMEFORREMOTESREQUEST
+DESCRIPTOR.message_types_by_name['GetInstanceNameForRemotesResponse'] = _GETINSTANCENAMEFORREMOTESRESPONSE
DESCRIPTOR.message_types_by_name['GetLocalDiskUsageRequest'] = _GETLOCALDISKUSAGEREQUEST
DESCRIPTOR.message_types_by_name['GetLocalDiskUsageResponse'] = _GETLOCALDISKUSAGERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
@@ -1051,6 +1184,27 @@ GetInstanceNameForRemoteResponse = _reflection.GeneratedProtocolMessageType('Get
})
_sym_db.RegisterMessage(GetInstanceNameForRemoteResponse)
+Remote = _reflection.GeneratedProtocolMessageType('Remote', (_message.Message,), {
+ 'DESCRIPTOR' : _REMOTE,
+ '__module__' : 'build.buildgrid.local_cas_pb2'
+ # @@protoc_insertion_point(class_scope:build.buildgrid.Remote)
+ })
+_sym_db.RegisterMessage(Remote)
+
+GetInstanceNameForRemotesRequest = _reflection.GeneratedProtocolMessageType('GetInstanceNameForRemotesRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _GETINSTANCENAMEFORREMOTESREQUEST,
+ '__module__' : 'build.buildgrid.local_cas_pb2'
+ # @@protoc_insertion_point(class_scope:build.buildgrid.GetInstanceNameForRemotesRequest)
+ })
+_sym_db.RegisterMessage(GetInstanceNameForRemotesRequest)
+
+GetInstanceNameForRemotesResponse = _reflection.GeneratedProtocolMessageType('GetInstanceNameForRemotesResponse', (_message.Message,), {
+ 'DESCRIPTOR' : _GETINSTANCENAMEFORREMOTESRESPONSE,
+ '__module__' : 'build.buildgrid.local_cas_pb2'
+ # @@protoc_insertion_point(class_scope:build.buildgrid.GetInstanceNameForRemotesResponse)
+ })
+_sym_db.RegisterMessage(GetInstanceNameForRemotesResponse)
+
GetLocalDiskUsageRequest = _reflection.GeneratedProtocolMessageType('GetLocalDiskUsageRequest', (_message.Message,), {
'DESCRIPTOR' : _GETLOCALDISKUSAGEREQUEST,
'__module__' : 'build.buildgrid.local_cas_pb2'
@@ -1073,8 +1227,8 @@ _LOCALCONTENTADDRESSABLESTORAGE = _descriptor.ServiceDescriptor(
file=DESCRIPTOR,
index=0,
serialized_options=None,
- serialized_start=2259,
- serialized_end=3208,
+ serialized_start=2573,
+ serialized_end=3657,
methods=[
_descriptor.MethodDescriptor(
name='FetchMissingBlobs',
@@ -1149,9 +1303,18 @@ _LOCALCONTENTADDRESSABLESTORAGE = _descriptor.ServiceDescriptor(
serialized_options=None,
),
_descriptor.MethodDescriptor(
+ name='GetInstanceNameForRemotes',
+ full_name='build.buildgrid.LocalContentAddressableStorage.GetInstanceNameForRemotes',
+ index=8,
+ containing_service=None,
+ input_type=_GETINSTANCENAMEFORREMOTESREQUEST,
+ output_type=_GETINSTANCENAMEFORREMOTESRESPONSE,
+ serialized_options=None,
+ ),
+ _descriptor.MethodDescriptor(
name='GetLocalDiskUsage',
full_name='build.buildgrid.LocalContentAddressableStorage.GetLocalDiskUsage',
- index=8,
+ index=9,
containing_service=None,
input_type=_GETLOCALDISKUSAGEREQUEST,
output_type=_GETLOCALDISKUSAGERESPONSE,
diff --git a/src/buildstream/_protos/build/buildgrid/local_cas_pb2_grpc.py b/src/buildstream/_protos/build/buildgrid/local_cas_pb2_grpc.py
index 68af4826c..b5687a037 100644
--- a/src/buildstream/_protos/build/buildgrid/local_cas_pb2_grpc.py
+++ b/src/buildstream/_protos/build/buildgrid/local_cas_pb2_grpc.py
@@ -5,251 +5,436 @@ from buildstream._protos.build.buildgrid import local_cas_pb2 as build_dot_build
class LocalContentAddressableStorageStub(object):
- # missing associated documentation comment in .proto file
- pass
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
- """
- self.FetchMissingBlobs = channel.unary_unary(
- '/build.buildgrid.LocalContentAddressableStorage/FetchMissingBlobs',
- request_serializer=build_dot_buildgrid_dot_local__cas__pb2.FetchMissingBlobsRequest.SerializeToString,
- response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.FetchMissingBlobsResponse.FromString,
- )
- self.UploadMissingBlobs = channel.unary_unary(
- '/build.buildgrid.LocalContentAddressableStorage/UploadMissingBlobs',
- request_serializer=build_dot_buildgrid_dot_local__cas__pb2.UploadMissingBlobsRequest.SerializeToString,
- response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.UploadMissingBlobsResponse.FromString,
- )
- self.FetchTree = channel.unary_unary(
- '/build.buildgrid.LocalContentAddressableStorage/FetchTree',
- request_serializer=build_dot_buildgrid_dot_local__cas__pb2.FetchTreeRequest.SerializeToString,
- response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.FetchTreeResponse.FromString,
- )
- self.UploadTree = channel.unary_unary(
- '/build.buildgrid.LocalContentAddressableStorage/UploadTree',
- request_serializer=build_dot_buildgrid_dot_local__cas__pb2.UploadTreeRequest.SerializeToString,
- response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.UploadTreeResponse.FromString,
- )
- self.StageTree = channel.stream_stream(
- '/build.buildgrid.LocalContentAddressableStorage/StageTree',
- request_serializer=build_dot_buildgrid_dot_local__cas__pb2.StageTreeRequest.SerializeToString,
- response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.StageTreeResponse.FromString,
- )
- self.CaptureTree = channel.unary_unary(
- '/build.buildgrid.LocalContentAddressableStorage/CaptureTree',
- request_serializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureTreeRequest.SerializeToString,
- response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureTreeResponse.FromString,
- )
- self.CaptureFiles = channel.unary_unary(
- '/build.buildgrid.LocalContentAddressableStorage/CaptureFiles',
- request_serializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureFilesRequest.SerializeToString,
- response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureFilesResponse.FromString,
- )
- self.GetInstanceNameForRemote = channel.unary_unary(
- '/build.buildgrid.LocalContentAddressableStorage/GetInstanceNameForRemote',
- request_serializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemoteRequest.SerializeToString,
- response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemoteResponse.FromString,
- )
- self.GetLocalDiskUsage = channel.unary_unary(
- '/build.buildgrid.LocalContentAddressableStorage/GetLocalDiskUsage',
- request_serializer=build_dot_buildgrid_dot_local__cas__pb2.GetLocalDiskUsageRequest.SerializeToString,
- response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.GetLocalDiskUsageResponse.FromString,
- )
+ """Missing associated documentation comment in .proto file"""
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.FetchMissingBlobs = channel.unary_unary(
+ '/build.buildgrid.LocalContentAddressableStorage/FetchMissingBlobs',
+ request_serializer=build_dot_buildgrid_dot_local__cas__pb2.FetchMissingBlobsRequest.SerializeToString,
+ response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.FetchMissingBlobsResponse.FromString,
+ )
+ self.UploadMissingBlobs = channel.unary_unary(
+ '/build.buildgrid.LocalContentAddressableStorage/UploadMissingBlobs',
+ request_serializer=build_dot_buildgrid_dot_local__cas__pb2.UploadMissingBlobsRequest.SerializeToString,
+ response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.UploadMissingBlobsResponse.FromString,
+ )
+ self.FetchTree = channel.unary_unary(
+ '/build.buildgrid.LocalContentAddressableStorage/FetchTree',
+ request_serializer=build_dot_buildgrid_dot_local__cas__pb2.FetchTreeRequest.SerializeToString,
+ response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.FetchTreeResponse.FromString,
+ )
+ self.UploadTree = channel.unary_unary(
+ '/build.buildgrid.LocalContentAddressableStorage/UploadTree',
+ request_serializer=build_dot_buildgrid_dot_local__cas__pb2.UploadTreeRequest.SerializeToString,
+ response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.UploadTreeResponse.FromString,
+ )
+ self.StageTree = channel.stream_stream(
+ '/build.buildgrid.LocalContentAddressableStorage/StageTree',
+ request_serializer=build_dot_buildgrid_dot_local__cas__pb2.StageTreeRequest.SerializeToString,
+ response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.StageTreeResponse.FromString,
+ )
+ self.CaptureTree = channel.unary_unary(
+ '/build.buildgrid.LocalContentAddressableStorage/CaptureTree',
+ request_serializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureTreeRequest.SerializeToString,
+ response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureTreeResponse.FromString,
+ )
+ self.CaptureFiles = channel.unary_unary(
+ '/build.buildgrid.LocalContentAddressableStorage/CaptureFiles',
+ request_serializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureFilesRequest.SerializeToString,
+ response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureFilesResponse.FromString,
+ )
+ self.GetInstanceNameForRemote = channel.unary_unary(
+ '/build.buildgrid.LocalContentAddressableStorage/GetInstanceNameForRemote',
+ request_serializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemoteRequest.SerializeToString,
+ response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemoteResponse.FromString,
+ )
+ self.GetInstanceNameForRemotes = channel.unary_unary(
+ '/build.buildgrid.LocalContentAddressableStorage/GetInstanceNameForRemotes',
+ request_serializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemotesRequest.SerializeToString,
+ response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemotesResponse.FromString,
+ )
+ self.GetLocalDiskUsage = channel.unary_unary(
+ '/build.buildgrid.LocalContentAddressableStorage/GetLocalDiskUsage',
+ request_serializer=build_dot_buildgrid_dot_local__cas__pb2.GetLocalDiskUsageRequest.SerializeToString,
+ response_deserializer=build_dot_buildgrid_dot_local__cas__pb2.GetLocalDiskUsageResponse.FromString,
+ )
class LocalContentAddressableStorageServicer(object):
- # missing associated documentation comment in .proto file
- pass
+ """Missing associated documentation comment in .proto file"""
- def FetchMissingBlobs(self, request, context):
- """Fetch blobs from a remote CAS to the local cache.
-
- This request is equivalent to ByteStream `Read` or `BatchReadBlobs`
- requests, storing the downloaded blobs in the local cache.
-
- Requested blobs that failed to be downloaded will be listed in the
- response.
-
- Errors:
- * `INVALID_ARGUMENT`: The client attempted to download more than the
- server supported limit.
-
- Individual requests may return the following error, additionally:
- * `NOT_FOUND`: The requested blob is not present in the remote CAS.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def UploadMissingBlobs(self, request, context):
- """Upload blobs from the local cache to a remote CAS.
-
- This request is equivalent to `FindMissingBlobs` followed by
- ByteStream `Write` or `BatchUpdateBlobs` requests.
-
- Blobs that failed to be uploaded will be listed in the response.
-
- Errors:
- * `INVALID_ARGUMENT`: The client attempted to upload more than the
- server supported limit.
-
- Individual requests may return the following error, additionally:
- * `NOT_FOUND`: The requested blob is not present in the local cache.
- * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def FetchTree(self, request, context):
- """Fetch the entire directory tree rooted at a node from a remote CAS to the
- local cache.
-
- This request is equivalent to `GetTree`, storing the `Directory` objects
- in the local cache. Optionally, this will also fetch all blobs referenced
- by the `Directory` objects, equivalent to `FetchMissingBlobs`.
-
- If no remote CAS is available, this will check presence of the entire
- directory tree (and optionally also file blobs) in the local cache.
-
- * `NOT_FOUND`: The requested tree is not present in the CAS or incomplete.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def UploadTree(self, request, context):
- """Upload the entire directory tree from the local cache to a remote CAS.
-
- This request is equivalent to `UploadMissingBlobs` for all blobs
- referenced by the specified tree (recursively).
-
- Errors:
- * `NOT_FOUND`: The requested tree root is not present in the local cache.
- * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the tree.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def StageTree(self, request_iterator, context):
- """Stage a directory tree in the local filesystem.
-
- This makes the specified directory tree temporarily available for local
- filesystem access. It is implementation-defined whether this uses a
- userspace filesystem such as FUSE, hardlinking or a full copy.
-
- Missing blobs are fetched, if a CAS remote is configured.
-
- The staging starts when the server receives the initial request and
- it is ready to be used on the initial (non-error) response from the
- server.
-
- The server will clean up the staged directory when it either
- receives an additional request (with all fields unset) or when the
- stream is closed. The server will send an additional response after
- cleanup is complete.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def CaptureTree(self, request, context):
- """Capture a directory tree from the local filesystem.
-
- This imports the specified path from the local filesystem into CAS.
-
- If a CAS remote is configured, the blobs are uploaded.
- The `bypass_local_cache` parameter is a hint to indicate whether the blobs
- shall be uploaded without first storing them in the local cache.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def CaptureFiles(self, request, context):
- """Capture files from the local filesystem.
-
- This imports the specified paths from the local filesystem into CAS.
-
- If a CAS remote is configured, the blobs are uploaded.
- The `bypass_local_cache` parameter is a hint to indicate whether the blobs
- shall be uploaded without first storing them in the local cache.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def GetInstanceNameForRemote(self, request, context):
- """Configure remote CAS endpoint.
-
- This returns a string that can be used as instance_name to access the
- specified endpoint in further requests.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def GetLocalDiskUsage(self, request, context):
- """Query total space used by the local cache.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+ def FetchMissingBlobs(self, request, context):
+ """Fetch blobs from a remote CAS to the local cache.
+
+ This request is equivalent to ByteStream `Read` or `BatchReadBlobs`
+ requests, storing the downloaded blobs in the local cache.
+
+ Requested blobs that failed to be downloaded will be listed in the
+ response.
+
+ Errors:
+ * `INVALID_ARGUMENT`: The client attempted to download more than the
+ server supported limit.
+
+ Individual requests may return the following error, additionally:
+ * `NOT_FOUND`: The requested blob is not present in the remote CAS.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def UploadMissingBlobs(self, request, context):
+ """Upload blobs from the local cache to a remote CAS.
+
+ This request is equivalent to `FindMissingBlobs` followed by
+ ByteStream `Write` or `BatchUpdateBlobs` requests.
+
+ Blobs that failed to be uploaded will be listed in the response.
+
+ Errors:
+ * `INVALID_ARGUMENT`: The client attempted to upload more than the
+ server supported limit.
+
+ Individual requests may return the following error, additionally:
+ * `NOT_FOUND`: The requested blob is not present in the local cache.
+ * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def FetchTree(self, request, context):
+ """Fetch the entire directory tree rooted at a node from a remote CAS to the
+ local cache.
+
+ This request is equivalent to `GetTree`, storing the `Directory` objects
+ in the local cache. Optionally, this will also fetch all blobs referenced
+ by the `Directory` objects, equivalent to `FetchMissingBlobs`.
+
+ If no remote CAS is available, this will check presence of the entire
+ directory tree (and optionally also file blobs) in the local cache.
+
+ * `NOT_FOUND`: The requested tree is not present in the CAS or incomplete.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def UploadTree(self, request, context):
+ """Upload the entire directory tree from the local cache to a remote CAS.
+
+ This request is equivalent to `UploadMissingBlobs` for all blobs
+ referenced by the specified tree (recursively).
+
+ Errors:
+ * `NOT_FOUND`: The requested tree root is not present in the local cache.
+ * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the tree.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def StageTree(self, request_iterator, context):
+ """Stage a directory tree in the local filesystem.
+
+ This makes the specified directory tree temporarily available for local
+ filesystem access. It is implementation-defined whether this uses a
+ userspace filesystem such as FUSE, hardlinking or a full copy.
+
+ Missing blobs are fetched, if a CAS remote is configured.
+
+ The staging starts when the server receives the initial request and
+ it is ready to be used on the initial (non-error) response from the
+ server.
+
+ The server will clean up the staged directory when it either
+ receives an additional request (with all fields unset) or when the
+ stream is closed. The server will send an additional response after
+ cleanup is complete.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def CaptureTree(self, request, context):
+ """Capture a directory tree from the local filesystem.
+
+ This imports the specified path from the local filesystem into CAS.
+
+ If a CAS remote is configured, the blobs are uploaded.
+ The `bypass_local_cache` parameter is a hint to indicate whether the blobs
+ shall be uploaded without first storing them in the local cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def CaptureFiles(self, request, context):
+ """Capture files from the local filesystem.
+
+ This imports the specified paths from the local filesystem into CAS.
+
+ If a CAS remote is configured, the blobs are uploaded.
+ The `bypass_local_cache` parameter is a hint to indicate whether the blobs
+ shall be uploaded without first storing them in the local cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetInstanceNameForRemote(self, request, context):
+ """Configure remote CAS endpoint.
+
+ This returns a string that can be used as instance_name to access the
+ specified endpoint in further requests.
+
+ DEPRECATED: Use `GetInstanceNameForRemotes()` instead.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetInstanceNameForRemotes(self, request, context):
+ """Configure remote endpoints.
+
+ This returns a string that can be used as instance_name to access the
+ specified endpoints in further requests.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetLocalDiskUsage(self, request, context):
+ """Query total space used by the local cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
def add_LocalContentAddressableStorageServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'FetchMissingBlobs': grpc.unary_unary_rpc_method_handler(
- servicer.FetchMissingBlobs,
- request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.FetchMissingBlobsRequest.FromString,
- response_serializer=build_dot_buildgrid_dot_local__cas__pb2.FetchMissingBlobsResponse.SerializeToString,
- ),
- 'UploadMissingBlobs': grpc.unary_unary_rpc_method_handler(
- servicer.UploadMissingBlobs,
- request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.UploadMissingBlobsRequest.FromString,
- response_serializer=build_dot_buildgrid_dot_local__cas__pb2.UploadMissingBlobsResponse.SerializeToString,
- ),
- 'FetchTree': grpc.unary_unary_rpc_method_handler(
- servicer.FetchTree,
- request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.FetchTreeRequest.FromString,
- response_serializer=build_dot_buildgrid_dot_local__cas__pb2.FetchTreeResponse.SerializeToString,
- ),
- 'UploadTree': grpc.unary_unary_rpc_method_handler(
- servicer.UploadTree,
- request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.UploadTreeRequest.FromString,
- response_serializer=build_dot_buildgrid_dot_local__cas__pb2.UploadTreeResponse.SerializeToString,
- ),
- 'StageTree': grpc.stream_stream_rpc_method_handler(
- servicer.StageTree,
- request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.StageTreeRequest.FromString,
- response_serializer=build_dot_buildgrid_dot_local__cas__pb2.StageTreeResponse.SerializeToString,
- ),
- 'CaptureTree': grpc.unary_unary_rpc_method_handler(
- servicer.CaptureTree,
- request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureTreeRequest.FromString,
- response_serializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureTreeResponse.SerializeToString,
- ),
- 'CaptureFiles': grpc.unary_unary_rpc_method_handler(
- servicer.CaptureFiles,
- request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureFilesRequest.FromString,
- response_serializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureFilesResponse.SerializeToString,
- ),
- 'GetInstanceNameForRemote': grpc.unary_unary_rpc_method_handler(
- servicer.GetInstanceNameForRemote,
- request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemoteRequest.FromString,
- response_serializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemoteResponse.SerializeToString,
- ),
- 'GetLocalDiskUsage': grpc.unary_unary_rpc_method_handler(
- servicer.GetLocalDiskUsage,
- request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.GetLocalDiskUsageRequest.FromString,
- response_serializer=build_dot_buildgrid_dot_local__cas__pb2.GetLocalDiskUsageResponse.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'build.buildgrid.LocalContentAddressableStorage', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
+ rpc_method_handlers = {
+ 'FetchMissingBlobs': grpc.unary_unary_rpc_method_handler(
+ servicer.FetchMissingBlobs,
+ request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.FetchMissingBlobsRequest.FromString,
+ response_serializer=build_dot_buildgrid_dot_local__cas__pb2.FetchMissingBlobsResponse.SerializeToString,
+ ),
+ 'UploadMissingBlobs': grpc.unary_unary_rpc_method_handler(
+ servicer.UploadMissingBlobs,
+ request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.UploadMissingBlobsRequest.FromString,
+ response_serializer=build_dot_buildgrid_dot_local__cas__pb2.UploadMissingBlobsResponse.SerializeToString,
+ ),
+ 'FetchTree': grpc.unary_unary_rpc_method_handler(
+ servicer.FetchTree,
+ request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.FetchTreeRequest.FromString,
+ response_serializer=build_dot_buildgrid_dot_local__cas__pb2.FetchTreeResponse.SerializeToString,
+ ),
+ 'UploadTree': grpc.unary_unary_rpc_method_handler(
+ servicer.UploadTree,
+ request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.UploadTreeRequest.FromString,
+ response_serializer=build_dot_buildgrid_dot_local__cas__pb2.UploadTreeResponse.SerializeToString,
+ ),
+ 'StageTree': grpc.stream_stream_rpc_method_handler(
+ servicer.StageTree,
+ request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.StageTreeRequest.FromString,
+ response_serializer=build_dot_buildgrid_dot_local__cas__pb2.StageTreeResponse.SerializeToString,
+ ),
+ 'CaptureTree': grpc.unary_unary_rpc_method_handler(
+ servicer.CaptureTree,
+ request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureTreeRequest.FromString,
+ response_serializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureTreeResponse.SerializeToString,
+ ),
+ 'CaptureFiles': grpc.unary_unary_rpc_method_handler(
+ servicer.CaptureFiles,
+ request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureFilesRequest.FromString,
+ response_serializer=build_dot_buildgrid_dot_local__cas__pb2.CaptureFilesResponse.SerializeToString,
+ ),
+ 'GetInstanceNameForRemote': grpc.unary_unary_rpc_method_handler(
+ servicer.GetInstanceNameForRemote,
+ request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemoteRequest.FromString,
+ response_serializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemoteResponse.SerializeToString,
+ ),
+ 'GetInstanceNameForRemotes': grpc.unary_unary_rpc_method_handler(
+ servicer.GetInstanceNameForRemotes,
+ request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemotesRequest.FromString,
+ response_serializer=build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemotesResponse.SerializeToString,
+ ),
+ 'GetLocalDiskUsage': grpc.unary_unary_rpc_method_handler(
+ servicer.GetLocalDiskUsage,
+ request_deserializer=build_dot_buildgrid_dot_local__cas__pb2.GetLocalDiskUsageRequest.FromString,
+ response_serializer=build_dot_buildgrid_dot_local__cas__pb2.GetLocalDiskUsageResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.buildgrid.LocalContentAddressableStorage', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class LocalContentAddressableStorage(object):
+ """Missing associated documentation comment in .proto file"""
+
+ @staticmethod
+ def FetchMissingBlobs(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.buildgrid.LocalContentAddressableStorage/FetchMissingBlobs',
+ build_dot_buildgrid_dot_local__cas__pb2.FetchMissingBlobsRequest.SerializeToString,
+ build_dot_buildgrid_dot_local__cas__pb2.FetchMissingBlobsResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def UploadMissingBlobs(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.buildgrid.LocalContentAddressableStorage/UploadMissingBlobs',
+ build_dot_buildgrid_dot_local__cas__pb2.UploadMissingBlobsRequest.SerializeToString,
+ build_dot_buildgrid_dot_local__cas__pb2.UploadMissingBlobsResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def FetchTree(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.buildgrid.LocalContentAddressableStorage/FetchTree',
+ build_dot_buildgrid_dot_local__cas__pb2.FetchTreeRequest.SerializeToString,
+ build_dot_buildgrid_dot_local__cas__pb2.FetchTreeResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def UploadTree(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.buildgrid.LocalContentAddressableStorage/UploadTree',
+ build_dot_buildgrid_dot_local__cas__pb2.UploadTreeRequest.SerializeToString,
+ build_dot_buildgrid_dot_local__cas__pb2.UploadTreeResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def StageTree(request_iterator,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.stream_stream(request_iterator, target, '/build.buildgrid.LocalContentAddressableStorage/StageTree',
+ build_dot_buildgrid_dot_local__cas__pb2.StageTreeRequest.SerializeToString,
+ build_dot_buildgrid_dot_local__cas__pb2.StageTreeResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def CaptureTree(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.buildgrid.LocalContentAddressableStorage/CaptureTree',
+ build_dot_buildgrid_dot_local__cas__pb2.CaptureTreeRequest.SerializeToString,
+ build_dot_buildgrid_dot_local__cas__pb2.CaptureTreeResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def CaptureFiles(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.buildgrid.LocalContentAddressableStorage/CaptureFiles',
+ build_dot_buildgrid_dot_local__cas__pb2.CaptureFilesRequest.SerializeToString,
+ build_dot_buildgrid_dot_local__cas__pb2.CaptureFilesResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def GetInstanceNameForRemote(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.buildgrid.LocalContentAddressableStorage/GetInstanceNameForRemote',
+ build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemoteRequest.SerializeToString,
+ build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemoteResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def GetInstanceNameForRemotes(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.buildgrid.LocalContentAddressableStorage/GetInstanceNameForRemotes',
+ build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemotesRequest.SerializeToString,
+ build_dot_buildgrid_dot_local__cas__pb2.GetInstanceNameForRemotesResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def GetLocalDiskUsage(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/build.buildgrid.LocalContentAddressableStorage/GetLocalDiskUsage',
+ build_dot_buildgrid_dot_local__cas__pb2.GetLocalDiskUsageRequest.SerializeToString,
+ build_dot_buildgrid_dot_local__cas__pb2.GetLocalDiskUsageResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
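Note: the regenerated local_cas_pb2_grpc module above keeps the servicer base class and the add_LocalContentAddressableStorageServicer_to_server() registration helper. A minimal sketch of wiring a servicer into a gRPC server with those entry points follows; the _DiskUsageOnlyServicer subclass and serve() helper are hypothetical illustrations, not code from this commit.

    from concurrent import futures

    import grpc

    from buildstream._protos.build.buildgrid import local_cas_pb2, local_cas_pb2_grpc


    class _DiskUsageOnlyServicer(local_cas_pb2_grpc.LocalContentAddressableStorageServicer):
        # Hypothetical servicer: only GetLocalDiskUsage is overridden; every
        # other RPC falls back to the generated base class, which answers
        # UNIMPLEMENTED as shown in the hunk above.
        def GetLocalDiskUsage(self, request, context):
            # A real implementation would fill in the response fields here.
            return local_cas_pb2.GetLocalDiskUsageResponse()


    def serve(address="localhost:50051"):
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
        # Registration helper generated in the module above.
        local_cas_pb2_grpc.add_LocalContentAddressableStorageServicer_to_server(
            _DiskUsageOnlyServicer(), server)
        server.add_insecure_port(address)
        server.start()
        return server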
diff --git a/src/buildstream/_protos/buildstream/v2/artifact.proto b/src/buildstream/_protos/buildstream/v2/artifact.proto
index 87f66cc95..ac362b1f1 100644
--- a/src/buildstream/_protos/buildstream/v2/artifact.proto
+++ b/src/buildstream/_protos/buildstream/v2/artifact.proto
@@ -22,20 +22,6 @@ package buildstream.v2;
import "build/bazel/remote/execution/v2/remote_execution.proto";
import "google/api/annotations.proto";
-service ArtifactService {
- // Retrieves an Artifact message
- //
- // Errors:
- // * `NOT_FOUND`: Artifact not found on server
- rpc GetArtifact(GetArtifactRequest) returns (Artifact) {}
-
- // Sets an Artifact message
- //
- // Errors:
- // * `FAILED_PRECONDITION`: Files specified in upload aren't present in CAS
- rpc UpdateArtifact(UpdateArtifactRequest) returns (Artifact) {}
-}
-
message Artifact {
// This version number must always be present and can be used to
// further indicate presence or absence of parts of the proto at a
@@ -79,14 +65,3 @@ message Artifact {
// digest of a directory
build.bazel.remote.execution.v2.Digest sources = 13; // optional
}
-
-message GetArtifactRequest {
- string instance_name = 1;
- string cache_key = 2;
-}
-
-message UpdateArtifactRequest {
- string instance_name = 1;
- string cache_key = 2;
- Artifact artifact = 3;
-}
diff --git a/src/buildstream/_protos/buildstream/v2/artifact_pb2.py b/src/buildstream/_protos/buildstream/v2/artifact_pb2.py
index 8c34ef865..3950264dc 100644
--- a/src/buildstream/_protos/buildstream/v2/artifact_pb2.py
+++ b/src/buildstream/_protos/buildstream/v2/artifact_pb2.py
@@ -20,7 +20,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
package='buildstream.v2',
syntax='proto3',
serialized_options=None,
- serialized_pb=b'\n\x1d\x62uildstream/v2/artifact.proto\x12\x0e\x62uildstream.v2\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"\xae\x05\n\x08\x41rtifact\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x15\n\rbuild_success\x18\x02 \x01(\x08\x12\x13\n\x0b\x62uild_error\x18\x03 \x01(\t\x12\x1b\n\x13\x62uild_error_details\x18\x04 \x01(\t\x12\x12\n\nstrong_key\x18\x05 \x01(\t\x12\x10\n\x08weak_key\x18\x06 \x01(\t\x12\x16\n\x0ewas_workspaced\x18\x07 \x01(\x08\x12\x36\n\x05\x66iles\x18\x08 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x37\n\nbuild_deps\x18\t \x03(\x0b\x32#.buildstream.v2.Artifact.Dependency\x12<\n\x0bpublic_data\x18\n \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12.\n\x04logs\x18\x0b \x03(\x0b\x32 .buildstream.v2.Artifact.LogFile\x12:\n\tbuildtree\x18\x0c \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x38\n\x07sources\x18\r \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x1a\x63\n\nDependency\x12\x14\n\x0cproject_name\x18\x01 \x01(\t\x12\x14\n\x0c\x65lement_name\x18\x02 \x01(\t\x12\x11\n\tcache_key\x18\x03 \x01(\t\x12\x16\n\x0ewas_workspaced\x18\x04 \x01(\x08\x1aP\n\x07LogFile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\">\n\x12GetArtifactRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x11\n\tcache_key\x18\x02 \x01(\t\"m\n\x15UpdateArtifactRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x11\n\tcache_key\x18\x02 \x01(\t\x12*\n\x08\x61rtifact\x18\x03 \x01(\x0b\x32\x18.buildstream.v2.Artifact2\xb5\x01\n\x0f\x41rtifactService\x12M\n\x0bGetArtifact\x12\".buildstream.v2.GetArtifactRequest\x1a\x18.buildstream.v2.Artifact\"\x00\x12S\n\x0eUpdateArtifact\x12%.buildstream.v2.UpdateArtifactRequest\x1a\x18.buildstream.v2.Artifact\"\x00\x62\x06proto3'
+ serialized_pb=b'\n\x1d\x62uildstream/v2/artifact.proto\x12\x0e\x62uildstream.v2\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"\xae\x05\n\x08\x41rtifact\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x15\n\rbuild_success\x18\x02 \x01(\x08\x12\x13\n\x0b\x62uild_error\x18\x03 \x01(\t\x12\x1b\n\x13\x62uild_error_details\x18\x04 \x01(\t\x12\x12\n\nstrong_key\x18\x05 \x01(\t\x12\x10\n\x08weak_key\x18\x06 \x01(\t\x12\x16\n\x0ewas_workspaced\x18\x07 \x01(\x08\x12\x36\n\x05\x66iles\x18\x08 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x37\n\nbuild_deps\x18\t \x03(\x0b\x32#.buildstream.v2.Artifact.Dependency\x12<\n\x0bpublic_data\x18\n \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12.\n\x04logs\x18\x0b \x03(\x0b\x32 .buildstream.v2.Artifact.LogFile\x12:\n\tbuildtree\x18\x0c \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x38\n\x07sources\x18\r \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x1a\x63\n\nDependency\x12\x14\n\x0cproject_name\x18\x01 \x01(\t\x12\x14\n\x0c\x65lement_name\x18\x02 \x01(\t\x12\x11\n\tcache_key\x18\x03 \x01(\t\x12\x16\n\x0ewas_workspaced\x18\x04 \x01(\x08\x1aP\n\x07LogFile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digestb\x06proto3'
,
dependencies=[build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
@@ -229,89 +229,6 @@ _ARTIFACT = _descriptor.Descriptor(
serialized_end=822,
)
-
-_GETARTIFACTREQUEST = _descriptor.Descriptor(
- name='GetArtifactRequest',
- full_name='buildstream.v2.GetArtifactRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='instance_name', full_name='buildstream.v2.GetArtifactRequest.instance_name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='cache_key', full_name='buildstream.v2.GetArtifactRequest.cache_key', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=824,
- serialized_end=886,
-)
-
-
-_UPDATEARTIFACTREQUEST = _descriptor.Descriptor(
- name='UpdateArtifactRequest',
- full_name='buildstream.v2.UpdateArtifactRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='instance_name', full_name='buildstream.v2.UpdateArtifactRequest.instance_name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='cache_key', full_name='buildstream.v2.UpdateArtifactRequest.cache_key', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='artifact', full_name='buildstream.v2.UpdateArtifactRequest.artifact', index=2,
- number=3, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=888,
- serialized_end=997,
-)
-
_ARTIFACT_DEPENDENCY.containing_type = _ARTIFACT
_ARTIFACT_LOGFILE.fields_by_name['digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
_ARTIFACT_LOGFILE.containing_type = _ARTIFACT
@@ -321,10 +238,7 @@ _ARTIFACT.fields_by_name['public_data'].message_type = build_dot_bazel_dot_remot
_ARTIFACT.fields_by_name['logs'].message_type = _ARTIFACT_LOGFILE
_ARTIFACT.fields_by_name['buildtree'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
_ARTIFACT.fields_by_name['sources'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
-_UPDATEARTIFACTREQUEST.fields_by_name['artifact'].message_type = _ARTIFACT
DESCRIPTOR.message_types_by_name['Artifact'] = _ARTIFACT
-DESCRIPTOR.message_types_by_name['GetArtifactRequest'] = _GETARTIFACTREQUEST
-DESCRIPTOR.message_types_by_name['UpdateArtifactRequest'] = _UPDATEARTIFACTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Artifact = _reflection.GeneratedProtocolMessageType('Artifact', (_message.Message,), {
@@ -350,52 +264,5 @@ _sym_db.RegisterMessage(Artifact)
_sym_db.RegisterMessage(Artifact.Dependency)
_sym_db.RegisterMessage(Artifact.LogFile)
-GetArtifactRequest = _reflection.GeneratedProtocolMessageType('GetArtifactRequest', (_message.Message,), {
- 'DESCRIPTOR' : _GETARTIFACTREQUEST,
- '__module__' : 'buildstream.v2.artifact_pb2'
- # @@protoc_insertion_point(class_scope:buildstream.v2.GetArtifactRequest)
- })
-_sym_db.RegisterMessage(GetArtifactRequest)
-
-UpdateArtifactRequest = _reflection.GeneratedProtocolMessageType('UpdateArtifactRequest', (_message.Message,), {
- 'DESCRIPTOR' : _UPDATEARTIFACTREQUEST,
- '__module__' : 'buildstream.v2.artifact_pb2'
- # @@protoc_insertion_point(class_scope:buildstream.v2.UpdateArtifactRequest)
- })
-_sym_db.RegisterMessage(UpdateArtifactRequest)
-
-
-
-_ARTIFACTSERVICE = _descriptor.ServiceDescriptor(
- name='ArtifactService',
- full_name='buildstream.v2.ArtifactService',
- file=DESCRIPTOR,
- index=0,
- serialized_options=None,
- serialized_start=1000,
- serialized_end=1181,
- methods=[
- _descriptor.MethodDescriptor(
- name='GetArtifact',
- full_name='buildstream.v2.ArtifactService.GetArtifact',
- index=0,
- containing_service=None,
- input_type=_GETARTIFACTREQUEST,
- output_type=_ARTIFACT,
- serialized_options=None,
- ),
- _descriptor.MethodDescriptor(
- name='UpdateArtifact',
- full_name='buildstream.v2.ArtifactService.UpdateArtifact',
- index=1,
- containing_service=None,
- input_type=_UPDATEARTIFACTREQUEST,
- output_type=_ARTIFACT,
- serialized_options=None,
- ),
-])
-_sym_db.RegisterServiceDescriptor(_ARTIFACTSERVICE)
-
-DESCRIPTOR.services_by_name['ArtifactService'] = _ARTIFACTSERVICE
# @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/buildstream/v2/artifact_pb2_grpc.py b/src/buildstream/_protos/buildstream/v2/artifact_pb2_grpc.py
index db0cd6435..a89435267 100644
--- a/src/buildstream/_protos/buildstream/v2/artifact_pb2_grpc.py
+++ b/src/buildstream/_protos/buildstream/v2/artifact_pb2_grpc.py
@@ -1,69 +1,3 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
-from buildstream._protos.buildstream.v2 import artifact_pb2 as buildstream_dot_v2_dot_artifact__pb2
-
-
-class ArtifactServiceStub(object):
- # missing associated documentation comment in .proto file
- pass
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
- """
- self.GetArtifact = channel.unary_unary(
- '/buildstream.v2.ArtifactService/GetArtifact',
- request_serializer=buildstream_dot_v2_dot_artifact__pb2.GetArtifactRequest.SerializeToString,
- response_deserializer=buildstream_dot_v2_dot_artifact__pb2.Artifact.FromString,
- )
- self.UpdateArtifact = channel.unary_unary(
- '/buildstream.v2.ArtifactService/UpdateArtifact',
- request_serializer=buildstream_dot_v2_dot_artifact__pb2.UpdateArtifactRequest.SerializeToString,
- response_deserializer=buildstream_dot_v2_dot_artifact__pb2.Artifact.FromString,
- )
-
-
-class ArtifactServiceServicer(object):
- # missing associated documentation comment in .proto file
- pass
-
- def GetArtifact(self, request, context):
- """Retrieves an Artifact message
-
- Errors:
- * `NOT_FOUND`: Artifact not found on server
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def UpdateArtifact(self, request, context):
- """Sets an Artifact message
-
- Errors:
- * `FAILED_PRECONDITION`: Files specified in upload aren't present in CAS
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
-
-def add_ArtifactServiceServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'GetArtifact': grpc.unary_unary_rpc_method_handler(
- servicer.GetArtifact,
- request_deserializer=buildstream_dot_v2_dot_artifact__pb2.GetArtifactRequest.FromString,
- response_serializer=buildstream_dot_v2_dot_artifact__pb2.Artifact.SerializeToString,
- ),
- 'UpdateArtifact': grpc.unary_unary_rpc_method_handler(
- servicer.UpdateArtifact,
- request_deserializer=buildstream_dot_v2_dot_artifact__pb2.UpdateArtifactRequest.FromString,
- response_serializer=buildstream_dot_v2_dot_artifact__pb2.Artifact.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'buildstream.v2.ArtifactService', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
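Note: the ArtifactService RPCs deleted above (GetArtifact/UpdateArtifact) are superseded by the Remote Asset API protos added elsewhere in this commit. A hedged sketch of fetching an artifact's root directory digest through the standard Remote Asset Fetch service follows; the module layout and message names are assumed from the upstream remote_asset.proto, and the URI convention is illustrative only, not BuildStream's actual scheme.

    import grpc

    from buildstream._protos.build.bazel.remote.asset.v1 import (
        remote_asset_pb2,
        remote_asset_pb2_grpc,
    )


    def fetch_artifact_digest(endpoint, artifact_uri, instance_name=""):
        # Illustrative only: how an artifact URI maps onto Remote Asset
        # requests is not shown in this hunk.
        channel = grpc.insecure_channel(endpoint)
        fetch = remote_asset_pb2_grpc.FetchStub(channel)
        request = remote_asset_pb2.FetchDirectoryRequest(
            instance_name=instance_name,
            uris=[artifact_uri],
        )
        response = fetch.FetchDirectory(request)
        # The root directory digest plays the role previously filled by the
        # Artifact message returned from GetArtifact.
        return response.root_directory_digest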
diff --git a/src/buildstream/_protos/buildstream/v2/buildstream.proto b/src/buildstream/_protos/buildstream/v2/buildstream.proto
index d5cac2892..f283d6f3f 100644
--- a/src/buildstream/_protos/buildstream/v2/buildstream.proto
+++ b/src/buildstream/_protos/buildstream/v2/buildstream.proto
@@ -44,15 +44,6 @@ service ReferenceStorage {
}
}
-service Capabilities {
- // GetCapabilities mirrors
- rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) {
- option (google.api.http) = {
- get: "/v2/{instance_name=**}/capabilities"
- };
- }
-}
-
message GetReferenceRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
@@ -102,23 +93,3 @@ message StatusResponse {
// Whether reference updates are allowed for the connected client.
bool allow_updates = 1;
}
-
-message GetCapabilitiesRequest {
- string instance_name = 1;
-}
-
-// Capabilities of the artifact service
-message ArtifactCapabilities {
- bool allow_updates = 1;
-}
-
-// Capabilities of the source service
-message SourceCapabilities {
- bool allow_updates = 1;
-}
-
-// All capabalities will be unset if the service isn't present
-message ServerCapabilities {
- ArtifactCapabilities artifact_capabilities = 1;
- SourceCapabilities source_capabilities = 2;
-} \ No newline at end of file
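Note: the BuildStream-specific Capabilities service removed above appears to have mirrored the capability query of the Remote Execution API. A minimal sketch of the standard REAPI equivalent follows, assuming the usual names generated into remote_execution_pb2(_grpc); which capability fields BuildStream actually consults is not shown in this hunk.

    import grpc

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2,
        remote_execution_pb2_grpc,
    )


    def query_cache_capabilities(endpoint, instance_name=""):
        channel = grpc.insecure_channel(endpoint)
        capabilities = remote_execution_pb2_grpc.CapabilitiesStub(channel)
        response = capabilities.GetCapabilities(
            remote_execution_pb2.GetCapabilitiesRequest(instance_name=instance_name))
        # ServerCapabilities.cache_capabilities describes what the CAS side of
        # the endpoint supports (digest function, batch size limits, ...).
        return response.cache_capabilities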
diff --git a/src/buildstream/_protos/buildstream/v2/buildstream_pb2.py b/src/buildstream/_protos/buildstream/v2/buildstream_pb2.py
index 0780c9fa2..558d4f059 100644
--- a/src/buildstream/_protos/buildstream/v2/buildstream_pb2.py
+++ b/src/buildstream/_protos/buildstream/v2/buildstream_pb2.py
@@ -20,7 +20,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
package='buildstream.v2',
syntax='proto3',
serialized_options=None,
- serialized_pb=b'\n buildstream/v2/buildstream.proto\x12\x0e\x62uildstream.v2\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"9\n\x13GetReferenceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"O\n\x14GetReferenceResponse\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"v\n\x16UpdateReferenceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04keys\x18\x02 \x03(\t\x12\x37\n\x06\x64igest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x19\n\x17UpdateReferenceResponse\"&\n\rStatusRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\'\n\x0eStatusResponse\x12\x15\n\rallow_updates\x18\x01 \x01(\x08\"/\n\x16GetCapabilitiesRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"-\n\x14\x41rtifactCapabilities\x12\x15\n\rallow_updates\x18\x01 \x01(\x08\"+\n\x12SourceCapabilities\x12\x15\n\rallow_updates\x18\x01 \x01(\x08\"\x9a\x01\n\x12ServerCapabilities\x12\x43\n\x15\x61rtifact_capabilities\x18\x01 \x01(\x0b\x32$.buildstream.v2.ArtifactCapabilities\x12?\n\x13source_capabilities\x18\x02 \x01(\x0b\x32\".buildstream.v2.SourceCapabilities2\xca\x03\n\x10ReferenceStorage\x12\x90\x01\n\x0cGetReference\x12#.buildstream.v2.GetReferenceRequest\x1a$.buildstream.v2.GetReferenceResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v2/{instance_name=**}/buildstream/refs/{key}\x12\xa1\x01\n\x0fUpdateReference\x12&.buildstream.v2.UpdateReferenceRequest\x1a\'.buildstream.v2.UpdateReferenceResponse\"=\x82\xd3\xe4\x93\x02\x37\x1a-/v2/{instance_name=**}/buildstream/refs/{key}:\x06\x64igest\x12\x7f\n\x06Status\x12\x1d.buildstream.v2.StatusRequest\x1a\x1e.buildstream.v2.StatusResponse\"6\x82\xd3\xe4\x93\x02\x30\x1a./v2/{instance_name=**}/buildstream/refs:status2\x9b\x01\n\x0c\x43\x61pabilities\x12\x8a\x01\n\x0fGetCapabilities\x12&.buildstream.v2.GetCapabilitiesRequest\x1a\".buildstream.v2.ServerCapabilities\"+\x82\xd3\xe4\x93\x02%\x12#/v2/{instance_name=**}/capabilitiesb\x06proto3'
+ serialized_pb=b'\n buildstream/v2/buildstream.proto\x12\x0e\x62uildstream.v2\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"9\n\x13GetReferenceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"O\n\x14GetReferenceResponse\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"v\n\x16UpdateReferenceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04keys\x18\x02 \x03(\t\x12\x37\n\x06\x64igest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x19\n\x17UpdateReferenceResponse\"&\n\rStatusRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\'\n\x0eStatusResponse\x12\x15\n\rallow_updates\x18\x01 \x01(\x08\x32\xca\x03\n\x10ReferenceStorage\x12\x90\x01\n\x0cGetReference\x12#.buildstream.v2.GetReferenceRequest\x1a$.buildstream.v2.GetReferenceResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v2/{instance_name=**}/buildstream/refs/{key}\x12\xa1\x01\n\x0fUpdateReference\x12&.buildstream.v2.UpdateReferenceRequest\x1a\'.buildstream.v2.UpdateReferenceResponse\"=\x82\xd3\xe4\x93\x02\x37\x1a-/v2/{instance_name=**}/buildstream/refs/{key}:\x06\x64igest\x12\x7f\n\x06Status\x12\x1d.buildstream.v2.StatusRequest\x1a\x1e.buildstream.v2.StatusResponse\"6\x82\xd3\xe4\x93\x02\x30\x1a./v2/{instance_name=**}/buildstream/refs:statusb\x06proto3'
,
dependencies=[build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
@@ -226,151 +226,14 @@ _STATUSRESPONSE = _descriptor.Descriptor(
serialized_end=504,
)
-
-_GETCAPABILITIESREQUEST = _descriptor.Descriptor(
- name='GetCapabilitiesRequest',
- full_name='buildstream.v2.GetCapabilitiesRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='instance_name', full_name='buildstream.v2.GetCapabilitiesRequest.instance_name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=506,
- serialized_end=553,
-)
-
-
-_ARTIFACTCAPABILITIES = _descriptor.Descriptor(
- name='ArtifactCapabilities',
- full_name='buildstream.v2.ArtifactCapabilities',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='allow_updates', full_name='buildstream.v2.ArtifactCapabilities.allow_updates', index=0,
- number=1, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=555,
- serialized_end=600,
-)
-
-
-_SOURCECAPABILITIES = _descriptor.Descriptor(
- name='SourceCapabilities',
- full_name='buildstream.v2.SourceCapabilities',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='allow_updates', full_name='buildstream.v2.SourceCapabilities.allow_updates', index=0,
- number=1, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=602,
- serialized_end=645,
-)
-
-
-_SERVERCAPABILITIES = _descriptor.Descriptor(
- name='ServerCapabilities',
- full_name='buildstream.v2.ServerCapabilities',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='artifact_capabilities', full_name='buildstream.v2.ServerCapabilities.artifact_capabilities', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='source_capabilities', full_name='buildstream.v2.ServerCapabilities.source_capabilities', index=1,
- number=2, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=648,
- serialized_end=802,
-)
-
_GETREFERENCERESPONSE.fields_by_name['digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
_UPDATEREFERENCEREQUEST.fields_by_name['digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
-_SERVERCAPABILITIES.fields_by_name['artifact_capabilities'].message_type = _ARTIFACTCAPABILITIES
-_SERVERCAPABILITIES.fields_by_name['source_capabilities'].message_type = _SOURCECAPABILITIES
DESCRIPTOR.message_types_by_name['GetReferenceRequest'] = _GETREFERENCEREQUEST
DESCRIPTOR.message_types_by_name['GetReferenceResponse'] = _GETREFERENCERESPONSE
DESCRIPTOR.message_types_by_name['UpdateReferenceRequest'] = _UPDATEREFERENCEREQUEST
DESCRIPTOR.message_types_by_name['UpdateReferenceResponse'] = _UPDATEREFERENCERESPONSE
DESCRIPTOR.message_types_by_name['StatusRequest'] = _STATUSREQUEST
DESCRIPTOR.message_types_by_name['StatusResponse'] = _STATUSRESPONSE
-DESCRIPTOR.message_types_by_name['GetCapabilitiesRequest'] = _GETCAPABILITIESREQUEST
-DESCRIPTOR.message_types_by_name['ArtifactCapabilities'] = _ARTIFACTCAPABILITIES
-DESCRIPTOR.message_types_by_name['SourceCapabilities'] = _SOURCECAPABILITIES
-DESCRIPTOR.message_types_by_name['ServerCapabilities'] = _SERVERCAPABILITIES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetReferenceRequest = _reflection.GeneratedProtocolMessageType('GetReferenceRequest', (_message.Message,), {
@@ -415,34 +278,6 @@ StatusResponse = _reflection.GeneratedProtocolMessageType('StatusResponse', (_me
})
_sym_db.RegisterMessage(StatusResponse)
-GetCapabilitiesRequest = _reflection.GeneratedProtocolMessageType('GetCapabilitiesRequest', (_message.Message,), {
- 'DESCRIPTOR' : _GETCAPABILITIESREQUEST,
- '__module__' : 'buildstream.v2.buildstream_pb2'
- # @@protoc_insertion_point(class_scope:buildstream.v2.GetCapabilitiesRequest)
- })
-_sym_db.RegisterMessage(GetCapabilitiesRequest)
-
-ArtifactCapabilities = _reflection.GeneratedProtocolMessageType('ArtifactCapabilities', (_message.Message,), {
- 'DESCRIPTOR' : _ARTIFACTCAPABILITIES,
- '__module__' : 'buildstream.v2.buildstream_pb2'
- # @@protoc_insertion_point(class_scope:buildstream.v2.ArtifactCapabilities)
- })
-_sym_db.RegisterMessage(ArtifactCapabilities)
-
-SourceCapabilities = _reflection.GeneratedProtocolMessageType('SourceCapabilities', (_message.Message,), {
- 'DESCRIPTOR' : _SOURCECAPABILITIES,
- '__module__' : 'buildstream.v2.buildstream_pb2'
- # @@protoc_insertion_point(class_scope:buildstream.v2.SourceCapabilities)
- })
-_sym_db.RegisterMessage(SourceCapabilities)
-
-ServerCapabilities = _reflection.GeneratedProtocolMessageType('ServerCapabilities', (_message.Message,), {
- 'DESCRIPTOR' : _SERVERCAPABILITIES,
- '__module__' : 'buildstream.v2.buildstream_pb2'
- # @@protoc_insertion_point(class_scope:buildstream.v2.ServerCapabilities)
- })
-_sym_db.RegisterMessage(ServerCapabilities)
-
_REFERENCESTORAGE = _descriptor.ServiceDescriptor(
@@ -451,8 +286,8 @@ _REFERENCESTORAGE = _descriptor.ServiceDescriptor(
file=DESCRIPTOR,
index=0,
serialized_options=None,
- serialized_start=805,
- serialized_end=1263,
+ serialized_start=507,
+ serialized_end=965,
methods=[
_descriptor.MethodDescriptor(
name='GetReference',
@@ -486,28 +321,4 @@ _sym_db.RegisterServiceDescriptor(_REFERENCESTORAGE)
DESCRIPTOR.services_by_name['ReferenceStorage'] = _REFERENCESTORAGE
-
-_CAPABILITIES = _descriptor.ServiceDescriptor(
- name='Capabilities',
- full_name='buildstream.v2.Capabilities',
- file=DESCRIPTOR,
- index=1,
- serialized_options=None,
- serialized_start=1266,
- serialized_end=1421,
- methods=[
- _descriptor.MethodDescriptor(
- name='GetCapabilities',
- full_name='buildstream.v2.Capabilities.GetCapabilities',
- index=0,
- containing_service=None,
- input_type=_GETCAPABILITIESREQUEST,
- output_type=_SERVERCAPABILITIES,
- serialized_options=b'\202\323\344\223\002%\022#/v2/{instance_name=**}/capabilities',
- ),
-])
-_sym_db.RegisterServiceDescriptor(_CAPABILITIES)
-
-DESCRIPTOR.services_by_name['Capabilities'] = _CAPABILITIES
-
# @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py b/src/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py
index 52d22c593..26a39d06e 100644
--- a/src/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py
+++ b/src/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py
@@ -5,127 +5,135 @@ from buildstream._protos.buildstream.v2 import buildstream_pb2 as buildstream_do
class ReferenceStorageStub(object):
- # missing associated documentation comment in .proto file
- pass
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
- """
- self.GetReference = channel.unary_unary(
- '/buildstream.v2.ReferenceStorage/GetReference',
- request_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.SerializeToString,
- response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.FromString,
- )
- self.UpdateReference = channel.unary_unary(
- '/buildstream.v2.ReferenceStorage/UpdateReference',
- request_serializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.SerializeToString,
- response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.FromString,
- )
- self.Status = channel.unary_unary(
- '/buildstream.v2.ReferenceStorage/Status',
- request_serializer=buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.SerializeToString,
- response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.FromString,
- )
+ """Missing associated documentation comment in .proto file"""
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetReference = channel.unary_unary(
+ '/buildstream.v2.ReferenceStorage/GetReference',
+ request_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.SerializeToString,
+ response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.FromString,
+ )
+ self.UpdateReference = channel.unary_unary(
+ '/buildstream.v2.ReferenceStorage/UpdateReference',
+ request_serializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.SerializeToString,
+ response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.FromString,
+ )
+ self.Status = channel.unary_unary(
+ '/buildstream.v2.ReferenceStorage/Status',
+ request_serializer=buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.SerializeToString,
+ response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.FromString,
+ )
class ReferenceStorageServicer(object):
- # missing associated documentation comment in .proto file
- pass
-
- def GetReference(self, request, context):
- """Retrieve a CAS [Directory][build.bazel.remote.execution.v2.Directory]
- digest by name.
-
- Errors:
- * `NOT_FOUND`: The requested reference is not in the cache.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def UpdateReference(self, request, context):
- """Associate a name with a CAS [Directory][build.bazel.remote.execution.v2.Directory]
- digest.
-
- Errors:
- * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
- entry to the cache.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def Status(self, request, context):
- # missing associated documentation comment in .proto file
- pass
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+ """Missing associated documentation comment in .proto file"""
+
+ def GetReference(self, request, context):
+ """Retrieve a CAS [Directory][build.bazel.remote.execution.v2.Directory]
+ digest by name.
+
+ Errors:
+ * `NOT_FOUND`: The requested reference is not in the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def UpdateReference(self, request, context):
+ """Associate a name with a CAS [Directory][build.bazel.remote.execution.v2.Directory]
+ digest.
+
+ Errors:
+ * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+ entry to the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def Status(self, request, context):
+ """Missing associated documentation comment in .proto file"""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
def add_ReferenceStorageServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'GetReference': grpc.unary_unary_rpc_method_handler(
- servicer.GetReference,
- request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.FromString,
- response_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.SerializeToString,
- ),
- 'UpdateReference': grpc.unary_unary_rpc_method_handler(
- servicer.UpdateReference,
- request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.FromString,
- response_serializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.SerializeToString,
- ),
- 'Status': grpc.unary_unary_rpc_method_handler(
- servicer.Status,
- request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.FromString,
- response_serializer=buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'buildstream.v2.ReferenceStorage', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
-
-
-class CapabilitiesStub(object):
- # missing associated documentation comment in .proto file
- pass
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
- """
- self.GetCapabilities = channel.unary_unary(
- '/buildstream.v2.Capabilities/GetCapabilities',
- request_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetCapabilitiesRequest.SerializeToString,
- response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.ServerCapabilities.FromString,
- )
-
-
-class CapabilitiesServicer(object):
- # missing associated documentation comment in .proto file
- pass
-
- def GetCapabilities(self, request, context):
- """GetCapabilities mirrors
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
-
-def add_CapabilitiesServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'GetCapabilities': grpc.unary_unary_rpc_method_handler(
- servicer.GetCapabilities,
- request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetCapabilitiesRequest.FromString,
- response_serializer=buildstream_dot_v2_dot_buildstream__pb2.ServerCapabilities.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'buildstream.v2.Capabilities', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
+ rpc_method_handlers = {
+ 'GetReference': grpc.unary_unary_rpc_method_handler(
+ servicer.GetReference,
+ request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.FromString,
+ response_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.SerializeToString,
+ ),
+ 'UpdateReference': grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateReference,
+ request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.FromString,
+ response_serializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.SerializeToString,
+ ),
+ 'Status': grpc.unary_unary_rpc_method_handler(
+ servicer.Status,
+ request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.FromString,
+ response_serializer=buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'buildstream.v2.ReferenceStorage', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class ReferenceStorage(object):
+ """Missing associated documentation comment in .proto file"""
+
+ @staticmethod
+ def GetReference(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/buildstream.v2.ReferenceStorage/GetReference',
+ buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.SerializeToString,
+ buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def UpdateReference(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/buildstream.v2.ReferenceStorage/UpdateReference',
+ buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.SerializeToString,
+ buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def Status(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/buildstream.v2.ReferenceStorage/Status',
+ buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.SerializeToString,
+ buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
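Note: the ReferenceStorage stub, servicer and registration helper survive the regeneration above unchanged in behaviour. A short client-side sketch of the Status RPC through the conventional stub follows; the helper function and endpoint are assumptions for illustration, while StatusRequest.instance_name and StatusResponse.allow_updates are the fields shown in this diff.

    import grpc

    from buildstream._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc


    def reference_storage_allows_updates(endpoint, instance_name=""):
        # Hypothetical helper: asks the ReferenceStorage service whether the
        # connected client may push reference updates.
        with grpc.insecure_channel(endpoint) as channel:
            stub = buildstream_pb2_grpc.ReferenceStorageStub(channel)
            response = stub.Status(
                buildstream_pb2.StatusRequest(instance_name=instance_name))
        return response.allow_updates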
diff --git a/src/buildstream/_protos/buildstream/v2/source.proto b/src/buildstream/_protos/buildstream/v2/source.proto
index 6fcb43725..fdab98602 100644
--- a/src/buildstream/_protos/buildstream/v2/source.proto
+++ b/src/buildstream/_protos/buildstream/v2/source.proto
@@ -19,21 +19,6 @@ package buildstream.v2;
import "build/bazel/remote/execution/v2/remote_execution.proto";
import "google/api/annotations.proto";
-service SourceService {
- // Retrieve a source message given a reference name from the service
- //
- // Errors:
- // * `NOT_FOUND`: The requested reference is not in the cache.
- rpc GetSource(GetSourceRequest) returns (Source) {}
-
- // Sets a source message on the service
- //
- // Errors:
- // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
- // entry to the cache.
- rpc UpdateSource(UpdateSourceRequest) returns (Source) {}
-}
-
message Source {
// This version number must always be present and can be used to
// further indicate presence or absence of parts of the proto at a
@@ -43,19 +28,3 @@ message Source {
// root directory digest of the files
build.bazel.remote.execution.v2.Digest files = 2;
}
-
-message GetSourceRequest {
- // instance of the service we want to query
- string instance_name = 1;
- // reference key for the source
- string cache_key = 2;
-}
-
-message UpdateSourceRequest {
- // instance of the service we want to query
- string instance_name = 1;
- // reference key ofr the source
- string cache_key = 2;
- // Source that we want to upload to the service
- Source source = 3;
-} \ No newline at end of file
diff --git a/src/buildstream/_protos/buildstream/v2/source_pb2.py b/src/buildstream/_protos/buildstream/v2/source_pb2.py
index 136c4cb2e..8d93de2da 100644
--- a/src/buildstream/_protos/buildstream/v2/source_pb2.py
+++ b/src/buildstream/_protos/buildstream/v2/source_pb2.py
@@ -20,7 +20,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
package='buildstream.v2',
syntax='proto3',
serialized_options=None,
- serialized_pb=b'\n\x1b\x62uildstream/v2/source.proto\x12\x0e\x62uildstream.v2\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"Q\n\x06Source\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x36\n\x05\x66iles\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"<\n\x10GetSourceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x11\n\tcache_key\x18\x02 \x01(\t\"g\n\x13UpdateSourceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x11\n\tcache_key\x18\x02 \x01(\t\x12&\n\x06source\x18\x03 \x01(\x0b\x32\x16.buildstream.v2.Source2\xa7\x01\n\rSourceService\x12G\n\tGetSource\x12 .buildstream.v2.GetSourceRequest\x1a\x16.buildstream.v2.Source\"\x00\x12M\n\x0cUpdateSource\x12#.buildstream.v2.UpdateSourceRequest\x1a\x16.buildstream.v2.Source\"\x00\x62\x06proto3'
+ serialized_pb=b'\n\x1b\x62uildstream/v2/source.proto\x12\x0e\x62uildstream.v2\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"Q\n\x06Source\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x36\n\x05\x66iles\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digestb\x06proto3'
,
dependencies=[build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
@@ -64,94 +64,8 @@ _SOURCE = _descriptor.Descriptor(
serialized_end=214,
)
-
-_GETSOURCEREQUEST = _descriptor.Descriptor(
- name='GetSourceRequest',
- full_name='buildstream.v2.GetSourceRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='instance_name', full_name='buildstream.v2.GetSourceRequest.instance_name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='cache_key', full_name='buildstream.v2.GetSourceRequest.cache_key', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=216,
- serialized_end=276,
-)
-
-
-_UPDATESOURCEREQUEST = _descriptor.Descriptor(
- name='UpdateSourceRequest',
- full_name='buildstream.v2.UpdateSourceRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='instance_name', full_name='buildstream.v2.UpdateSourceRequest.instance_name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='cache_key', full_name='buildstream.v2.UpdateSourceRequest.cache_key', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='source', full_name='buildstream.v2.UpdateSourceRequest.source', index=2,
- number=3, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=278,
- serialized_end=381,
-)
-
_SOURCE.fields_by_name['files'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
-_UPDATESOURCEREQUEST.fields_by_name['source'].message_type = _SOURCE
DESCRIPTOR.message_types_by_name['Source'] = _SOURCE
-DESCRIPTOR.message_types_by_name['GetSourceRequest'] = _GETSOURCEREQUEST
-DESCRIPTOR.message_types_by_name['UpdateSourceRequest'] = _UPDATESOURCEREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Source = _reflection.GeneratedProtocolMessageType('Source', (_message.Message,), {
@@ -161,52 +75,5 @@ Source = _reflection.GeneratedProtocolMessageType('Source', (_message.Message,),
})
_sym_db.RegisterMessage(Source)
-GetSourceRequest = _reflection.GeneratedProtocolMessageType('GetSourceRequest', (_message.Message,), {
- 'DESCRIPTOR' : _GETSOURCEREQUEST,
- '__module__' : 'buildstream.v2.source_pb2'
- # @@protoc_insertion_point(class_scope:buildstream.v2.GetSourceRequest)
- })
-_sym_db.RegisterMessage(GetSourceRequest)
-
-UpdateSourceRequest = _reflection.GeneratedProtocolMessageType('UpdateSourceRequest', (_message.Message,), {
- 'DESCRIPTOR' : _UPDATESOURCEREQUEST,
- '__module__' : 'buildstream.v2.source_pb2'
- # @@protoc_insertion_point(class_scope:buildstream.v2.UpdateSourceRequest)
- })
-_sym_db.RegisterMessage(UpdateSourceRequest)
-
-
-
-_SOURCESERVICE = _descriptor.ServiceDescriptor(
- name='SourceService',
- full_name='buildstream.v2.SourceService',
- file=DESCRIPTOR,
- index=0,
- serialized_options=None,
- serialized_start=384,
- serialized_end=551,
- methods=[
- _descriptor.MethodDescriptor(
- name='GetSource',
- full_name='buildstream.v2.SourceService.GetSource',
- index=0,
- containing_service=None,
- input_type=_GETSOURCEREQUEST,
- output_type=_SOURCE,
- serialized_options=None,
- ),
- _descriptor.MethodDescriptor(
- name='UpdateSource',
- full_name='buildstream.v2.SourceService.UpdateSource',
- index=1,
- containing_service=None,
- input_type=_UPDATESOURCEREQUEST,
- output_type=_SOURCE,
- serialized_options=None,
- ),
-])
-_sym_db.RegisterServiceDescriptor(_SOURCESERVICE)
-
-DESCRIPTOR.services_by_name['SourceService'] = _SOURCESERVICE
# @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/buildstream/v2/source_pb2_grpc.py b/src/buildstream/_protos/buildstream/v2/source_pb2_grpc.py
index ecf734afb..a89435267 100644
--- a/src/buildstream/_protos/buildstream/v2/source_pb2_grpc.py
+++ b/src/buildstream/_protos/buildstream/v2/source_pb2_grpc.py
@@ -1,70 +1,3 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
-from buildstream._protos.buildstream.v2 import source_pb2 as buildstream_dot_v2_dot_source__pb2
-
-
-class SourceServiceStub(object):
- # missing associated documentation comment in .proto file
- pass
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
- """
- self.GetSource = channel.unary_unary(
- '/buildstream.v2.SourceService/GetSource',
- request_serializer=buildstream_dot_v2_dot_source__pb2.GetSourceRequest.SerializeToString,
- response_deserializer=buildstream_dot_v2_dot_source__pb2.Source.FromString,
- )
- self.UpdateSource = channel.unary_unary(
- '/buildstream.v2.SourceService/UpdateSource',
- request_serializer=buildstream_dot_v2_dot_source__pb2.UpdateSourceRequest.SerializeToString,
- response_deserializer=buildstream_dot_v2_dot_source__pb2.Source.FromString,
- )
-
-
-class SourceServiceServicer(object):
- # missing associated documentation comment in .proto file
- pass
-
- def GetSource(self, request, context):
- """Retrieve a source message given a reference name from the service
-
- Errors:
- * `NOT_FOUND`: The requested reference is not in the cache.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def UpdateSource(self, request, context):
- """Sets a source message on the service
-
- Errors:
- * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
- entry to the cache.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
-
-def add_SourceServiceServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'GetSource': grpc.unary_unary_rpc_method_handler(
- servicer.GetSource,
- request_deserializer=buildstream_dot_v2_dot_source__pb2.GetSourceRequest.FromString,
- response_serializer=buildstream_dot_v2_dot_source__pb2.Source.SerializeToString,
- ),
- 'UpdateSource': grpc.unary_unary_rpc_method_handler(
- servicer.UpdateSource,
- request_deserializer=buildstream_dot_v2_dot_source__pb2.UpdateSourceRequest.FromString,
- response_serializer=buildstream_dot_v2_dot_source__pb2.Source.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'buildstream.v2.SourceService', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
diff --git a/src/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py b/src/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py
index ef993e040..98859f38e 100644
--- a/src/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py
+++ b/src/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py
@@ -5,156 +5,232 @@ from buildstream._protos.google.bytestream import bytestream_pb2 as google_dot_b
class ByteStreamStub(object):
- """#### Introduction
+ """#### Introduction
- The Byte Stream API enables a client to read and write a stream of bytes to
- and from a resource. Resources have names, and these names are supplied in
- the API calls below to identify the resource that is being read from or
- written to.
+ The Byte Stream API enables a client to read and write a stream of bytes to
+ and from a resource. Resources have names, and these names are supplied in
+ the API calls below to identify the resource that is being read from or
+ written to.
- All implementations of the Byte Stream API export the interface defined here:
+ All implementations of the Byte Stream API export the interface defined here:
- * `Read()`: Reads the contents of a resource.
+ * `Read()`: Reads the contents of a resource.
- * `Write()`: Writes the contents of a resource. The client can call `Write()`
- multiple times with the same resource and can check the status of the write
- by calling `QueryWriteStatus()`.
+ * `Write()`: Writes the contents of a resource. The client can call `Write()`
+ multiple times with the same resource and can check the status of the write
+ by calling `QueryWriteStatus()`.
- #### Service parameters and metadata
+ #### Service parameters and metadata
- The ByteStream API provides no direct way to access/modify any metadata
- associated with the resource.
+ The ByteStream API provides no direct way to access/modify any metadata
+ associated with the resource.
- #### Errors
+ #### Errors
- The errors returned by the service are in the Google canonical error space.
- """
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
+ The errors returned by the service are in the Google canonical error space.
"""
- self.Read = channel.unary_stream(
- '/google.bytestream.ByteStream/Read',
- request_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.SerializeToString,
- response_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.FromString,
- )
- self.Write = channel.stream_unary(
- '/google.bytestream.ByteStream/Write',
- request_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.SerializeToString,
- response_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.FromString,
- )
- self.QueryWriteStatus = channel.unary_unary(
- '/google.bytestream.ByteStream/QueryWriteStatus',
- request_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.SerializeToString,
- response_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.FromString,
- )
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.Read = channel.unary_stream(
+ '/google.bytestream.ByteStream/Read',
+ request_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.SerializeToString,
+ response_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.FromString,
+ )
+ self.Write = channel.stream_unary(
+ '/google.bytestream.ByteStream/Write',
+ request_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.SerializeToString,
+ response_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.FromString,
+ )
+ self.QueryWriteStatus = channel.unary_unary(
+ '/google.bytestream.ByteStream/QueryWriteStatus',
+ request_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.SerializeToString,
+ response_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.FromString,
+ )
-class ByteStreamServicer(object):
- """#### Introduction
- The Byte Stream API enables a client to read and write a stream of bytes to
- and from a resource. Resources have names, and these names are supplied in
- the API calls below to identify the resource that is being read from or
- written to.
+class ByteStreamServicer(object):
+ """#### Introduction
- All implementations of the Byte Stream API export the interface defined here:
+ The Byte Stream API enables a client to read and write a stream of bytes to
+ and from a resource. Resources have names, and these names are supplied in
+ the API calls below to identify the resource that is being read from or
+ written to.
- * `Read()`: Reads the contents of a resource.
+ All implementations of the Byte Stream API export the interface defined here:
- * `Write()`: Writes the contents of a resource. The client can call `Write()`
- multiple times with the same resource and can check the status of the write
- by calling `QueryWriteStatus()`.
+ * `Read()`: Reads the contents of a resource.
- #### Service parameters and metadata
+ * `Write()`: Writes the contents of a resource. The client can call `Write()`
+ multiple times with the same resource and can check the status of the write
+ by calling `QueryWriteStatus()`.
- The ByteStream API provides no direct way to access/modify any metadata
- associated with the resource.
+ #### Service parameters and metadata
- #### Errors
+ The ByteStream API provides no direct way to access/modify any metadata
+ associated with the resource.
- The errors returned by the service are in the Google canonical error space.
- """
+ #### Errors
- def Read(self, request, context):
- """`Read()` is used to retrieve the contents of a resource as a sequence
- of bytes. The bytes are returned in a sequence of responses, and the
- responses are delivered as the results of a server-side streaming RPC.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def Write(self, request_iterator, context):
- """`Write()` is used to send the contents of a resource as a sequence of
- bytes. The bytes are sent in a sequence of request protos of a client-side
- streaming RPC.
-
- A `Write()` action is resumable. If there is an error or the connection is
- broken during the `Write()`, the client should check the status of the
- `Write()` by calling `QueryWriteStatus()` and continue writing from the
- returned `committed_size`. This may be less than the amount of data the
- client previously sent.
-
- Calling `Write()` on a resource name that was previously written and
- finalized could cause an error, depending on whether the underlying service
- allows over-writing of previously written resources.
-
- When the client closes the request channel, the service will respond with
- a `WriteResponse`. The service will not view the resource as `complete`
- until the client has sent a `WriteRequest` with `finish_write` set to
- `true`. Sending any requests on a stream after sending a request with
- `finish_write` set to `true` will cause an error. The client **should**
- check the `WriteResponse` it receives to determine how much data the
- service was able to commit and whether the service views the resource as
- `complete` or not.
+ The errors returned by the service are in the Google canonical error space.
"""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def QueryWriteStatus(self, request, context):
- """`QueryWriteStatus()` is used to find the `committed_size` for a resource
- that is being written, which can then be used as the `write_offset` for
- the next `Write()` call.
-
- If the resource does not exist (i.e., the resource has been deleted, or the
- first `Write()` has not yet reached the service), this method returns the
- error `NOT_FOUND`.
-
- The client **may** call `QueryWriteStatus()` at any time to determine how
- much data has been processed for this resource. This is useful if the
- client is buffering data and needs to know which data can be safely
- evicted. For any sequence of `QueryWriteStatus()` calls for a given
- resource name, the sequence of returned `committed_size` values will be
- non-decreasing.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+
+ def Read(self, request, context):
+ """`Read()` is used to retrieve the contents of a resource as a sequence
+ of bytes. The bytes are returned in a sequence of responses, and the
+ responses are delivered as the results of a server-side streaming RPC.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def Write(self, request_iterator, context):
+ """`Write()` is used to send the contents of a resource as a sequence of
+ bytes. The bytes are sent in a sequence of request protos of a client-side
+ streaming RPC.
+
+ A `Write()` action is resumable. If there is an error or the connection is
+ broken during the `Write()`, the client should check the status of the
+ `Write()` by calling `QueryWriteStatus()` and continue writing from the
+ returned `committed_size`. This may be less than the amount of data the
+ client previously sent.
+
+ Calling `Write()` on a resource name that was previously written and
+ finalized could cause an error, depending on whether the underlying service
+ allows over-writing of previously written resources.
+
+ When the client closes the request channel, the service will respond with
+ a `WriteResponse`. The service will not view the resource as `complete`
+ until the client has sent a `WriteRequest` with `finish_write` set to
+ `true`. Sending any requests on a stream after sending a request with
+ `finish_write` set to `true` will cause an error. The client **should**
+ check the `WriteResponse` it receives to determine how much data the
+ service was able to commit and whether the service views the resource as
+ `complete` or not.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def QueryWriteStatus(self, request, context):
+ """`QueryWriteStatus()` is used to find the `committed_size` for a resource
+ that is being written, which can then be used as the `write_offset` for
+ the next `Write()` call.
+
+ If the resource does not exist (i.e., the resource has been deleted, or the
+ first `Write()` has not yet reached the service), this method returns the
+ error `NOT_FOUND`.
+
+ The client **may** call `QueryWriteStatus()` at any time to determine how
+ much data has been processed for this resource. This is useful if the
+ client is buffering data and needs to know which data can be safely
+ evicted. For any sequence of `QueryWriteStatus()` calls for a given
+ resource name, the sequence of returned `committed_size` values will be
+ non-decreasing.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
def add_ByteStreamServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'Read': grpc.unary_stream_rpc_method_handler(
- servicer.Read,
- request_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.FromString,
- response_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.SerializeToString,
- ),
- 'Write': grpc.stream_unary_rpc_method_handler(
- servicer.Write,
- request_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.FromString,
- response_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.SerializeToString,
- ),
- 'QueryWriteStatus': grpc.unary_unary_rpc_method_handler(
- servicer.QueryWriteStatus,
- request_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.FromString,
- response_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'google.bytestream.ByteStream', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
+ rpc_method_handlers = {
+ 'Read': grpc.unary_stream_rpc_method_handler(
+ servicer.Read,
+ request_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.FromString,
+ response_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.SerializeToString,
+ ),
+ 'Write': grpc.stream_unary_rpc_method_handler(
+ servicer.Write,
+ request_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.FromString,
+ response_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.SerializeToString,
+ ),
+ 'QueryWriteStatus': grpc.unary_unary_rpc_method_handler(
+ servicer.QueryWriteStatus,
+ request_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.FromString,
+ response_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'google.bytestream.ByteStream', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class ByteStream(object):
+ """#### Introduction
+
+ The Byte Stream API enables a client to read and write a stream of bytes to
+ and from a resource. Resources have names, and these names are supplied in
+ the API calls below to identify the resource that is being read from or
+ written to.
+
+ All implementations of the Byte Stream API export the interface defined here:
+
+ * `Read()`: Reads the contents of a resource.
+
+ * `Write()`: Writes the contents of a resource. The client can call `Write()`
+ multiple times with the same resource and can check the status of the write
+ by calling `QueryWriteStatus()`.
+
+ #### Service parameters and metadata
+
+ The ByteStream API provides no direct way to access/modify any metadata
+ associated with the resource.
+
+ #### Errors
+
+ The errors returned by the service are in the Google canonical error space.
+ """
+
+ @staticmethod
+ def Read(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_stream(request, target, '/google.bytestream.ByteStream/Read',
+ google_dot_bytestream_dot_bytestream__pb2.ReadRequest.SerializeToString,
+ google_dot_bytestream_dot_bytestream__pb2.ReadResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def Write(request_iterator,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.stream_unary(request_iterator, target, '/google.bytestream.ByteStream/Write',
+ google_dot_bytestream_dot_bytestream__pb2.WriteRequest.SerializeToString,
+ google_dot_bytestream_dot_bytestream__pb2.WriteResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def QueryWriteStatus(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/google.bytestream.ByteStream/QueryWriteStatus',
+ google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.SerializeToString,
+ google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
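
Editor's note: the regenerated module above keeps the channel-based ByteStreamStub alongside the new experimental ByteStream helpers, so existing callers are unaffected. A minimal client-side sketch using the stub; the endpoint and resource name below are placeholders, not values from this merge:

    import grpc

    from buildstream._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc

    # Read the contents of a resource as a stream of chunks.
    with grpc.insecure_channel("localhost:50051") as channel:
        bytestream = bytestream_pb2_grpc.ByteStreamStub(channel)
        request = bytestream_pb2.ReadRequest()
        request.resource_name = "blobs/<hash>/<size>"  # placeholder resource name
        data = b""
        for response in bytestream.Read(request):
            data += response.data
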
diff --git a/src/buildstream/_protos/google/longrunning/operations_pb2_grpc.py b/src/buildstream/_protos/google/longrunning/operations_pb2_grpc.py
index 8f89862e7..11a47e0d3 100644
--- a/src/buildstream/_protos/google/longrunning/operations_pb2_grpc.py
+++ b/src/buildstream/_protos/google/longrunning/operations_pb2_grpc.py
@@ -6,127 +6,205 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class OperationsStub(object):
- """Manages long-running operations with an API service.
-
- When an API method normally takes long time to complete, it can be designed
- to return [Operation][google.longrunning.Operation] to the client, and the client can use this
- interface to receive the real response asynchronously by polling the
- operation resource, or pass the operation resource to another API (such as
- Google Cloud Pub/Sub API) to receive the response. Any API service that
- returns long-running operations should implement the `Operations` interface
- so developers can have a consistent client experience.
- """
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
+ """Manages long-running operations with an API service.
+
+ When an API method normally takes long time to complete, it can be designed
+ to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+ interface to receive the real response asynchronously by polling the
+ operation resource, or pass the operation resource to another API (such as
+ Google Cloud Pub/Sub API) to receive the response. Any API service that
+ returns long-running operations should implement the `Operations` interface
+ so developers can have a consistent client experience.
"""
- self.ListOperations = channel.unary_unary(
- '/google.longrunning.Operations/ListOperations',
- request_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString,
- response_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString,
- )
- self.GetOperation = channel.unary_unary(
- '/google.longrunning.Operations/GetOperation',
- request_serializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString,
- response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
- )
- self.DeleteOperation = channel.unary_unary(
- '/google.longrunning.Operations/DeleteOperation',
- request_serializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString,
- response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
- )
- self.CancelOperation = channel.unary_unary(
- '/google.longrunning.Operations/CancelOperation',
- request_serializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString,
- response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
- )
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.ListOperations = channel.unary_unary(
+ '/google.longrunning.Operations/ListOperations',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString,
+ )
+ self.GetOperation = channel.unary_unary(
+ '/google.longrunning.Operations/GetOperation',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
+ self.DeleteOperation = channel.unary_unary(
+ '/google.longrunning.Operations/DeleteOperation',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ )
+ self.CancelOperation = channel.unary_unary(
+ '/google.longrunning.Operations/CancelOperation',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ )
class OperationsServicer(object):
- """Manages long-running operations with an API service.
-
- When an API method normally takes long time to complete, it can be designed
- to return [Operation][google.longrunning.Operation] to the client, and the client can use this
- interface to receive the real response asynchronously by polling the
- operation resource, or pass the operation resource to another API (such as
- Google Cloud Pub/Sub API) to receive the response. Any API service that
- returns long-running operations should implement the `Operations` interface
- so developers can have a consistent client experience.
- """
-
- def ListOperations(self, request, context):
- """Lists operations that match the specified filter in the request. If the
- server doesn't support this method, it returns `UNIMPLEMENTED`.
-
- NOTE: the `name` binding below allows API services to override the binding
- to use different resource name schemes, such as `users/*/operations`.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def GetOperation(self, request, context):
- """Gets the latest state of a long-running operation. Clients can use this
- method to poll the operation result at intervals as recommended by the API
- service.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def DeleteOperation(self, request, context):
- """Deletes a long-running operation. This method indicates that the client is
- no longer interested in the operation result. It does not cancel the
- operation. If the server doesn't support this method, it returns
- `google.rpc.Code.UNIMPLEMENTED`.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def CancelOperation(self, request, context):
- """Starts asynchronous cancellation on a long-running operation. The server
- makes a best effort to cancel the operation, but success is not
- guaranteed. If the server doesn't support this method, it returns
- `google.rpc.Code.UNIMPLEMENTED`. Clients can use
- [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
- other methods to check whether the cancellation succeeded or whether the
- operation completed despite cancellation. On successful cancellation,
- the operation is not deleted; instead, it becomes an operation with
- an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- corresponding to `Code.CANCELLED`.
+ """Manages long-running operations with an API service.
+
+ When an API method normally takes long time to complete, it can be designed
+ to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+ interface to receive the real response asynchronously by polling the
+ operation resource, or pass the operation resource to another API (such as
+ Google Cloud Pub/Sub API) to receive the response. Any API service that
+ returns long-running operations should implement the `Operations` interface
+ so developers can have a consistent client experience.
"""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+
+ def ListOperations(self, request, context):
+ """Lists operations that match the specified filter in the request. If the
+ server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+ NOTE: the `name` binding below allows API services to override the binding
+ to use different resource name schemes, such as `users/*/operations`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetOperation(self, request, context):
+ """Gets the latest state of a long-running operation. Clients can use this
+ method to poll the operation result at intervals as recommended by the API
+ service.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def DeleteOperation(self, request, context):
+ """Deletes a long-running operation. This method indicates that the client is
+ no longer interested in the operation result. It does not cancel the
+ operation. If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def CancelOperation(self, request, context):
+ """Starts asynchronous cancellation on a long-running operation. The server
+ makes a best effort to cancel the operation, but success is not
+ guaranteed. If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`. Clients can use
+ [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ other methods to check whether the cancellation succeeded or whether the
+ operation completed despite cancellation. On successful cancellation,
+ the operation is not deleted; instead, it becomes an operation with
+ an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ corresponding to `Code.CANCELLED`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
def add_OperationsServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'ListOperations': grpc.unary_unary_rpc_method_handler(
- servicer.ListOperations,
- request_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.FromString,
- response_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.SerializeToString,
- ),
- 'GetOperation': grpc.unary_unary_rpc_method_handler(
- servicer.GetOperation,
- request_deserializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.FromString,
- response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
- ),
- 'DeleteOperation': grpc.unary_unary_rpc_method_handler(
- servicer.DeleteOperation,
- request_deserializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.FromString,
- response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
- ),
- 'CancelOperation': grpc.unary_unary_rpc_method_handler(
- servicer.CancelOperation,
- request_deserializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.FromString,
- response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'google.longrunning.Operations', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
+ rpc_method_handlers = {
+ 'ListOperations': grpc.unary_unary_rpc_method_handler(
+ servicer.ListOperations,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.SerializeToString,
+ ),
+ 'GetOperation': grpc.unary_unary_rpc_method_handler(
+ servicer.GetOperation,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ 'DeleteOperation': grpc.unary_unary_rpc_method_handler(
+ servicer.DeleteOperation,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ 'CancelOperation': grpc.unary_unary_rpc_method_handler(
+ servicer.CancelOperation,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'google.longrunning.Operations', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class Operations(object):
+ """Manages long-running operations with an API service.
+
+ When an API method normally takes long time to complete, it can be designed
+ to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+ interface to receive the real response asynchronously by polling the
+ operation resource, or pass the operation resource to another API (such as
+ Google Cloud Pub/Sub API) to receive the response. Any API service that
+ returns long-running operations should implement the `Operations` interface
+ so developers can have a consistent client experience.
+ """
+
+ @staticmethod
+ def ListOperations(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/ListOperations',
+ google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def GetOperation(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/GetOperation',
+ google_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString,
+ google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def DeleteOperation(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/DeleteOperation',
+ google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString,
+ google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def CancelOperation(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/CancelOperation',
+ google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString,
+ google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
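
Editor's note: likewise, the channel-based OperationsStub continues to work as before; only the layout and the experimental Operations helpers are new. A minimal polling sketch; the endpoint and operation name are placeholders:

    import grpc

    from buildstream._protos.google.longrunning import operations_pb2, operations_pb2_grpc

    # Poll a long-running operation by name, as described in the docstrings above.
    with grpc.insecure_channel("localhost:50051") as channel:
        operations = operations_pb2_grpc.OperationsStub(channel)
        request = operations_pb2.GetOperationRequest(name="operations/example")
        operation = operations.GetOperation(request)
        if operation.done:
            print(operation.response)  # or operation.error on failure
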
diff --git a/src/buildstream/_sourcecache.py b/src/buildstream/_sourcecache.py
index dcde0b426..fd75be34d 100644
--- a/src/buildstream/_sourcecache.py
+++ b/src/buildstream/_sourcecache.py
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2019 Bloomberg Finance LP
+# Copyright (C) 2019-2020 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
@@ -20,98 +20,14 @@
import os
import grpc
-from ._remote import BaseRemote
from ._cas.casremote import BlobNotFound
from .storage._casbaseddirectory import CasBasedDirectory
-from ._basecache import BaseCache
-from ._exceptions import CASError, CASRemoteError, SourceCacheError, RemoteError
+from ._assetcache import AssetCache
+from ._exceptions import CASError, CASRemoteError, SourceCacheError
from . import utils
-from ._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc, source_pb2, source_pb2_grpc
+from ._protos.buildstream.v2 import source_pb2
-
-class SourceRemote(BaseRemote):
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.source_service = None
-
- def close(self):
- self.source_service = None
- super().close()
-
- def _configure_protocols(self):
- # set up source service
- self.source_service = source_pb2_grpc.SourceServiceStub(self.channel)
-
- # _check():
- #
- # Check if this remote provides everything required for the
- # particular kind of remote. This is expected to be called as part
- # of check()
- #
- # Raises:
- # RemoteError: If the upstream has a problem
- #
- def _check(self):
- capabilities_service = buildstream_pb2_grpc.CapabilitiesStub(self.channel)
-
- # check that the service supports sources
- try:
- request = buildstream_pb2.GetCapabilitiesRequest()
- if self.instance_name:
- request.instance_name = self.instance_name
- response = capabilities_service.GetCapabilities(request)
- except grpc.RpcError as e:
- # Check if this remote has the artifact service
- if e.code() == grpc.StatusCode.UNIMPLEMENTED:
- raise RemoteError(
- "Configured remote does not have the BuildStream "
- "capabilities service. Please check remote configuration."
- )
- raise RemoteError("Remote initialisation failed with status {}: {}".format(e.code().name, e.details()))
-
- if not response.source_capabilities:
- raise RemoteError("Configured remote does not support source service")
-
- if self.spec.push and not response.source_capabilities.allow_updates:
- raise RemoteError("Source server does not allow push")
-
- # get_source():
- #
- # Get a source proto for a given source_ref from the remote.
- #
- # Args:
- # source_ref (str): The source ref of the source to pull.
- #
- # Returns:
- # (Source): The source proto
- #
- # Raises:
- # grpc.RpcError: If something goes wrong during the request.
- #
- def get_source(self, source_ref):
- request = source_pb2.GetSourceRequest()
- request.cache_key = source_ref
- return self.source_service.GetSource(request)
-
- # update_source():
- #
- # Update the source on the remote.
- #
- # Args:
- # source_ref (str): The source ref of the source to update.
- # source (Source): The proto to update with.
- #
- # Returns:
- # (bool): Whether the update was successful.
- #
- # Raises:
- # grpc.RpcError: If something goes wrong during the request.
- #
- def update_source(self, source_ref, source):
- request = source_pb2.UpdateSourceRequest()
- request.cache_key = source_ref
- request.source.CopyFrom(source)
- return self.source_service.UpdateSource(request)
+REMOTE_ASSET_SOURCE_URN_TEMPLATE = "urn:fdc:buildstream.build:2020:source:{}"
# Class that keeps config of remotes and deals with caching of sources.
@@ -119,12 +35,10 @@ class SourceRemote(BaseRemote):
# Args:
# context (Context): The Buildstream context
#
-class SourceCache(BaseCache):
+class SourceCache(AssetCache):
spec_name = "source_cache_specs"
- spec_error = SourceCacheError
config_node_name = "source-caches"
- index_remote_class = SourceRemote
def __init__(self, context):
super().__init__(context)
@@ -214,15 +128,15 @@ class SourceCache(BaseCache):
index_remotes = self._index_remotes[project]
storage_remotes = self._storage_remotes[project]
- # First fetch the source proto so we know what to pull
- source_proto = None
+ # First fetch the source directory digest so we know what to pull
+ source_digest = None
for remote in index_remotes:
try:
remote.init()
source.status("Pulling source {} <- {}".format(display_key, remote))
- source_proto = self._pull_source(ref, remote)
- if source_proto is None:
+ source_digest = self._pull_source(ref, remote)
+ if source_digest is None:
source.info(
"Remote source service ({}) does not have source {} cached".format(remote, display_key)
)
@@ -230,7 +144,7 @@ class SourceCache(BaseCache):
except CASError as e:
raise SourceCacheError("Failed to pull source {}: {}".format(display_key, e)) from e
- if not source_proto:
+ if not source_digest:
return False
for remote in storage_remotes:
@@ -239,8 +153,8 @@ class SourceCache(BaseCache):
source.status("Pulling data for source {} <- {}".format(display_key, remote))
# Fetch source blobs
- self.cas._fetch_directory(remote, source_proto.files)
- required_blobs = self.cas.required_blobs_for_directory(source_proto.files)
+ self.cas._fetch_directory(remote, source_digest)
+ required_blobs = self.cas.required_blobs_for_directory(source_digest)
missing_blobs = self.cas.local_missing_blobs(required_blobs)
self.cas.fetch_blobs(remote, missing_blobs)
@@ -337,11 +251,15 @@ class SourceCache(BaseCache):
return os.path.join(self._basedir, ref)
def _pull_source(self, source_ref, remote):
+ uri = REMOTE_ASSET_SOURCE_URN_TEMPLATE.format(source_ref)
+
try:
remote.init()
- response = remote.get_source(source_ref)
- self._store_proto(response, source_ref)
- return response
+ response = remote.fetch_directory([uri])
+ if not response:
+ return None
+ self._store_source(source_ref, response.root_directory_digest)
+ return response.root_directory_digest
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.NOT_FOUND:
@@ -349,12 +267,15 @@ class SourceCache(BaseCache):
return None
def _push_source(self, source_ref, remote):
+ uri = REMOTE_ASSET_SOURCE_URN_TEMPLATE.format(source_ref)
+
try:
remote.init()
source_proto = self._get_source(source_ref)
- return remote.update_source(source_ref, source_proto)
+ remote.push_directory([uri], source_proto.files)
+ return True
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
raise SourceCacheError("Failed to push source with status {}: {}".format(e.code().name, e.details()))
- return None
+ return False
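
Editor's note: with this change the SourceCache resolves and publishes source refs through the Remote Asset API, keyed by the urn:fdc:buildstream.build:2020:source:{ref} URN, instead of the removed SourceService. A rough protocol-level sketch of the equivalent Fetch/Push calls, assuming the remote_asset bindings added in this merge; the endpoint is a placeholder, and BuildStream itself goes through its remote.fetch_directory()/push_directory() wrappers rather than raw stubs:

    import grpc

    from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2, remote_asset_pb2_grpc

    REMOTE_ASSET_SOURCE_URN_TEMPLATE = "urn:fdc:buildstream.build:2020:source:{}"

    def fetch_source_digest(target, source_ref):
        # Resolve a source ref to its CAS root directory digest via the Fetch service.
        with grpc.insecure_channel(target) as channel:
            fetch = remote_asset_pb2_grpc.FetchStub(channel)
            request = remote_asset_pb2.FetchDirectoryRequest()
            request.uris.append(REMOTE_ASSET_SOURCE_URN_TEMPLATE.format(source_ref))
            try:
                response = fetch.FetchDirectory(request)
            except grpc.RpcError as e:
                if e.code() == grpc.StatusCode.NOT_FOUND:
                    return None
                raise
            return response.root_directory_digest

    def push_source_digest(target, source_ref, root_digest):
        # Associate the URN with an already-uploaded CAS directory via the Push service.
        # Field names follow the Remote Asset API proto added in this merge.
        with grpc.insecure_channel(target) as channel:
            push = remote_asset_pb2_grpc.PushStub(channel)
            request = remote_asset_pb2.PushDirectoryRequest()
            request.uris.append(REMOTE_ASSET_SOURCE_URN_TEMPLATE.format(source_ref))
            request.root_directory_digest.CopyFrom(root_digest)
            push.PushDirectory(request)

As in the updated test helpers further down in this diff, a production client would also check response.status.code against code_pb2.OK before trusting the returned digest.
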
diff --git a/tests/artifactcache/artifactservice.py b/tests/artifactcache/artifactservice.py
deleted file mode 100644
index c640665a3..000000000
--- a/tests/artifactcache/artifactservice.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#
-# Copyright (C) 2019 Bloomberg Finance LP
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library. If not, see <http://www.gnu.org/licenses/>.
-#
-# Authors: Raoul Hidalgo Charman <raoul.hidalgocharman@codethink.co.uk>
-#
-import os
-from urllib.parse import urlparse
-
-import grpc
-import pytest
-
-from buildstream._protos.buildstream.v2.artifact_pb2 import Artifact, GetArtifactRequest, UpdateArtifactRequest
-from buildstream._protos.buildstream.v2.artifact_pb2_grpc import ArtifactServiceStub
-from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as re_pb2
-from buildstream import utils
-
-from tests.testutils.artifactshare import create_artifact_share
-
-
-def test_artifact_get_not_found(tmpdir):
- sharedir = os.path.join(str(tmpdir), "share")
- with create_artifact_share(sharedir) as share:
- # set up artifact service stub
- url = urlparse(share.repo)
- with grpc.insecure_channel("{}:{}".format(url.hostname, url.port)) as channel:
- artifact_stub = ArtifactServiceStub(channel)
-
- # Run GetArtifact and check it throws a not found error
- request = GetArtifactRequest()
- request.cache_key = "@artifact/something/not_there"
- try:
- artifact_stub.GetArtifact(request)
- except grpc.RpcError as e:
- assert e.code() == grpc.StatusCode.NOT_FOUND
- assert e.details() == "Artifact proto not found"
- else:
- assert False
-
-
-# Successfully getting the artifact
-@pytest.mark.parametrize("files", ["present", "absent", "invalid"])
-def test_update_artifact(tmpdir, files):
- sharedir = os.path.join(str(tmpdir), "share")
- with create_artifact_share(sharedir, casd=True) as share:
- # put files object
- if files == "present":
- directory = re_pb2.Directory()
- digest = share.cas.add_object(buffer=directory.SerializeToString())
- elif files == "invalid":
- digest = share.cas.add_object(buffer="abcdefghijklmnop".encode("utf-8"))
- elif files == "absent":
- digest = utils._message_digest("abcdefghijklmnop".encode("utf-8"))
-
- url = urlparse(share.repo)
-
- with grpc.insecure_channel("{}:{}".format(url.hostname, url.port)) as channel:
- artifact_stub = ArtifactServiceStub(channel)
-
- # initialise an artifact
- artifact = Artifact()
- artifact.version = 0
- artifact.build_success = True
- artifact.strong_key = "abcdefghijklmnop"
- artifact.files.hash = "hashymchashash"
- artifact.files.size_bytes = 10
-
- artifact.files.CopyFrom(digest)
-
- # Put it in the artifact share with an UpdateArtifactRequest
- request = UpdateArtifactRequest()
- request.artifact.CopyFrom(artifact)
- request.cache_key = "a-cache-key"
-
- # should return the same artifact back
- if files == "present":
- response = artifact_stub.UpdateArtifact(request)
- assert response == artifact
- else:
- try:
- artifact_stub.UpdateArtifact(request)
- except grpc.RpcError as e:
- assert e.code() == grpc.StatusCode.FAILED_PRECONDITION
- if files == "absent":
- assert e.details() == "Artifact files specified but no files found"
- elif files == "invalid":
- assert e.details() == "Artifact files specified but directory not found"
- return
-
- # If we uploaded the artifact check GetArtifact
- request = GetArtifactRequest()
- request.cache_key = "a-cache-key"
-
- response = artifact_stub.GetArtifact(request)
- assert response == artifact
diff --git a/tests/frontend/push.py b/tests/frontend/push.py
index 50e35461f..3a0afbd87 100644
--- a/tests/frontend/push.py
+++ b/tests/frontend/push.py
@@ -29,6 +29,7 @@ import pytest
from buildstream.exceptions import ErrorDomain
from buildstream.testing import cli, generate_project # pylint: disable=unused-import
+from buildstream.testing.runcli import Cli
from tests.testutils import (
create_artifact_share,
create_element_size,
@@ -497,16 +498,16 @@ def test_recently_pulled_artifact_does_not_expire(cli, datafiles, tmpdir):
assert_shared(cli, share, project, "element1.bst")
assert_shared(cli, share, project, "element2.bst")
- # Remove element1 from the local cache
- cli.remove_artifact_from_cache(project, "element1.bst")
- assert cli.get_element_state(project, "element1.bst") != "cached"
-
- # Pull the element1 from the remote cache (this should update its mtime)
- result = cli.run(project=project, args=["artifact", "pull", "element1.bst", "--remote", share.repo])
+ # Pull the element1 from the remote cache (this should update its mtime).
+ # Use a separate local cache for this to ensure the complete element is pulled.
+ cli2_path = os.path.join(str(tmpdir), "cli2")
+ os.mkdir(cli2_path)
+ cli2 = Cli(cli2_path)
+ result = cli2.run(project=project, args=["artifact", "pull", "element1.bst", "--remote", share.repo])
result.assert_success()
# Ensure element1 is cached locally
- assert cli.get_element_state(project, "element1.bst") == "cached"
+ assert cli2.get_element_state(project, "element1.bst") == "cached"
wait_for_cache_granularity()
diff --git a/tests/testutils/artifactshare.py b/tests/testutils/artifactshare.py
index e471d7989..bd9c97c61 100644
--- a/tests/testutils/artifactshare.py
+++ b/tests/testutils/artifactshare.py
@@ -6,14 +6,20 @@ from collections import namedtuple
from contextlib import ExitStack, contextmanager
from concurrent import futures
from multiprocessing import Process, Queue
+from urllib.parse import urlparse
import grpc
from buildstream._cas import CASCache
from buildstream._cas.casserver import create_server
from buildstream._exceptions import CASError
+from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2, remote_asset_pb2_grpc
from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
-from buildstream._protos.buildstream.v2 import artifact_pb2, source_pb2
+from buildstream._protos.buildstream.v2 import artifact_pb2
+from buildstream._protos.google.rpc import code_pb2
+
+REMOTE_ASSET_ARTIFACT_URN_TEMPLATE = "urn:fdc:buildstream.build:2020:artifact:{}"
+REMOTE_ASSET_SOURCE_URN_TEMPLATE = "urn:fdc:buildstream.build:2020:source:{}"
class BaseArtifactShare:
@@ -118,10 +124,6 @@ class ArtifactShare(BaseArtifactShare):
#
self.repodir = os.path.join(self.directory, "repo")
os.makedirs(self.repodir)
- self.artifactdir = os.path.join(self.repodir, "artifacts", "refs")
- os.makedirs(self.artifactdir)
- self.sourcedir = os.path.join(self.repodir, "source_protos")
- os.makedirs(self.sourcedir)
logdir = os.path.join(self.directory, "logs") if casd else None
@@ -153,30 +155,56 @@ class ArtifactShare(BaseArtifactShare):
return os.path.exists(object_path)
def get_artifact_proto(self, artifact_name):
- artifact_proto = artifact_pb2.Artifact()
- artifact_path = os.path.join(self.artifactdir, artifact_name)
-
+ url = urlparse(self.repo)
+ channel = grpc.insecure_channel("{}:{}".format(url.hostname, url.port))
try:
- with open(artifact_path, "rb") as f:
- artifact_proto.ParseFromString(f.read())
- except FileNotFoundError:
- return None
+ fetch_service = remote_asset_pb2_grpc.FetchStub(channel)
- return artifact_proto
+ uri = REMOTE_ASSET_ARTIFACT_URN_TEMPLATE.format(artifact_name)
- def get_source_proto(self, source_name):
- source_proto = source_pb2.Source()
- source_path = os.path.join(self.sourcedir, source_name)
+ request = remote_asset_pb2.FetchBlobRequest()
+ request.uris.append(uri)
+
+ try:
+ response = fetch_service.FetchBlob(request)
+ except grpc.RpcError as e:
+ if e.code() == grpc.StatusCode.NOT_FOUND:
+ return None
+ raise
+ if response.status.code != code_pb2.OK:
+ return None
+
+ return response.blob_digest
+ finally:
+ channel.close()
+
+ def get_source_proto(self, source_name):
+ url = urlparse(self.repo)
+ channel = grpc.insecure_channel("{}:{}".format(url.hostname, url.port))
try:
- with open(source_path, "rb") as f:
- source_proto.ParseFromString(f.read())
- except FileNotFoundError:
- return None
+ fetch_service = remote_asset_pb2_grpc.FetchStub(channel)
+
+ uri = REMOTE_ASSET_SOURCE_URN_TEMPLATE.format(source_name)
+
+ request = remote_asset_pb2.FetchDirectoryRequest()
+ request.uris.append(uri)
+
+ try:
+ response = fetch_service.FetchDirectory(request)
+ except grpc.RpcError as e:
+ if e.code() == grpc.StatusCode.NOT_FOUND:
+ return None
+ raise
+
+ if response.status.code != code_pb2.OK:
+ return None
- return source_proto
+ return response.root_directory_digest
+ finally:
+ channel.close()
- def get_cas_files(self, artifact_proto):
+ def get_cas_files(self, artifact_proto_digest):
reachable = set()
@@ -184,6 +212,17 @@ class ArtifactShare(BaseArtifactShare):
self.cas._reachable_refs_dir(reachable, digest, update_mtime=False, check_exists=True)
try:
+ artifact_proto_path = self.cas.objpath(artifact_proto_digest)
+ if not os.path.exists(artifact_proto_path):
+ return None
+
+ artifact_proto = artifact_pb2.Artifact()
+ try:
+ with open(artifact_proto_path, "rb") as f:
+ artifact_proto.ParseFromString(f.read())
+ except FileNotFoundError:
+ return None
+
if str(artifact_proto.files):
reachable_dir(artifact_proto.files)