author     Chandan Singh <csingh43@bloomberg.net>  2019-04-24 22:53:19 +0100
committer  Chandan Singh <csingh43@bloomberg.net>  2019-05-21 12:41:18 +0100
commit     070d053e5cc47e572e9f9e647315082bd7a15c63 (patch)
tree       7fb0fdff52f9b5f8a18ec8fe9c75b661f9e0839e /src/buildstream
parent     6c59e7901a52be961c2a1b671cf2b30f90bc4d0a (diff)
download   buildstream-070d053e5cc47e572e9f9e647315082bd7a15c63.tar.gz
Move source from 'buildstream' to 'src/buildstream'
This was discussed in #1008. Fixes #1009.
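
The packaging changes that wire up the new layout are outside this diff (which is limited to src/buildstream), but a move to a 'src' layout is typically accompanied by a package_dir mapping so that 'import buildstream' still resolves to src/buildstream once installed. A minimal sketch of what that usually looks like (illustrative only, not the actual setup.py change from this commit):

    # Illustrative sketch only -- not the setup.py change from this commit.
    from setuptools import setup, find_packages

    setup(
        name="BuildStream",
        package_dir={"": "src"},              # map the root package namespace to src/
        packages=find_packages(where="src"),  # discover buildstream and its subpackages
    )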
Diffstat (limited to 'src/buildstream')
-rw-r--r--  src/buildstream/__init__.py  41
-rw-r--r--  src/buildstream/__main__.py  17
-rw-r--r--  src/buildstream/_artifact.py  449
-rw-r--r--  src/buildstream/_artifactcache.py  617
-rw-r--r--  src/buildstream/_artifactelement.py  92
-rw-r--r--  src/buildstream/_basecache.py  307
-rw-r--r--  src/buildstream/_cachekey.py  68
-rw-r--r--  src/buildstream/_cas/__init__.py  21
-rw-r--r--  src/buildstream/_cas/cascache.py  1462
-rw-r--r--  src/buildstream/_cas/casremote.py  391
-rw-r--r--  src/buildstream/_cas/casserver.py  619
-rw-r--r--  src/buildstream/_context.py  766
-rw-r--r--  src/buildstream/_elementfactory.py  63
-rw-r--r--  src/buildstream/_exceptions.py  370
-rw-r--r--  src/buildstream/_frontend/__init__.py  25
-rw-r--r--  src/buildstream/_frontend/app.py  870
-rw-r--r--  src/buildstream/_frontend/cli.py  1277
-rw-r--r--  src/buildstream/_frontend/complete.py  338
-rw-r--r--  src/buildstream/_frontend/linuxapp.py  64
-rw-r--r--  src/buildstream/_frontend/profile.py  77
-rw-r--r--  src/buildstream/_frontend/status.py  523
-rw-r--r--  src/buildstream/_frontend/widget.py  806
-rw-r--r--  src/buildstream/_fuse/__init__.py  20
-rw-r--r--  src/buildstream/_fuse/fuse.py  1006
-rw-r--r--  src/buildstream/_fuse/hardlinks.py  218
-rw-r--r--  src/buildstream/_fuse/mount.py  196
-rw-r--r--  src/buildstream/_gitsourcebase.py  683
-rw-r--r--  src/buildstream/_includes.py  145
-rw-r--r--  src/buildstream/_loader/__init__.py  22
-rw-r--r--  src/buildstream/_loader/loadelement.py  181
-rw-r--r--  src/buildstream/_loader/loader.py  710
-rw-r--r--  src/buildstream/_loader/metaelement.py  60
-rw-r--r--  src/buildstream/_loader/metasource.py  42
-rw-r--r--  src/buildstream/_loader/types.py  112
-rw-r--r--  src/buildstream/_message.py  80
-rw-r--r--  src/buildstream/_options/__init__.py  20
-rw-r--r--  src/buildstream/_options/option.py  112
-rw-r--r--  src/buildstream/_options/optionarch.py  84
-rw-r--r--  src/buildstream/_options/optionbool.py  58
-rw-r--r--  src/buildstream/_options/optioneltmask.py  46
-rw-r--r--  src/buildstream/_options/optionenum.py  77
-rw-r--r--  src/buildstream/_options/optionflags.py  86
-rw-r--r--  src/buildstream/_options/optionos.py  41
-rw-r--r--  src/buildstream/_options/optionpool.py  295
-rw-r--r--  src/buildstream/_pipeline.py  516
-rw-r--r--  src/buildstream/_platform/__init__.py  20
-rw-r--r--  src/buildstream/_platform/darwin.py  48
-rw-r--r--  src/buildstream/_platform/linux.py  150
-rw-r--r--  src/buildstream/_platform/platform.py  164
-rw-r--r--  src/buildstream/_platform/unix.py  56
-rw-r--r--  src/buildstream/_plugincontext.py  239
-rw-r--r--  src/buildstream/_profile.py  160
-rw-r--r--  src/buildstream/_project.py  975
-rw-r--r--  src/buildstream/_projectrefs.py  155
-rw-r--r--  src/buildstream/_protos/__init__.py  0
-rw-r--r--  src/buildstream/_protos/build/__init__.py  0
-rw-r--r--  src/buildstream/_protos/build/bazel/__init__.py  0
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/__init__.py  0
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/execution/__init__.py  0
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/execution/v2/__init__.py  0
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto  1331
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py  2660
-rw-r--r--  src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py  593
-rw-r--r--  src/buildstream/_protos/build/bazel/semver/__init__.py  0
-rw-r--r--  src/buildstream/_protos/build/bazel/semver/semver.proto  24
-rw-r--r--  src/buildstream/_protos/build/bazel/semver/semver_pb2.py  90
-rw-r--r--  src/buildstream/_protos/build/bazel/semver/semver_pb2_grpc.py  3
-rw-r--r--  src/buildstream/_protos/buildstream/__init__.py  0
-rw-r--r--  src/buildstream/_protos/buildstream/v2/__init__.py  0
-rw-r--r--  src/buildstream/_protos/buildstream/v2/artifact.proto  88
-rw-r--r--  src/buildstream/_protos/buildstream/v2/artifact_pb2.py  387
-rw-r--r--  src/buildstream/_protos/buildstream/v2/artifact_pb2_grpc.py  68
-rw-r--r--  src/buildstream/_protos/buildstream/v2/buildstream.proto  95
-rw-r--r--  src/buildstream/_protos/buildstream/v2/buildstream_pb2.py  325
-rw-r--r--  src/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py  89
-rw-r--r--  src/buildstream/_protos/google/__init__.py  0
-rw-r--r--  src/buildstream/_protos/google/api/__init__.py  0
-rw-r--r--  src/buildstream/_protos/google/api/annotations.proto  31
-rw-r--r--  src/buildstream/_protos/google/api/annotations_pb2.py  46
-rw-r--r--  src/buildstream/_protos/google/api/annotations_pb2_grpc.py  3
-rw-r--r--  src/buildstream/_protos/google/api/http.proto  313
-rw-r--r--  src/buildstream/_protos/google/api/http_pb2.py  243
-rw-r--r--  src/buildstream/_protos/google/api/http_pb2_grpc.py  3
-rw-r--r--  src/buildstream/_protos/google/bytestream/__init__.py  0
-rw-r--r--  src/buildstream/_protos/google/bytestream/bytestream.proto  181
-rw-r--r--  src/buildstream/_protos/google/bytestream/bytestream_pb2.py  353
-rw-r--r--  src/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py  160
-rw-r--r--  src/buildstream/_protos/google/longrunning/__init__.py  0
-rw-r--r--  src/buildstream/_protos/google/longrunning/operations.proto  160
-rw-r--r--  src/buildstream/_protos/google/longrunning/operations_pb2.py  391
-rw-r--r--  src/buildstream/_protos/google/longrunning/operations_pb2_grpc.py  132
-rw-r--r--  src/buildstream/_protos/google/rpc/__init__.py  0
-rw-r--r--  src/buildstream/_protos/google/rpc/code.proto  186
-rw-r--r--  src/buildstream/_protos/google/rpc/code_pb2.py  133
-rw-r--r--  src/buildstream/_protos/google/rpc/code_pb2_grpc.py  3
-rw-r--r--  src/buildstream/_protos/google/rpc/status.proto  92
-rw-r--r--  src/buildstream/_protos/google/rpc/status_pb2.py  88
-rw-r--r--  src/buildstream/_protos/google/rpc/status_pb2_grpc.py  3
-rw-r--r--  src/buildstream/_scheduler/__init__.py  30
-rw-r--r--  src/buildstream/_scheduler/jobs/__init__.py  23
-rw-r--r--  src/buildstream/_scheduler/jobs/cachesizejob.py  41
-rw-r--r--  src/buildstream/_scheduler/jobs/cleanupjob.py  50
-rw-r--r--  src/buildstream/_scheduler/jobs/elementjob.py  115
-rw-r--r--  src/buildstream/_scheduler/jobs/job.py  682
-rw-r--r--  src/buildstream/_scheduler/queues/__init__.py  1
-rw-r--r--  src/buildstream/_scheduler/queues/artifactpushqueue.py  44
-rw-r--r--  src/buildstream/_scheduler/queues/buildqueue.py  117
-rw-r--r--  src/buildstream/_scheduler/queues/fetchqueue.py  80
-rw-r--r--  src/buildstream/_scheduler/queues/pullqueue.py  66
-rw-r--r--  src/buildstream/_scheduler/queues/queue.py  328
-rw-r--r--  src/buildstream/_scheduler/queues/sourcepushqueue.py  42
-rw-r--r--  src/buildstream/_scheduler/queues/trackqueue.py  62
-rw-r--r--  src/buildstream/_scheduler/resources.py  166
-rw-r--r--  src/buildstream/_scheduler/scheduler.py  602
-rw-r--r--  src/buildstream/_signals.py  203
-rw-r--r--  src/buildstream/_site.py  67
-rw-r--r--  src/buildstream/_sourcecache.py  249
-rw-r--r--  src/buildstream/_sourcefactory.py  64
-rw-r--r--  src/buildstream/_stream.py  1512
-rw-r--r--  src/buildstream/_variables.py  251
-rw-r--r--  src/buildstream/_version.py  522
-rw-r--r--  src/buildstream/_versions.py  36
-rw-r--r--  src/buildstream/_workspaces.py  650
-rw-r--r--  src/buildstream/_yaml.py  1432
-rw-r--r--  src/buildstream/buildelement.py  299
-rw-r--r--  src/buildstream/data/bst  21
-rw-r--r--  src/buildstream/data/build-all.sh.in  40
-rw-r--r--  src/buildstream/data/build-module.sh.in  43
-rw-r--r--  src/buildstream/data/projectconfig.yaml  183
-rw-r--r--  src/buildstream/data/userconfig.yaml  113
-rw-r--r--  src/buildstream/element.py  3062
-rw-r--r--  src/buildstream/plugin.py  929
-rw-r--r--  src/buildstream/plugins/elements/__init__.py  0
-rw-r--r--  src/buildstream/plugins/elements/autotools.py  75
-rw-r--r--  src/buildstream/plugins/elements/autotools.yaml  129
-rw-r--r--  src/buildstream/plugins/elements/cmake.py  74
-rw-r--r--  src/buildstream/plugins/elements/cmake.yaml  72
-rw-r--r--  src/buildstream/plugins/elements/compose.py  194
-rw-r--r--  src/buildstream/plugins/elements/compose.yaml  34
-rw-r--r--  src/buildstream/plugins/elements/distutils.py  51
-rw-r--r--  src/buildstream/plugins/elements/distutils.yaml  49
-rw-r--r--  src/buildstream/plugins/elements/filter.py  256
-rw-r--r--  src/buildstream/plugins/elements/filter.yaml  29
-rw-r--r--  src/buildstream/plugins/elements/import.py  129
-rw-r--r--  src/buildstream/plugins/elements/import.yaml  14
-rw-r--r--  src/buildstream/plugins/elements/junction.py  229
-rw-r--r--  src/buildstream/plugins/elements/make.py  56
-rw-r--r--  src/buildstream/plugins/elements/make.yaml  42
-rw-r--r--  src/buildstream/plugins/elements/makemaker.py  51
-rw-r--r--  src/buildstream/plugins/elements/makemaker.yaml  48
-rw-r--r--  src/buildstream/plugins/elements/manual.py  51
-rw-r--r--  src/buildstream/plugins/elements/manual.yaml  22
-rw-r--r--  src/buildstream/plugins/elements/meson.py  71
-rw-r--r--  src/buildstream/plugins/elements/meson.yaml  79
-rw-r--r--  src/buildstream/plugins/elements/modulebuild.py  51
-rw-r--r--  src/buildstream/plugins/elements/modulebuild.yaml  48
-rw-r--r--  src/buildstream/plugins/elements/pip.py  51
-rw-r--r--  src/buildstream/plugins/elements/pip.yaml  36
-rw-r--r--  src/buildstream/plugins/elements/qmake.py  51
-rw-r--r--  src/buildstream/plugins/elements/qmake.yaml  50
-rw-r--r--  src/buildstream/plugins/elements/script.py  69
-rw-r--r--  src/buildstream/plugins/elements/script.yaml  25
-rw-r--r--  src/buildstream/plugins/elements/stack.py  66
-rw-r--r--  src/buildstream/plugins/sources/__init__.py  0
-rw-r--r--  src/buildstream/plugins/sources/_downloadablefilesource.py  250
-rw-r--r--  src/buildstream/plugins/sources/bzr.py  210
-rw-r--r--  src/buildstream/plugins/sources/deb.py  83
-rw-r--r--  src/buildstream/plugins/sources/git.py  168
-rw-r--r--  src/buildstream/plugins/sources/local.py  147
-rw-r--r--  src/buildstream/plugins/sources/patch.py  101
-rw-r--r--  src/buildstream/plugins/sources/pip.py  254
-rw-r--r--  src/buildstream/plugins/sources/remote.py  93
-rw-r--r--  src/buildstream/plugins/sources/tar.py  202
-rw-r--r--  src/buildstream/plugins/sources/zip.py  181
-rw-r--r--  src/buildstream/sandbox/__init__.py  22
-rw-r--r--  src/buildstream/sandbox/_config.py  62
-rw-r--r--  src/buildstream/sandbox/_mount.py  149
-rw-r--r--  src/buildstream/sandbox/_mounter.py  147
-rw-r--r--  src/buildstream/sandbox/_sandboxbwrap.py  433
-rw-r--r--  src/buildstream/sandbox/_sandboxchroot.py  325
-rw-r--r--  src/buildstream/sandbox/_sandboxdummy.py  36
-rw-r--r--  src/buildstream/sandbox/_sandboxremote.py  577
-rw-r--r--  src/buildstream/sandbox/sandbox.py  717
-rw-r--r--  src/buildstream/scriptelement.py  297
-rw-r--r--  src/buildstream/source.py  1274
-rw-r--r--  src/buildstream/storage/__init__.py  22
-rw-r--r--  src/buildstream/storage/_casbaseddirectory.py  622
-rw-r--r--  src/buildstream/storage/_filebaseddirectory.py  273
-rw-r--r--  src/buildstream/storage/directory.py  211
-rw-r--r--  src/buildstream/testing/__init__.py  121
-rw-r--r--  src/buildstream/testing/_sourcetests/__init__.py  0
-rw-r--r--  src/buildstream/testing/_sourcetests/build_checkout.py  83
-rw-r--r--  src/buildstream/testing/_sourcetests/fetch.py  107
-rw-r--r--  src/buildstream/testing/_sourcetests/mirror.py  427
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/base.bst  5
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/base/base-alpine.bst  17
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/import-bin.bst  4
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/import-dev.bst  4
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/horsey.bst  3
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/pony.bst  1
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/zebry.bst  3
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/0.bst  7
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/1.bst  4
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/2.bst  4
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/3.bst  6
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/4.bst  2
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/5.bst  2
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/6.bst  4
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/7.bst  4
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/8.bst  4
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/9.bst  4
-rw-r--r--  src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/run.bst  2
-rw-r--r--  src/buildstream/testing/_sourcetests/project/files/bar  0
-rwxr-xr-x  src/buildstream/testing/_sourcetests/project/files/bin-files/usr/bin/hello  3
-rw-r--r--  src/buildstream/testing/_sourcetests/project/files/dev-files/usr/include/pony.h  12
-rw-r--r--  src/buildstream/testing/_sourcetests/project/files/etc-files/etc/buildstream/config  1
-rw-r--r--  src/buildstream/testing/_sourcetests/project/files/foo  0
-rw-r--r--  src/buildstream/testing/_sourcetests/project/files/source-bundle/llamas.txt  1
-rw-r--r--  src/buildstream/testing/_sourcetests/project/files/sub-project/elements/import-etc.bst  4
-rw-r--r--  src/buildstream/testing/_sourcetests/project/files/sub-project/files/etc-files/etc/animal.conf  1
-rw-r--r--  src/buildstream/testing/_sourcetests/project/files/sub-project/project.conf  4
-rw-r--r--  src/buildstream/testing/_sourcetests/project/project.conf  27
-rw-r--r--  src/buildstream/testing/_sourcetests/source_determinism.py  114
-rw-r--r--  src/buildstream/testing/_sourcetests/track.py  420
-rw-r--r--  src/buildstream/testing/_sourcetests/track_cross_junction.py  186
-rw-r--r--  src/buildstream/testing/_sourcetests/workspace.py  161
-rw-r--r--  src/buildstream/testing/_utils/__init__.py  10
-rw-r--r--  src/buildstream/testing/_utils/junction.py  83
-rw-r--r--  src/buildstream/testing/_utils/site.py  46
-rw-r--r--  src/buildstream/testing/integration.py  97
-rw-r--r--  src/buildstream/testing/repo.py  109
-rw-r--r--  src/buildstream/testing/runcli.py  883
-rw-r--r--  src/buildstream/types.py  177
-rw-r--r--  src/buildstream/utils.py  1293
234 files changed, 49947 insertions, 0 deletions
diff --git a/src/buildstream/__init__.py b/src/buildstream/__init__.py
new file mode 100644
index 000000000..62890a62f
--- /dev/null
+++ b/src/buildstream/__init__.py
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+# Plugin author facing APIs
+import os
+if "_BST_COMPLETION" not in os.environ:
+
+ # Special sauce to get the version from versioneer
+ from ._version import get_versions
+ __version__ = get_versions()['version']
+ del get_versions
+
+ from .utils import UtilError, ProgramNotFoundError
+ from .sandbox import Sandbox, SandboxFlags, SandboxCommandError
+ from .types import Scope, Consistency, CoreWarnings
+ from .plugin import Plugin
+ from .source import Source, SourceError, SourceFetcher
+ from .element import Element, ElementError
+ from .buildelement import BuildElement
+ from .scriptelement import ScriptElement
+
+ # XXX We are exposing a private member here as we expect it to move to a
+ # separate package soon. See the following discussion for more details:
+ # https://gitlab.com/BuildStream/buildstream/issues/739#note_124819869
+ from ._gitsourcebase import _GitSourceBase, _GitMirror
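
Because the plugin-facing names above are re-exported from the package root, moving the sources under src/ changes nothing for plugin authors or API users; a quick illustration (assuming an installed BuildStream, not code from this commit):

    # Illustration only: the public names remain importable from the top level.
    import buildstream
    from buildstream import Plugin, Source, Element, ElementError

    print(buildstream.__version__)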
diff --git a/src/buildstream/__main__.py b/src/buildstream/__main__.py
new file mode 100644
index 000000000..4b0fdabfe
--- /dev/null
+++ b/src/buildstream/__main__.py
@@ -0,0 +1,17 @@
+##################################################################
+# Private Entry Point #
+##################################################################
+#
+# This allows running the cli when BuildStream is uninstalled;
+# as long as the BuildStream repo is in PYTHONPATH, one can run it
+# with:
+#
+# python3 -m buildstream [program args]
+#
+# This is used when we need to run BuildStream before installing,
+# like when we build documentation.
+#
+if __name__ == '__main__':
+ # pylint: disable=no-value-for-parameter
+ from ._frontend.cli import cli
+ cli()
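
One practical consequence of this commit for the uninstalled invocation described above: PYTHONPATH now needs to point at the new src directory rather than the repository root, for example (illustrative checkout path):

    PYTHONPATH=/path/to/buildstream/src python3 -m buildstream --help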
diff --git a/src/buildstream/_artifact.py b/src/buildstream/_artifact.py
new file mode 100644
index 000000000..c353a5151
--- /dev/null
+++ b/src/buildstream/_artifact.py
@@ -0,0 +1,449 @@
+#
+# Copyright (C) 2019 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tom Pollard <tom.pollard@codethink.co.uk>
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+Artifact
+=========
+
+Implementation of the Artifact class which aims to 'abstract' direct
+artifact composite interaction away from the Element class
+
+"""
+
+import os
+import tempfile
+
+from ._protos.buildstream.v2.artifact_pb2 import Artifact as ArtifactProto
+from . import _yaml
+from . import utils
+from .types import Scope
+from .storage._casbaseddirectory import CasBasedDirectory
+
+
+# An Artifact class to abstract artifact operations
+# from the Element class
+#
+# Args:
+# element (Element): The Element object
+# context (Context): The BuildStream context
+# strong_key (str): The element's strong cache key, dependent on context
+# weak_key (str): The element's weak cache key
+#
+class Artifact():
+
+ version = 0
+
+ def __init__(self, element, context, *, strong_key=None, weak_key=None):
+ self._element = element
+ self._context = context
+ self._artifacts = context.artifactcache
+ self._cache_key = strong_key
+ self._weak_cache_key = weak_key
+ self._artifactdir = context.artifactdir
+ self._cas = context.get_cascache()
+ self._tmpdir = context.tmpdir
+ self._proto = None
+
+ self._metadata_keys = None # Strong and weak key tuple extracted from the artifact
+ self._metadata_dependencies = None # Dictionary of dependency strong keys from the artifact
+ self._metadata_workspaced = None # Boolean of whether it's a workspaced artifact
+ self._metadata_workspaced_dependencies = None # List of which dependencies are workspaced from the artifact
+ self._cached = None # Boolean of whether the artifact is cached
+
+ # get_files():
+ #
+ # Get a virtual directory for the artifact files content
+ #
+ # Returns:
+ # (Directory): The virtual directory object
+ #
+ def get_files(self):
+ files_digest = self._get_field_digest("files")
+
+ return CasBasedDirectory(self._cas, digest=files_digest)
+
+ # get_buildtree():
+ #
+ # Get a virtual directory for the artifact buildtree content
+ #
+ # Returns:
+ # (Directory): The virtual directory object
+ #
+ def get_buildtree(self):
+ buildtree_digest = self._get_field_digest("buildtree")
+
+ return CasBasedDirectory(self._cas, digest=buildtree_digest)
+
+ # get_extract_key():
+ #
+ # Get the key used to extract the artifact
+ #
+ # Returns:
+ # (str): The key
+ #
+ def get_extract_key(self):
+ return self._cache_key or self._weak_cache_key
+
+ # cache():
+ #
+ # Create the artifact and commit to cache
+ #
+ # Args:
+ # rootdir (str): An absolute path to the temp rootdir for artifact construction
+ # sandbox_build_dir (Directory): Virtual Directory object for the sandbox build-root
+ # collectvdir (Directory): Virtual Directory object from within the sandbox for collection
+ # buildresult (tuple): bool, short desc and detailed desc of result
+ # publicdata (dict): dict of public data to commit to artifact metadata
+ #
+ # Returns:
+ # (int): The size of the newly cached artifact
+ #
+ def cache(self, rootdir, sandbox_build_dir, collectvdir, buildresult, publicdata):
+
+ context = self._context
+ element = self._element
+ size = 0
+
+ filesvdir = None
+ buildtreevdir = None
+
+ artifact = ArtifactProto()
+
+ artifact.version = self.version
+
+ # Store result
+ artifact.build_success = buildresult[0]
+ artifact.build_error = buildresult[1]
+ artifact.build_error_details = "" if not buildresult[2] else buildresult[2]
+
+ # Store keys
+ artifact.strong_key = self._cache_key
+ artifact.weak_key = self._weak_cache_key
+
+ artifact.was_workspaced = bool(element._get_workspace())
+
+ # Store files
+ if collectvdir:
+ filesvdir = CasBasedDirectory(cas_cache=self._cas)
+ filesvdir.import_files(collectvdir)
+ artifact.files.CopyFrom(filesvdir._get_digest())
+ size += filesvdir.get_size()
+
+ # Store public data
+ with tempfile.NamedTemporaryFile(dir=self._tmpdir) as tmp:
+ _yaml.dump(_yaml.node_sanitize(publicdata), tmp.name)
+ public_data_digest = self._cas.add_object(path=tmp.name, link_directly=True)
+ artifact.public_data.CopyFrom(public_data_digest)
+ size += public_data_digest.size_bytes
+
+ # store build dependencies
+ for e in element.dependencies(Scope.BUILD):
+ new_build = artifact.build_deps.add()
+ new_build.element_name = e.name
+ new_build.cache_key = e._get_cache_key()
+ new_build.was_workspaced = bool(e._get_workspace())
+
+ # Store log file
+ log_filename = context.get_log_filename()
+ if log_filename:
+ digest = self._cas.add_object(path=log_filename)
+ element._build_log_path = self._cas.objpath(digest)
+ log = artifact.logs.add()
+ log.name = os.path.basename(log_filename)
+ log.digest.CopyFrom(digest)
+ size += log.digest.size_bytes
+
+ # Store build tree
+ if sandbox_build_dir:
+ buildtreevdir = CasBasedDirectory(cas_cache=self._cas)
+ buildtreevdir.import_files(sandbox_build_dir)
+ artifact.buildtree.CopyFrom(buildtreevdir._get_digest())
+ size += buildtreevdir.get_size()
+
+ os.makedirs(os.path.dirname(os.path.join(
+ self._artifactdir, element.get_artifact_name())), exist_ok=True)
+ keys = utils._deduplicate([self._cache_key, self._weak_cache_key])
+ for key in keys:
+ path = os.path.join(self._artifactdir, element.get_artifact_name(key=key))
+ with open(path, mode='w+b') as f:
+ f.write(artifact.SerializeToString())
+
+ return size
+
+ # cached_buildtree()
+ #
+ # Check if artifact is cached with expected buildtree. A
+ # buildtree will not be present if the rest of the partial artifact
+ # is not cached.
+ #
+ # Returns:
+ # (bool): True if artifact cached with buildtree, False if
+ # missing expected buildtree. Note this only confirms
+ # if a buildtree is present, not its contents.
+ #
+ def cached_buildtree(self):
+
+ buildtree_digest = self._get_field_digest("buildtree")
+ if buildtree_digest:
+ return self._cas.contains_directory(buildtree_digest, with_files=True)
+ else:
+ return False
+
+ # buildtree_exists()
+ #
+ # Check if artifact was created with a buildtree. This does not check
+ # whether the buildtree is present in the local cache.
+ #
+ # Returns:
+ # (bool): True if artifact was created with buildtree
+ #
+ def buildtree_exists(self):
+
+ artifact = self._get_proto()
+ return bool(str(artifact.buildtree))
+
+ # load_public_data():
+ #
+ # Loads the public data from the cached artifact
+ #
+ # Returns:
+ # (dict): The artifact's cached public data
+ #
+ def load_public_data(self):
+
+ # Load the public data from the artifact
+ artifact = self._get_proto()
+ meta_file = self._cas.objpath(artifact.public_data)
+ data = _yaml.load(meta_file, shortname='public.yaml')
+
+ return data
+
+ # load_build_result():
+ #
+ # Load the build result from the cached artifact
+ #
+ # Returns:
+ # (bool): Whether the artifact of this element present in the artifact cache represents a successful build
+ # (str): Short description of the result
+ # (str): Detailed description of the result
+ #
+ def load_build_result(self):
+
+ artifact = self._get_proto()
+ build_result = (artifact.build_success,
+ artifact.build_error,
+ artifact.build_error_details)
+
+ return build_result
+
+ # get_metadata_keys():
+ #
+ # Retrieve the strong and weak keys from the given artifact.
+ #
+ # Returns:
+ # (str): The strong key
+ # (str): The weak key
+ #
+ def get_metadata_keys(self):
+
+ if self._metadata_keys is not None:
+ return self._metadata_keys
+
+ # Extract proto
+ artifact = self._get_proto()
+
+ strong_key = artifact.strong_key
+ weak_key = artifact.weak_key
+
+ self._metadata_keys = (strong_key, weak_key)
+
+ return self._metadata_keys
+
+ # get_metadata_dependencies():
+ #
+ # Retrieve the hash of dependency keys from the given artifact.
+ #
+ # Returns:
+ # (dict): A dictionary of element names and their keys
+ #
+ def get_metadata_dependencies(self):
+
+ if self._metadata_dependencies is not None:
+ return self._metadata_dependencies
+
+ # Extract proto
+ artifact = self._get_proto()
+
+ self._metadata_dependencies = {dep.element_name: dep.cache_key for dep in artifact.build_deps}
+
+ return self._metadata_dependencies
+
+ # get_metadata_workspaced():
+ #
+ # Retrieve whether the given artifact was workspaced.
+ #
+ # Returns:
+ # (bool): Whether the given artifact was workspaced
+ #
+ def get_metadata_workspaced(self):
+
+ if self._metadata_workspaced is not None:
+ return self._metadata_workspaced
+
+ # Extract proto
+ artifact = self._get_proto()
+
+ self._metadata_workspaced = artifact.was_workspaced
+
+ return self._metadata_workspaced
+
+ # get_metadata_workspaced_dependencies():
+ #
+ # Retrieve the list of workspaced dependencies from the given artifact.
+ #
+ # Returns:
+ # (list): List of which dependencies are workspaced
+ #
+ def get_metadata_workspaced_dependencies(self):
+
+ if self._metadata_workspaced_dependencies is not None:
+ return self._metadata_workspaced_dependencies
+
+ # Extract proto
+ artifact = self._get_proto()
+
+ self._metadata_workspaced_dependencies = [dep.element_name for dep in artifact.build_deps
+ if dep.was_workspaced]
+
+ return self._metadata_workspaced_dependencies
+
+ # cached():
+ #
+ # Check whether the artifact corresponding to the stored cache key is
+ # available. This also checks whether all required parts of the artifact
+ # are available, which may depend on command and configuration. The cache
+ # key used for querying is dependent on the current context.
+ #
+ # Returns:
+ # (bool): Whether artifact is in local cache
+ #
+ def cached(self):
+
+ if self._cached is not None:
+ return self._cached
+
+ context = self._context
+
+ artifact = self._get_proto()
+
+ if not artifact:
+ self._cached = False
+ return False
+
+ # Determine whether directories are required
+ require_directories = context.require_artifact_directories
+ # Determine whether file contents are required as well
+ require_files = (context.require_artifact_files or
+ self._element._artifact_files_required())
+
+ # Check whether 'files' subdirectory is available, with or without file contents
+ if (require_directories and str(artifact.files) and
+ not self._cas.contains_directory(artifact.files, with_files=require_files)):
+ self._cached = False
+ return False
+
+ self._cached = True
+ return True
+
+ # cached_logs()
+ #
+ # Check if the artifact is cached with log files.
+ #
+ # Returns:
+ # (bool): True if artifact is cached with logs, False if
+ # element not cached or missing logs.
+ #
+ def cached_logs(self):
+ if not self._element._cached():
+ return False
+
+ artifact = self._get_proto()
+
+ for logfile in artifact.logs:
+ if not self._cas.contains(logfile.digest.hash):
+ return False
+
+ return True
+
+ # reset_cached()
+ #
+ # Allow the Artifact to query the filesystem to determine whether it
+ # is cached or not.
+ #
+ # NOTE: Since a normal buildstream run never makes a cached artifact
+ # *not* cached (`bst artifact delete` can do so, but doesn't query the
+ # Artifact afterwards), this does not update_cached if the artifact is
+ # already cached. If a cached artifact ever has its key changed, this
+ # will need to be revisited.
+ #
+ def reset_cached(self):
+ if self._cached is False:
+ self._cached = None
+
+ # _get_proto()
+ #
+ # Returns:
+ # (Artifact): Artifact proto
+ #
+ def _get_proto(self):
+ # Check if we've already cached the proto object
+ if self._proto is not None:
+ return self._proto
+
+ key = self.get_extract_key()
+
+ proto_path = os.path.join(self._artifactdir,
+ self._element.get_artifact_name(key=key))
+ artifact = ArtifactProto()
+ try:
+ with open(proto_path, mode='r+b') as f:
+ artifact.ParseFromString(f.read())
+ except FileNotFoundError:
+ return None
+
+ os.utime(proto_path)
+ # Cache the proto object
+ self._proto = artifact
+
+ return self._proto
+
+ # _get_field_digest()
+ #
+ # Returns:
+ # (Digest): Digest of field specified
+ #
+ def _get_field_digest(self, field):
+ artifact_proto = self._get_proto()
+ digest = getattr(artifact_proto, field)
+ if not str(digest):
+ return None
+
+ return digest
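
Taken together, the Element-facing flow of this class is: construct it with the element's cache keys, query cached(), then read results and content back out of the proto. A minimal sketch under those assumptions ('element', 'context' and the 'strong'/'weak' key strings are assumed to already exist; this is not code from the commit):

    # Minimal sketch, assuming 'element', 'context', 'strong' and 'weak' already exist.
    from buildstream._artifact import Artifact

    artifact = Artifact(element, context, strong_key=strong, weak_key=weak)

    if artifact.cached():
        success, error, details = artifact.load_build_result()
        files = artifact.get_files()          # CasBasedDirectory with the output files
        public = artifact.load_public_data()  # dict committed via cache()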
diff --git a/src/buildstream/_artifactcache.py b/src/buildstream/_artifactcache.py
new file mode 100644
index 000000000..091b44dda
--- /dev/null
+++ b/src/buildstream/_artifactcache.py
@@ -0,0 +1,617 @@
+#
+# Copyright (C) 2017-2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+import os
+import grpc
+
+from ._basecache import BaseCache
+from .types import _KeyStrength
+from ._exceptions import ArtifactError, CASError, CASCacheError
+from ._protos.buildstream.v2 import artifact_pb2, artifact_pb2_grpc
+
+from ._cas import CASRemoteSpec
+from .storage._casbaseddirectory import CasBasedDirectory
+from ._artifact import Artifact
+from . import utils
+
+
+# An ArtifactCacheSpec holds the user configuration for a single remote
+# artifact cache.
+#
+# Args:
+# url (str): Location of the remote artifact cache
+# push (bool): Whether we should attempt to push artifacts to this cache,
+# in addition to pulling from it.
+#
+class ArtifactCacheSpec(CASRemoteSpec):
+ pass
+
+
+# An ArtifactCache manages artifacts.
+#
+# Args:
+# context (Context): The BuildStream context
+#
+class ArtifactCache(BaseCache):
+
+ spec_class = ArtifactCacheSpec
+ spec_name = "artifact_cache_specs"
+ spec_error = ArtifactError
+ config_node_name = "artifacts"
+
+ def __init__(self, context):
+ super().__init__(context)
+
+ self._required_elements = set() # The elements required for this session
+
+ # create artifact directory
+ self.artifactdir = context.artifactdir
+ os.makedirs(self.artifactdir, exist_ok=True)
+
+ self.casquota.add_remove_callbacks(self.unrequired_artifacts, self.remove)
+ self.casquota.add_list_refs_callback(self.list_artifacts)
+
+ self.cas.add_reachable_directories_callback(self._reachable_directories)
+ self.cas.add_reachable_digests_callback(self._reachable_digests)
+
+ # mark_required_elements():
+ #
+ # Mark elements whose artifacts are required for the current run.
+ #
+ # Artifacts whose elements are in this list will be locked by the artifact
+ # cache and not touched for the duration of the current pipeline.
+ #
+ # Args:
+ # elements (iterable): A set of elements to mark as required
+ #
+ def mark_required_elements(self, elements):
+
+ # We risk calling this function with a generator, so we
+ # better consume it first.
+ #
+ elements = list(elements)
+
+ # Mark the elements as required. We cannot assume that we know the
+ # cache keys yet, so we only check that later when deleting.
+ #
+ self._required_elements.update(elements)
+
+ # For the cache keys which were resolved so far, we bump
+ # the mtime of them.
+ #
+ # This is just in case we have concurrent instances of
+ # BuildStream running with the same artifact cache; it will
+ # reduce the likelihood of one instance deleting artifacts
+ # which are required by the other.
+ for element in elements:
+ strong_key = element._get_cache_key(strength=_KeyStrength.STRONG)
+ weak_key = element._get_cache_key(strength=_KeyStrength.WEAK)
+ for key in (strong_key, weak_key):
+ if key:
+ ref = element.get_artifact_name(key)
+
+ try:
+ self.update_mtime(ref)
+ except ArtifactError:
+ pass
+
+ def update_mtime(self, ref):
+ try:
+ os.utime(os.path.join(self.artifactdir, ref))
+ except FileNotFoundError as e:
+ raise ArtifactError("Couldn't find artifact: {}".format(ref)) from e
+
+ # unrequired_artifacts()
+ #
+ # Returns iterator over artifacts that are not required in the build plan
+ #
+ # Returns:
+ # (iter): Iterator over tuples of (float, str) where float is the time
+ # and str is the artifact ref
+ #
+ def unrequired_artifacts(self):
+ required_artifacts = set(map(lambda x: x.get_artifact_name(),
+ self._required_elements))
+ for (mtime, artifact) in self._list_refs_mtimes(self.artifactdir):
+ if artifact not in required_artifacts:
+ yield (mtime, artifact)
+
+ def required_artifacts(self):
+ # Build a set of the cache keys which are required
+ # based on the required elements at cleanup time
+ #
+ # We lock both strong and weak keys - deleting one but not the
+ # other won't save space, but would be a user inconvenience.
+ for element in self._required_elements:
+ yield element._get_cache_key(strength=_KeyStrength.STRONG)
+ yield element._get_cache_key(strength=_KeyStrength.WEAK)
+
+ def full(self):
+ return self.casquota.full()
+
+ # add_artifact_size()
+ #
+ # Adds the reported size of a newly cached artifact to the
+ # overall estimated size.
+ #
+ # Args:
+ # artifact_size (int): The size to add.
+ #
+ def add_artifact_size(self, artifact_size):
+ cache_size = self.casquota.get_cache_size()
+ cache_size += artifact_size
+
+ self.casquota.set_cache_size(cache_size)
+
+ # preflight():
+ #
+ # Preflight check.
+ #
+ def preflight(self):
+ self.cas.preflight()
+
+ # contains():
+ #
+ # Check whether the artifact for the specified Element is already available
+ # in the local artifact cache.
+ #
+ # Args:
+ # element (Element): The Element to check
+ # key (str): The cache key to use
+ #
+ # Returns: True if the artifact is in the cache, False otherwise
+ #
+ def contains(self, element, key):
+ ref = element.get_artifact_name(key)
+
+ return os.path.exists(os.path.join(self.artifactdir, ref))
+
+ # list_artifacts():
+ #
+ # List artifacts in this cache in LRU order.
+ #
+ # Args:
+ # glob (str): An optional glob expression to be used to list artifacts satisfying the glob
+ #
+ # Returns:
+ # ([str]) - A list of artifact names as generated in LRU order
+ #
+ def list_artifacts(self, *, glob=None):
+ return [ref for _, ref in sorted(list(self._list_refs_mtimes(self.artifactdir, glob_expr=glob)))]
+
+ # remove():
+ #
+ # Removes the artifact for the specified ref from the local
+ # artifact cache.
+ #
+ # Args:
+ # ref (artifact_name): The name of the artifact to remove (as
+ # generated by `Element.get_artifact_name`)
+ # defer_prune (bool): Optionally declare whether pruning should
+ # occur immediately after the ref is removed.
+ #
+ # Returns:
+ # (int): The amount of space recovered in the cache, in bytes
+ #
+ def remove(self, ref, *, defer_prune=False):
+ try:
+ return self.cas.remove(ref, basedir=self.artifactdir, defer_prune=defer_prune)
+ except CASCacheError as e:
+ raise ArtifactError("{}".format(e)) from e
+
+ # prune():
+ #
+ # Prune the artifact cache of unreachable refs
+ #
+ def prune(self):
+ return self.cas.prune()
+
+ # diff():
+ #
+ # Return a list of files that have been added or modified between
+ # the artifacts described by key_a and key_b. This expects the
+ # provided keys to be strong cache keys
+ #
+ # Args:
+ # element (Element): The element whose artifacts to compare
+ # key_a (str): The first artifact strong key
+ # key_b (str): The second artifact strong key
+ #
+ def diff(self, element, key_a, key_b):
+ context = self.context
+ artifact_a = Artifact(element, context, strong_key=key_a)
+ artifact_b = Artifact(element, context, strong_key=key_b)
+ digest_a = artifact_a._get_proto().files
+ digest_b = artifact_b._get_proto().files
+
+ added = []
+ removed = []
+ modified = []
+
+ self.cas.diff_trees(digest_a, digest_b, added=added, removed=removed, modified=modified)
+
+ return modified, removed, added
+
+ # push():
+ #
+ # Push committed artifact to remote repository.
+ #
+ # Args:
+ # element (Element): The Element whose artifact is to be pushed
+ # artifact (Artifact): The artifact being pushed
+ #
+ # Returns:
+ # (bool): True if any remote was updated, False if no pushes were required
+ #
+ # Raises:
+ # (ArtifactError): if there was an error
+ #
+ def push(self, element, artifact):
+ project = element._get_project()
+
+ push_remotes = [r for r in self._remotes[project] if r.spec.push]
+
+ pushed = False
+
+ for remote in push_remotes:
+ remote.init()
+ display_key = element._get_brief_display_key()
+ element.status("Pushing artifact {} -> {}".format(display_key, remote.spec.url))
+
+ if self._push_artifact(element, artifact, remote):
+ element.info("Pushed artifact {} -> {}".format(display_key, remote.spec.url))
+ pushed = True
+ else:
+ element.info("Remote ({}) already has artifact {} cached".format(
+ remote.spec.url, element._get_brief_display_key()
+ ))
+
+ return pushed
+
+ # pull():
+ #
+ # Pull artifact from one of the configured remote repositories.
+ #
+ # Args:
+ # element (Element): The Element whose artifact is to be fetched
+ # key (str): The cache key to use
+ # pull_buildtrees (bool): Whether to pull buildtrees or not
+ #
+ # Returns:
+ # (bool): True if pull was successful, False if artifact was not available
+ #
+ def pull(self, element, key, *, pull_buildtrees=False):
+ display_key = key[:self.context.log_key_length]
+ project = element._get_project()
+
+ for remote in self._remotes[project]:
+ remote.init()
+ try:
+ element.status("Pulling artifact {} <- {}".format(display_key, remote.spec.url))
+
+ if self._pull_artifact(element, key, remote, pull_buildtrees=pull_buildtrees):
+ element.info("Pulled artifact {} <- {}".format(display_key, remote.spec.url))
+ # no need to pull from additional remotes
+ return True
+ else:
+ element.info("Remote ({}) does not have artifact {} cached".format(
+ remote.spec.url, display_key
+ ))
+
+ except CASError as e:
+ raise ArtifactError("Failed to pull artifact {}: {}".format(
+ display_key, e)) from e
+
+ return False
+
+ # pull_tree():
+ #
+ # Pull a single Tree rather than an artifact.
+ # Does not update local refs.
+ #
+ # Args:
+ # project (Project): The current project
+ # digest (Digest): The digest of the tree
+ #
+ def pull_tree(self, project, digest):
+ for remote in self._remotes[project]:
+ digest = self.cas.pull_tree(remote, digest)
+
+ if digest:
+ # no need to pull from additional remotes
+ return digest
+
+ return None
+
+ # push_message():
+ #
+ # Push the given protobuf message to all remotes.
+ #
+ # Args:
+ # project (Project): The current project
+ # message (Message): A protobuf message to push.
+ #
+ # Raises:
+ # (ArtifactError): if there was an error
+ #
+ def push_message(self, project, message):
+
+ if self._has_push_remotes:
+ push_remotes = [r for r in self._remotes[project] if r.spec.push]
+ else:
+ push_remotes = []
+
+ if not push_remotes:
+ raise ArtifactError("push_message was called, but no remote artifact " +
+ "servers are configured as push remotes.")
+
+ for remote in push_remotes:
+ message_digest = remote.push_message(message)
+
+ return message_digest
+
+ # link_key():
+ #
+ # Add a key for an existing artifact.
+ #
+ # Args:
+ # element (Element): The Element whose artifact is to be linked
+ # oldkey (str): An existing cache key for the artifact
+ # newkey (str): A new cache key for the artifact
+ #
+ def link_key(self, element, oldkey, newkey):
+ oldref = element.get_artifact_name(oldkey)
+ newref = element.get_artifact_name(newkey)
+
+ if not os.path.exists(os.path.join(self.artifactdir, newref)):
+ os.link(os.path.join(self.artifactdir, oldref),
+ os.path.join(self.artifactdir, newref))
+
+ # get_artifact_logs():
+ #
+ # Get the logs of an existing artifact
+ #
+ # Args:
+ # ref (str): The ref of the artifact
+ #
+ # Returns:
+ # logsdir (CasBasedDirectory): A CasBasedDirectory containing the artifact's logs
+ #
+ def get_artifact_logs(self, ref):
+ cache_id = self.cas.resolve_ref(ref, update_mtime=True)
+ vdir = CasBasedDirectory(self.cas, digest=cache_id).descend('logs')
+ return vdir
+
+ # fetch_missing_blobs():
+ #
+ # Fetch missing blobs from configured remote repositories.
+ #
+ # Args:
+ # project (Project): The current project
+ # missing_blobs (list): The Digests of the blobs to fetch
+ #
+ def fetch_missing_blobs(self, project, missing_blobs):
+ for remote in self._remotes[project]:
+ if not missing_blobs:
+ break
+
+ remote.init()
+
+ # fetch_blobs() will return the blobs that are still missing
+ missing_blobs = self.cas.fetch_blobs(remote, missing_blobs)
+
+ if missing_blobs:
+ raise ArtifactError("Blobs not found on configured artifact servers")
+
+ # find_missing_blobs():
+ #
+ # Find missing blobs from configured push remote repositories.
+ #
+ # Args:
+ # project (Project): The current project
+ # missing_blobs (list): The Digests of the blobs to check
+ #
+ # Returns:
+ # (list): The Digests of the blobs missing on at least one push remote
+ #
+ def find_missing_blobs(self, project, missing_blobs):
+ if not missing_blobs:
+ return []
+
+ push_remotes = [r for r in self._remotes[project] if r.spec.push]
+
+ remote_missing_blobs_set = set()
+
+ for remote in push_remotes:
+ remote.init()
+
+ remote_missing_blobs = self.cas.remote_missing_blobs(remote, missing_blobs)
+ remote_missing_blobs_set.update(remote_missing_blobs)
+
+ return list(remote_missing_blobs_set)
+
+ ################################################
+ # Local Private Methods #
+ ################################################
+
+ # _reachable_directories()
+ #
+ # Returns:
+ # (iter): Iterator over directory digests available from artifacts.
+ #
+ def _reachable_directories(self):
+ for root, _, files in os.walk(self.artifactdir):
+ for artifact_file in files:
+ artifact = artifact_pb2.Artifact()
+ with open(os.path.join(root, artifact_file), 'r+b') as f:
+ artifact.ParseFromString(f.read())
+
+ if str(artifact.files):
+ yield artifact.files
+
+ if str(artifact.buildtree):
+ yield artifact.buildtree
+
+ # _reachable_digests()
+ #
+ # Returns:
+ # (iter): Iterator over single file digests in artifacts
+ #
+ def _reachable_digests(self):
+ for root, _, files in os.walk(self.artifactdir):
+ for artifact_file in files:
+ artifact = artifact_pb2.Artifact()
+ with open(os.path.join(root, artifact_file), 'r+b') as f:
+ artifact.ParseFromString(f.read())
+
+ if str(artifact.public_data):
+ yield artifact.public_data
+
+ for log_file in artifact.logs:
+ yield log_file.digest
+
+ # _push_artifact()
+ #
+ # Pushes relevant directories and then artifact proto to remote.
+ #
+ # Args:
+ # element (Element): The element
+ # artifact (Artifact): The related artifact being pushed
+ # remote (CASRemote): Remote to push to
+ #
+ # Returns:
+ # (bool): whether the push was successful
+ #
+ def _push_artifact(self, element, artifact, remote):
+
+ artifact_proto = artifact._get_proto()
+
+ keys = list(utils._deduplicate([artifact_proto.strong_key, artifact_proto.weak_key]))
+
+ # Check whether the artifact is on the server
+ present = False
+ for key in keys:
+ get_artifact = artifact_pb2.GetArtifactRequest()
+ get_artifact.cache_key = element.get_artifact_name(key)
+ try:
+ artifact_service = artifact_pb2_grpc.ArtifactServiceStub(remote.channel)
+ artifact_service.GetArtifact(get_artifact)
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ raise ArtifactError("Error checking artifact cache: {}"
+ .format(e.details()))
+ else:
+ present = True
+ if present:
+ return False
+
+ try:
+ self.cas._send_directory(remote, artifact_proto.files)
+
+ if str(artifact_proto.buildtree):
+ try:
+ self.cas._send_directory(remote, artifact_proto.buildtree)
+ except FileNotFoundError:
+ pass
+
+ digests = []
+ if str(artifact_proto.public_data):
+ digests.append(artifact_proto.public_data)
+
+ for log_file in artifact_proto.logs:
+ digests.append(log_file.digest)
+
+ self.cas.send_blobs(remote, digests)
+
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
+ raise ArtifactError("Failed to push artifact blobs: {}".format(e.details()))
+ return False
+
+ # finally need to send the artifact proto
+ for key in keys:
+ update_artifact = artifact_pb2.UpdateArtifactRequest()
+ update_artifact.cache_key = element.get_artifact_name(key)
+ update_artifact.artifact.CopyFrom(artifact_proto)
+
+ try:
+ artifact_service = artifact_pb2_grpc.ArtifactServiceStub(remote.channel)
+ artifact_service.UpdateArtifact(update_artifact)
+ except grpc.RpcError as e:
+ raise ArtifactError("Failed to push artifact: {}".format(e.details()))
+
+ return True
+
+ # _pull_artifact()
+ #
+ # Args:
+ # element (Element): element to pull
+ # key (str): specific key of element to pull
+ # remote (CASRemote): remote to pull from
+ # pull_buildtrees (bool): whether to pull buildtrees or not
+ #
+ # Returns:
+ # (bool): whether the pull was successful
+ #
+ def _pull_artifact(self, element, key, remote, pull_buildtrees=False):
+
+ def __pull_digest(digest):
+ self.cas._fetch_directory(remote, digest)
+ required_blobs = self.cas.required_blobs_for_directory(digest)
+ missing_blobs = self.cas.local_missing_blobs(required_blobs)
+ if missing_blobs:
+ self.cas.fetch_blobs(remote, missing_blobs)
+
+ request = artifact_pb2.GetArtifactRequest()
+ request.cache_key = element.get_artifact_name(key=key)
+ try:
+ artifact_service = artifact_pb2_grpc.ArtifactServiceStub(remote.channel)
+ artifact = artifact_service.GetArtifact(request)
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ raise ArtifactError("Failed to pull artifact: {}".format(e.details()))
+ return False
+
+ try:
+ if str(artifact.files):
+ __pull_digest(artifact.files)
+
+ if pull_buildtrees and str(artifact.buildtree):
+ __pull_digest(artifact.buildtree)
+
+ digests = []
+ if str(artifact.public_data):
+ digests.append(artifact.public_data)
+
+ for log_digest in artifact.logs:
+ digests.append(log_digest.digest)
+
+ self.cas.fetch_blobs(remote, digests)
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ raise ArtifactError("Failed to pull artifact: {}".format(e.details()))
+ return False
+
+ # Write the artifact proto to cache
+ artifact_path = os.path.join(self.artifactdir, request.cache_key)
+ os.makedirs(os.path.dirname(artifact_path), exist_ok=True)
+ with open(artifact_path, 'w+b') as f:
+ f.write(artifact.SerializeToString())
+
+ return True
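
On disk, each cached artifact is simply the serialized Artifact proto stored under artifactdir at the ref produced by Element.get_artifact_name() (project/element/key). Reading one back can be sketched as follows (the directory and ref below are made up for illustration; this is not code from the commit):

    # Illustrative sketch -- the artifact directory and ref are assumptions.
    import os
    from buildstream._protos.buildstream.v2 import artifact_pb2

    artifactdir = "/home/user/.cache/buildstream/artifacts/refs"   # assumed location
    ref = "myproject/hello/" + "0" * 64                            # project/element/key

    proto = artifact_pb2.Artifact()
    with open(os.path.join(artifactdir, ref), "rb") as f:
        proto.ParseFromString(f.read())

    print(proto.strong_key, proto.build_success, bool(str(proto.buildtree)))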
diff --git a/src/buildstream/_artifactelement.py b/src/buildstream/_artifactelement.py
new file mode 100644
index 000000000..d65d46173
--- /dev/null
+++ b/src/buildstream/_artifactelement.py
@@ -0,0 +1,92 @@
+#
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# James Ennis <james.ennis@codethink.co.uk>
+from . import Element
+from . import _cachekey
+from ._exceptions import ArtifactElementError
+from ._loader.metaelement import MetaElement
+
+
+# ArtifactElement()
+#
+# Object to be used for directly processing an artifact
+#
+# Args:
+# context (Context): The Context object
+# ref (str): The artifact ref
+#
+class ArtifactElement(Element):
+ def __init__(self, context, ref):
+ _, element, key = verify_artifact_ref(ref)
+
+ self._ref = ref
+ self._key = key
+
+ project = context.get_toplevel_project()
+ meta = MetaElement(project, element) # NOTE element has no .bst suffix
+ plugin_conf = None
+
+ super().__init__(context, project, meta, plugin_conf)
+
+ # Override Element.get_artifact_name()
+ def get_artifact_name(self, key=None):
+ return self._ref
+
+ # Dummy configure method
+ def configure(self, node):
+ pass
+
+ # Dummy preflight method
+ def preflight(self):
+ pass
+
+ # Override Element._calculate_cache_key
+ def _calculate_cache_key(self, dependencies=None):
+ return self._key
+
+ # Override Element._get_cache_key()
+ def _get_cache_key(self, strength=None):
+ return self._key
+
+
+# verify_artifact_ref()
+#
+# Verify that a ref string matches the format of an artifact
+#
+# Args:
+# ref (str): The artifact ref
+#
+# Returns:
+# project (str): The project's name
+# element (str): The element's name
+# key (str): The cache key
+#
+# Raises:
+# ArtifactElementError if the ref string does not match
+# the expected format
+#
+def verify_artifact_ref(ref):
+ try:
+ project, element, key = ref.split('/', 2) # This will raise a ValueError if unable to split
+ # Explicitly raise a ValueError if the key length is not as expected
+ if not _cachekey.is_key(key):
+ raise ValueError
+ except ValueError:
+ raise ArtifactElementError("Artifact: {} is not of the expected format".format(ref))
+
+ return project, element, key
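
For reference, the ref format expected by verify_artifact_ref() is 'project/element/key' with a full-length cache key; a quick illustration with hypothetical values (assuming a 64-character hex key):

    # Hypothetical ref, for illustration only.
    from buildstream._artifactelement import verify_artifact_ref

    ref = "myproject/hello/" + "a" * 64   # project / element name / cache key
    project, element, key = verify_artifact_ref(ref)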
diff --git a/src/buildstream/_basecache.py b/src/buildstream/_basecache.py
new file mode 100644
index 000000000..68654b2a0
--- /dev/null
+++ b/src/buildstream/_basecache.py
@@ -0,0 +1,307 @@
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Raoul Hidalgo Charman <raoul.hidalgocharman@codethink.co.uk>
+#
+import multiprocessing
+import os
+from fnmatch import fnmatch
+
+from . import utils
+from . import _yaml
+from ._cas import CASRemote
+from ._message import Message, MessageType
+from ._exceptions import LoadError
+
+
+# Base Cache for Caches to derive from
+#
+class BaseCache():
+
+ # None of these should ever be called in the base class, but this appeases
+ # pylint to some degree
+ spec_class = None
+ spec_name = None
+ spec_error = None
+ config_node_name = None
+
+ def __init__(self, context):
+ self.context = context
+ self.cas = context.get_cascache()
+ self.casquota = context.get_casquota()
+ self.casquota._calculate_cache_quota()
+
+ self._remotes_setup = False # Check to prevent double-setup of remotes
+ # Per-project list of _CASRemote instances.
+ self._remotes = {}
+
+ self.global_remote_specs = []
+ self.project_remote_specs = {}
+
+ self._has_fetch_remotes = False
+ self._has_push_remotes = False
+
+ # specs_from_config_node()
+ #
+ # Parses the configuration of remote artifact caches from a config block.
+ #
+ # Args:
+ # config_node (dict): The config block, which may contain the 'artifacts' key
+ # basedir (str): The base directory for relative paths
+ #
+ # Returns:
+ # A list of ArtifactCacheSpec instances.
+ #
+ # Raises:
+ # LoadError, if the config block contains invalid keys.
+ #
+ @classmethod
+ def specs_from_config_node(cls, config_node, basedir=None):
+ cache_specs = []
+
+ try:
+ artifacts = [_yaml.node_get(config_node, dict, cls.config_node_name)]
+ except LoadError:
+ try:
+ artifacts = _yaml.node_get(config_node, list, cls.config_node_name, default_value=[])
+ except LoadError:
+ provenance = _yaml.node_get_provenance(config_node, key=cls.config_node_name)
+ raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA,
+ "%s: 'artifacts' must be a single 'url:' mapping, or a list of mappings" %
+ (str(provenance)))
+
+ for spec_node in artifacts:
+ cache_specs.append(cls.spec_class._new_from_config_node(spec_node, basedir))
+
+ return cache_specs
+
+ # _configured_remote_cache_specs():
+ #
+ # Return the list of configured remotes for a given project, in priority
+ # order. This takes into account the user and project configuration.
+ #
+ # Args:
+ # context (Context): The BuildStream context
+ # project (Project): The BuildStream project
+ #
+ # Returns:
+ # A list of ArtifactCacheSpec instances describing the remote artifact caches.
+ #
+ @classmethod
+ def _configured_remote_cache_specs(cls, context, project):
+ project_overrides = context.get_overrides(project.name)
+ project_extra_specs = cls.specs_from_config_node(project_overrides)
+
+ project_specs = getattr(project, cls.spec_name)
+ context_specs = getattr(context, cls.spec_name)
+
+ return list(utils._deduplicate(
+ project_extra_specs + project_specs + context_specs))
+
+ # setup_remotes():
+ #
+ # Sets up which remotes to use
+ #
+ # Args:
+ # use_config (bool): Whether to use project configuration
+ # remote_url (str): Remote cache URL
+ #
+ # This requires that all of the projects which are to be processed in the session
+ # have already been loaded and are observable in the Context.
+ #
+ def setup_remotes(self, *, use_config=False, remote_url=None):
+
+ # Ensure we do not double-initialise since this can be expensive
+ assert not self._remotes_setup
+ self._remotes_setup = True
+
+ # Initialize remote caches. We allow the commandline to override
+ # the user config in some cases (for example `bst artifact push --remote=...`).
+ has_remote_caches = False
+ if remote_url:
+ # pylint: disable=not-callable
+ self._set_remotes([self.spec_class(remote_url, push=True)])
+ has_remote_caches = True
+ if use_config:
+ for project in self.context.get_projects():
+ caches = self._configured_remote_cache_specs(self.context, project)
+ if caches: # caches is a list of spec_class instances
+ self._set_remotes(caches, project=project)
+ has_remote_caches = True
+ if has_remote_caches:
+ self._initialize_remotes()
+
+ # initialize_remotes():
+ #
+ # This will contact each remote cache.
+ #
+ # Args:
+ # on_failure (callable): Called if we fail to contact one of the caches.
+ #
+ def initialize_remotes(self, *, on_failure=None):
+ remote_specs = self.global_remote_specs
+
+ for project in self.project_remote_specs:
+ remote_specs += self.project_remote_specs[project]
+
+ remote_specs = list(utils._deduplicate(remote_specs))
+
+ remotes = {}
+ q = multiprocessing.Queue()
+ for remote_spec in remote_specs:
+
+ error = CASRemote.check_remote(remote_spec, q)
+
+ if error and on_failure:
+ on_failure(remote_spec.url, error)
+ elif error:
+ raise self.spec_error(error) # pylint: disable=not-callable
+ else:
+ self._has_fetch_remotes = True
+ if remote_spec.push:
+ self._has_push_remotes = True
+
+ remotes[remote_spec.url] = CASRemote(remote_spec)
+
+ for project in self.context.get_projects():
+ remote_specs = self.global_remote_specs
+ if project in self.project_remote_specs:
+ remote_specs = list(utils._deduplicate(remote_specs + self.project_remote_specs[project]))
+
+ project_remotes = []
+
+ for remote_spec in remote_specs:
+ # Errors are already handled in the loop above,
+ # skip unreachable remotes here.
+ if remote_spec.url not in remotes:
+ continue
+
+ remote = remotes[remote_spec.url]
+ project_remotes.append(remote)
+
+ self._remotes[project] = project_remotes
+
+ # has_fetch_remotes():
+ #
+ # Check whether any remote repositories are available for fetching.
+ #
+ # Args:
+ # plugin (Plugin): The Plugin to check
+ #
+ # Returns: True if any remote repositories are configured, False otherwise
+ #
+ def has_fetch_remotes(self, *, plugin=None):
+ if not self._has_fetch_remotes:
+ # No project has fetch remotes
+ return False
+ elif plugin is None:
+ # At least one (sub)project has fetch remotes
+ return True
+ else:
+ # Check whether the specified element's project has fetch remotes
+ remotes_for_project = self._remotes[plugin._get_project()]
+ return bool(remotes_for_project)
+
+ # has_push_remotes():
+ #
+ # Check whether any remote repositories are available for pushing.
+ #
+ # Args:
+ # plugin (Plugin): The Plugin to check
+ #
+ # Returns: True if any remote repository is configured, False otherwise
+ #
+ def has_push_remotes(self, *, plugin=None):
+ if not self._has_push_remotes:
+ # No project has push remotes
+ return False
+ elif plugin is None:
+ # At least one (sub)project has push remotes
+ return True
+ else:
+ # Check whether the specified element's project has push remotes
+ remotes_for_project = self._remotes[plugin._get_project()]
+ return any(remote.spec.push for remote in remotes_for_project)
+
+ ################################################
+ # Local Private Methods #
+ ################################################
+
+ # _message()
+ #
+ # Local message propagator
+ #
+ def _message(self, message_type, message, **kwargs):
+ args = dict(kwargs)
+ self.context.message(
+ Message(None, message_type, message, **args))
+
+ # _set_remotes():
+ #
+ # Set the list of remote caches. If project is None, the global list of
+ # remote caches will be set, which is used by all projects. If a project is
+ # specified, the per-project list of remote caches will be set.
+ #
+ # Args:
+ # remote_specs (list): List of ArtifactCacheSpec instances, in priority order.
+ # project (Project): The Project instance for project-specific remotes
+ def _set_remotes(self, remote_specs, *, project=None):
+ if project is None:
+ # global remotes
+ self.global_remote_specs = remote_specs
+ else:
+ self.project_remote_specs[project] = remote_specs
+
+ # _initialize_remotes()
+ #
+ # An internal wrapper which calls the abstract method and
+ # takes care of messaging
+ #
+ def _initialize_remotes(self):
+ def remote_failed(url, error):
+ self._message(MessageType.WARN, "Failed to initialize remote {}: {}".format(url, error))
+
+ with self.context.timed_activity("Initializing remote caches", silent_nested=True):
+ self.initialize_remotes(on_failure=remote_failed)
+
+ # _list_refs_mtimes()
+ #
+ # List refs in a directory, given a base path. Also returns the
+ # associated mtimes
+ #
+ # Args:
+ # base_path (str): Base path to traverse over
+ # glob_expr (str|None): Optional glob expression to match against files
+ #
+ # Returns:
+ # (iter): iterator of (mtime, filename) tuples over the refs
+ #
+ def _list_refs_mtimes(self, base_path, *, glob_expr=None):
+ path = base_path
+ if glob_expr is not None:
+ globdir = os.path.dirname(glob_expr)
+ if not any(c in "*?[" for c in globdir):
+ # the glob's directory prefix contains no wildcard
+ # characters, so walk only that subdirectory to optimise the os.walk()
+ path = os.path.join(base_path, globdir)
+
+ for root, _, files in os.walk(path):
+ for filename in files:
+ ref_path = os.path.join(root, filename)
+ relative_path = os.path.relpath(ref_path, base_path) # Relative to refs head
+ if not glob_expr or fnmatch(relative_path, glob_expr):
+ # Obtain the mtime (the time a file was last modified)
+ yield (os.path.getmtime(ref_path), relative_path)
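+
+ # Example (illustrative sketch, not part of the upstream docs): listing
+ # refs under a hypothetical base path, optionally narrowed by a glob:
+ #
+ #   for mtime, ref in self._list_refs_mtimes(refs_dir, glob_expr="project/*"):
+ #       ...
+ #
+ # When the directory part of the glob contains no wildcard characters,
+ # the walk is restricted to that subdirectory as an optimisation.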
diff --git a/src/buildstream/_cachekey.py b/src/buildstream/_cachekey.py
new file mode 100644
index 000000000..e56b582fa
--- /dev/null
+++ b/src/buildstream/_cachekey.py
@@ -0,0 +1,68 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+
+import hashlib
+
+import ujson
+
+from . import _yaml
+
+# Internal record of the size of a cache key
+_CACHEKEY_SIZE = len(hashlib.sha256().hexdigest())
+
+
+# Hex digits
+_HEX_DIGITS = "0123456789abcdef"
+
+
+# is_key()
+#
+# Check if the passed in string *could be* a cache key. This basically checks
+# that the length matches a sha256 hex digest, and that the string does not
+# contain any non-hex characters and is fully lower case.
+#
+# Args:
+# key (str): The string to check
+#
+# Returns:
+# (bool): Whether or not `key` could be a cache key
+#
+def is_key(key):
+ if len(key) != _CACHEKEY_SIZE:
+ return False
+ return not any(ch not in _HEX_DIGITS for ch in key)
+
+
+# generate_key()
+#
+# Generate an sha256 hex digest from the given value. The value
+# can be a simple value or a nested structure of dictionaries and lists,
+# anything simple enough to serialize.
+#
+# Args:
+# value: A value to get a key for
+#
+# Returns:
+# (str): An sha256 hex digest of the given value
+#
+def generate_key(value):
+ ordered = _yaml.node_sanitize(value)
+ ustring = ujson.dumps(ordered, sort_keys=True, escape_forward_slashes=False).encode('utf-8')
+ return hashlib.sha256(ustring).hexdigest()
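+
+
+# Example (illustrative, not part of the original module): generating and
+# validating a cache key from a simple serializable value:
+#
+#   key = generate_key({'element': 'foo.bst', 'sources': ['a', 'b']})
+#   assert is_key(key)  # a 64 character lowercase hex digest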
diff --git a/src/buildstream/_cas/__init__.py b/src/buildstream/_cas/__init__.py
new file mode 100644
index 000000000..46bd9567f
--- /dev/null
+++ b/src/buildstream/_cas/__init__.py
@@ -0,0 +1,21 @@
+#
+# Copyright (C) 2017-2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .cascache import CASCache, CASQuota, CASCacheUsage
+from .casremote import CASRemote, CASRemoteSpec
diff --git a/src/buildstream/_cas/cascache.py b/src/buildstream/_cas/cascache.py
new file mode 100644
index 000000000..ad8013d18
--- /dev/null
+++ b/src/buildstream/_cas/cascache.py
@@ -0,0 +1,1462 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+import hashlib
+import itertools
+import os
+import stat
+import errno
+import uuid
+import contextlib
+
+import grpc
+
+from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+from .._protos.buildstream.v2 import buildstream_pb2
+
+from .. import utils
+from .._exceptions import CASCacheError, LoadError, LoadErrorReason
+from .._message import Message, MessageType
+
+from .casremote import BlobNotFound, _CASBatchRead, _CASBatchUpdate
+
+_BUFFER_SIZE = 65536
+
+
+CACHE_SIZE_FILE = "cache_size"
+
+
+# CASCacheUsage
+#
+# A simple object to report the current CAS cache usage details.
+#
+# Note that this uses the user configured cache quota
+# rather than the internal quota with protective headroom
+# removed, to provide a more sensible value to display to
+# the user.
+#
+# Args:
+# casquota (CASQuota): The CAS quota tracker to get the status of
+#
+class CASCacheUsage():
+
+ def __init__(self, casquota):
+ self.quota_config = casquota._config_cache_quota # Configured quota
+ self.quota_size = casquota._cache_quota_original # Resolved cache quota in bytes
+ self.used_size = casquota.get_cache_size() # Size used by artifacts in bytes
+ self.used_percent = 0 # Percentage of the quota used
+ if self.quota_size is not None:
+ self.used_percent = int(self.used_size * 100 / self.quota_size)
+
+ # Formattable into a human readable string
+ #
+ def __str__(self):
+ return "{} / {} ({}%)" \
+ .format(utils._pretty_size(self.used_size, dec_places=1),
+ self.quota_config,
+ self.used_percent)
+
+
+# A CASCache manages a CAS repository as specified in the Remote Execution API.
+#
+# Args:
+# path (str): The root directory for the CAS repository
+#
+class CASCache():
+
+ def __init__(self, path):
+ self.casdir = os.path.join(path, 'cas')
+ self.tmpdir = os.path.join(path, 'tmp')
+ os.makedirs(os.path.join(self.casdir, 'refs', 'heads'), exist_ok=True)
+ os.makedirs(os.path.join(self.casdir, 'objects'), exist_ok=True)
+ os.makedirs(self.tmpdir, exist_ok=True)
+
+ self.__reachable_directory_callbacks = []
+ self.__reachable_digest_callbacks = []
+
+ # preflight():
+ #
+ # Preflight check.
+ #
+ def preflight(self):
+ headdir = os.path.join(self.casdir, 'refs', 'heads')
+ objdir = os.path.join(self.casdir, 'objects')
+ if not (os.path.isdir(headdir) and os.path.isdir(objdir)):
+ raise CASCacheError("CAS repository check failed for '{}'".format(self.casdir))
+
+ # contains():
+ #
+ # Check whether the specified ref is already available in the local CAS cache.
+ #
+ # Args:
+ # ref (str): The ref to check
+ #
+ # Returns: True if the ref is in the cache, False otherwise
+ #
+ def contains(self, ref):
+ refpath = self._refpath(ref)
+
+ # This assumes that the repository doesn't have any dangling pointers
+ return os.path.exists(refpath)
+
+ # contains_directory():
+ #
+ # Check whether the specified directory and subdirectories are in the cache,
+ # i.e. not dangling.
+ #
+ # Args:
+ # digest (Digest): The directory digest to check
+ # with_files (bool): Whether to check files as well
+ #
+ # Returns: True if the directory is available in the local cache
+ #
+ def contains_directory(self, digest, *, with_files):
+ try:
+ directory = remote_execution_pb2.Directory()
+ with open(self.objpath(digest), 'rb') as f:
+ directory.ParseFromString(f.read())
+
+ # Optionally check presence of files
+ if with_files:
+ for filenode in directory.files:
+ if not os.path.exists(self.objpath(filenode.digest)):
+ return False
+
+ # Check subdirectories
+ for dirnode in directory.directories:
+ if not self.contains_directory(dirnode.digest, with_files=with_files):
+ return False
+
+ return True
+ except FileNotFoundError:
+ return False
+
+ # checkout():
+ #
+ # Checkout the specified directory digest.
+ #
+ # Args:
+ # dest (str): The destination path
+ # tree (Digest): The directory digest to extract
+ # can_link (bool): Whether we can create hard links in the destination
+ #
+ def checkout(self, dest, tree, *, can_link=False):
+ os.makedirs(dest, exist_ok=True)
+
+ directory = remote_execution_pb2.Directory()
+
+ with open(self.objpath(tree), 'rb') as f:
+ directory.ParseFromString(f.read())
+
+ for filenode in directory.files:
+ # regular file, create hardlink
+ fullpath = os.path.join(dest, filenode.name)
+ if can_link:
+ utils.safe_link(self.objpath(filenode.digest), fullpath)
+ else:
+ utils.safe_copy(self.objpath(filenode.digest), fullpath)
+
+ if filenode.is_executable:
+ os.chmod(fullpath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
+ stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+
+ for dirnode in directory.directories:
+ fullpath = os.path.join(dest, dirnode.name)
+ self.checkout(fullpath, dirnode.digest, can_link=can_link)
+
+ for symlinknode in directory.symlinks:
+ # symlink
+ fullpath = os.path.join(dest, symlinknode.name)
+ os.symlink(symlinknode.target, fullpath)
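+
+ # Example (illustrative sketch): checking out a previously resolved
+ # directory digest into a scratch directory, hardlinking where allowed:
+ #
+ #   digest = self.resolve_ref(ref)  # for some existing ref
+ #   self.checkout('/tmp/checkout', digest, can_link=True)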
+
+ # commit():
+ #
+ # Commit directory to cache.
+ #
+ # Args:
+ # refs (list): The refs to set
+ # path (str): The directory to import
+ #
+ def commit(self, refs, path):
+ tree = self._commit_directory(path)
+
+ for ref in refs:
+ self.set_ref(ref, tree)
+
+ # diff():
+ #
+ # Return the lists of files modified, removed and added between
+ # the refs described by ref_a and ref_b.
+ #
+ # Args:
+ #   ref_a (str): The first ref
+ #   ref_b (str): The second ref
+ #
+ def diff(self, ref_a, ref_b):
+ tree_a = self.resolve_ref(ref_a)
+ tree_b = self.resolve_ref(ref_b)
+
+ added = []
+ removed = []
+ modified = []
+
+ self.diff_trees(tree_a, tree_b, added=added, removed=removed, modified=modified)
+
+ return modified, removed, added
+
+ # pull():
+ #
+ # Pull a ref from a remote repository.
+ #
+ # Args:
+ # ref (str): The ref to pull
+ # remote (CASRemote): The remote repository to pull from
+ #
+ # Returns:
+ # (bool): True if pull was successful, False if ref was not available
+ #
+ def pull(self, ref, remote):
+ try:
+ remote.init()
+
+ request = buildstream_pb2.GetReferenceRequest(instance_name=remote.spec.instance_name)
+ request.key = ref
+ response = remote.ref_storage.GetReference(request)
+
+ tree = response.digest
+
+ # Fetch Directory objects
+ self._fetch_directory(remote, tree)
+
+ # Fetch files, excluded_subdirs determined in pullqueue
+ required_blobs = self.required_blobs_for_directory(tree)
+ missing_blobs = self.local_missing_blobs(required_blobs)
+ if missing_blobs:
+ self.fetch_blobs(remote, missing_blobs)
+
+ self.set_ref(ref, tree)
+
+ return True
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ raise CASCacheError("Failed to pull ref {}: {}".format(ref, e)) from e
+ else:
+ return False
+ except BlobNotFound as e:
+ return False
+
+ # pull_tree():
+ #
+ # Pull a single Tree rather than a ref.
+ # Does not update local refs.
+ #
+ # Args:
+ # remote (CASRemote): The remote to pull from
+ # digest (Digest): The digest of the tree
+ #
+ def pull_tree(self, remote, digest):
+ try:
+ remote.init()
+
+ digest = self._fetch_tree(remote, digest)
+
+ return digest
+
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ raise
+
+ return None
+
+ # link_ref():
+ #
+ # Add an alias for an existing ref.
+ #
+ # Args:
+ # oldref (str): An existing ref
+ # newref (str): A new ref for the same directory
+ #
+ def link_ref(self, oldref, newref):
+ tree = self.resolve_ref(oldref)
+
+ self.set_ref(newref, tree)
+
+ # push():
+ #
+ # Push committed refs to remote repository.
+ #
+ # Args:
+ # refs (list): The refs to push
+ # remote (CASRemote): The remote to push to
+ #
+ # Returns:
+ # (bool): True if any remote was updated, False if no pushes were required
+ #
+ # Raises:
+ # (CASCacheError): if there was an error
+ #
+ def push(self, refs, remote):
+ skipped_remote = True
+ try:
+ for ref in refs:
+ tree = self.resolve_ref(ref)
+
+ # Check whether ref is already on the server in which case
+ # there is no need to push the ref
+ try:
+ request = buildstream_pb2.GetReferenceRequest(instance_name=remote.spec.instance_name)
+ request.key = ref
+ response = remote.ref_storage.GetReference(request)
+
+ if response.digest.hash == tree.hash and response.digest.size_bytes == tree.size_bytes:
+ # ref is already on the server with the same tree
+ continue
+
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ # Intentionally re-raise RpcError for outer except block.
+ raise
+
+ self._send_directory(remote, tree)
+
+ request = buildstream_pb2.UpdateReferenceRequest(instance_name=remote.spec.instance_name)
+ request.keys.append(ref)
+ request.digest.CopyFrom(tree)
+ remote.ref_storage.UpdateReference(request)
+
+ skipped_remote = False
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
+ raise CASCacheError("Failed to push ref {}: {}".format(refs, e), temporary=True) from e
+
+ return not skipped_remote
+
+ # objpath():
+ #
+ # Return the path of an object based on its digest.
+ #
+ # Args:
+ # digest (Digest): The digest of the object
+ #
+ # Returns:
+ # (str): The path of the object
+ #
+ def objpath(self, digest):
+ return os.path.join(self.casdir, 'objects', digest.hash[:2], digest.hash[2:])
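+
+ # For example (illustrative), a digest whose hash starts with "abcd" is
+ # stored at "<casdir>/objects/ab/cd...": the first two hex characters form
+ # the fan-out directory and the remainder the file name.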
+
+ # add_object():
+ #
+ # Hash and write object to CAS.
+ #
+ # Args:
+ # digest (Digest): An optional Digest object to populate
+ # path (str): Path to file to add
+ # buffer (bytes): Byte buffer to add
+ # link_directly (bool): Whether file given by path can be linked
+ #
+ # Returns:
+ # (Digest): The digest of the added object
+ #
+ # Either `path` or `buffer` must be passed, but not both.
+ #
+ def add_object(self, *, digest=None, path=None, buffer=None, link_directly=False):
+ # Exactly one of the two parameters has to be specified
+ assert (path is None) != (buffer is None)
+
+ if digest is None:
+ digest = remote_execution_pb2.Digest()
+
+ try:
+ h = hashlib.sha256()
+ # Always write out new file to avoid corruption if input file is modified
+ with contextlib.ExitStack() as stack:
+ if path is not None and link_directly:
+ tmp = stack.enter_context(open(path, 'rb'))
+ for chunk in iter(lambda: tmp.read(_BUFFER_SIZE), b""):
+ h.update(chunk)
+ else:
+ tmp = stack.enter_context(self._temporary_object())
+
+ if path:
+ with open(path, 'rb') as f:
+ for chunk in iter(lambda: f.read(_BUFFER_SIZE), b""):
+ h.update(chunk)
+ tmp.write(chunk)
+ else:
+ h.update(buffer)
+ tmp.write(buffer)
+
+ tmp.flush()
+
+ digest.hash = h.hexdigest()
+ digest.size_bytes = os.fstat(tmp.fileno()).st_size
+
+ # Place file at final location
+ objpath = self.objpath(digest)
+ os.makedirs(os.path.dirname(objpath), exist_ok=True)
+ os.link(tmp.name, objpath)
+
+ except FileExistsError as e:
+ # We can ignore the failed link() if the object is already in the repo.
+ pass
+
+ except OSError as e:
+ raise CASCacheError("Failed to hash object: {}".format(e)) from e
+
+ return digest
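+
+ # Example (illustrative sketch): adding an object either from a file on
+ # disk or from an in-memory buffer (exactly one of the two must be given):
+ #
+ #   digest = self.add_object(path='/path/to/file')
+ #   digest = self.add_object(buffer=directory.SerializeToString())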
+
+ # set_ref():
+ #
+ # Create or replace a ref.
+ #
+ # Args:
+ # ref (str): The name of the ref
+ # tree (Digest): The digest to store in the ref
+ #
+ def set_ref(self, ref, tree):
+ refpath = self._refpath(ref)
+ os.makedirs(os.path.dirname(refpath), exist_ok=True)
+ with utils.save_file_atomic(refpath, 'wb', tempdir=self.tmpdir) as f:
+ f.write(tree.SerializeToString())
+
+ # resolve_ref():
+ #
+ # Resolve a ref to a digest.
+ #
+ # Args:
+ # ref (str): The name of the ref
+ # update_mtime (bool): Whether to update the mtime of the ref
+ #
+ # Returns:
+ # (Digest): The digest stored in the ref
+ #
+ def resolve_ref(self, ref, *, update_mtime=False):
+ refpath = self._refpath(ref)
+
+ try:
+ with open(refpath, 'rb') as f:
+ if update_mtime:
+ os.utime(refpath)
+
+ digest = remote_execution_pb2.Digest()
+ digest.ParseFromString(f.read())
+ return digest
+
+ except FileNotFoundError as e:
+ raise CASCacheError("Attempt to access unavailable ref: {}".format(e)) from e
+
+ # update_mtime()
+ #
+ # Update the mtime of a ref.
+ #
+ # Args:
+ # ref (str): The ref to update
+ #
+ def update_mtime(self, ref):
+ try:
+ os.utime(self._refpath(ref))
+ except FileNotFoundError as e:
+ raise CASCacheError("Attempt to access unavailable ref: {}".format(e)) from e
+
+ # list_objects():
+ #
+ # List cached objects in Least Recently Modified (LRM) order.
+ #
+ # Returns:
+ # (list): A list of (mtime, object path) tuples in LRM order
+ #
+ def list_objects(self):
+ objs = []
+ mtimes = []
+
+ for root, _, files in os.walk(os.path.join(self.casdir, 'objects')):
+ for filename in files:
+ obj_path = os.path.join(root, filename)
+ try:
+ mtimes.append(os.path.getmtime(obj_path))
+ except FileNotFoundError:
+ pass
+ else:
+ objs.append(obj_path)
+
+ # NOTE: Sorted will sort from earliest to latest, thus the
+ # first element of this list will be the file modified earliest.
+ return sorted(zip(mtimes, objs))
+
+ def clean_up_refs_until(self, time):
+ ref_heads = os.path.join(self.casdir, 'refs', 'heads')
+
+ for root, _, files in os.walk(ref_heads):
+ for filename in files:
+ ref_path = os.path.join(root, filename)
+ # Obtain the mtime (the time a file was last modified)
+ if os.path.getmtime(ref_path) < time:
+ os.unlink(ref_path)
+
+ # remove():
+ #
+ # Removes the given symbolic ref from the repo.
+ #
+ # Args:
+ # ref (str): A symbolic ref
+ # basedir (str): Path of base directory the ref is in, defaults to
+ # CAS refs heads
+ # defer_prune (bool): Whether to defer pruning to the caller. NOTE:
+ # The space won't be freed until you manually
+ # call prune.
+ #
+ # Returns:
+ # (int|None): The amount of space pruned from the repository in
+ # bytes, or None if defer_prune is True
+ #
+ def remove(self, ref, *, basedir=None, defer_prune=False):
+
+ if basedir is None:
+ basedir = os.path.join(self.casdir, 'refs', 'heads')
+ # Remove cache ref
+ self._remove_ref(ref, basedir)
+
+ if not defer_prune:
+ pruned = self.prune()
+ return pruned
+
+ return None
+
+ # Adds a callback which returns an iterator over reachable directory digests
+ def add_reachable_directories_callback(self, callback):
+ self.__reachable_directory_callbacks.append(callback)
+
+ # Adds a callback which returns an iterator over reachable file digests
+ def add_reachable_digests_callback(self, callback):
+ self.__reachable_digest_callbacks.append(callback)
+
+ # prune():
+ #
+ # Prune unreachable objects from the repo.
+ #
+ def prune(self):
+ ref_heads = os.path.join(self.casdir, 'refs', 'heads')
+
+ pruned = 0
+ reachable = set()
+
+ # Check which objects are reachable
+ for root, _, files in os.walk(ref_heads):
+ for filename in files:
+ ref_path = os.path.join(root, filename)
+ ref = os.path.relpath(ref_path, ref_heads)
+
+ tree = self.resolve_ref(ref)
+ self._reachable_refs_dir(reachable, tree)
+
+ # check callback directory digests that are reachable
+ for digest_callback in self.__reachable_directory_callbacks:
+ for digest in digest_callback():
+ self._reachable_refs_dir(reachable, digest)
+
+ # check callback file digests that are reachable
+ for digest_callback in self.__reachable_digest_callbacks:
+ for digest in digest_callback():
+ reachable.add(digest.hash)
+
+ # Prune unreachable objects
+ for root, _, files in os.walk(os.path.join(self.casdir, 'objects')):
+ for filename in files:
+ objhash = os.path.basename(root) + filename
+ if objhash not in reachable:
+ obj_path = os.path.join(root, filename)
+ pruned += os.stat(obj_path).st_size
+ os.unlink(obj_path)
+
+ return pruned
+
+ def update_tree_mtime(self, tree):
+ reachable = set()
+ self._reachable_refs_dir(reachable, tree, update_mtime=True)
+
+ # remote_missing_blobs_for_directory():
+ #
+ # Determine which blobs of a directory tree are missing on the remote.
+ #
+ # Args:
+ # remote (CASRemote): The remote repository to check
+ # digest (Digest): The directory digest
+ #
+ # Returns: List of missing Digest objects
+ #
+ def remote_missing_blobs_for_directory(self, remote, digest):
+ required_blobs = self.required_blobs_for_directory(digest)
+
+ return self.remote_missing_blobs(remote, required_blobs)
+
+ # remote_missing_blobs():
+ #
+ # Determine which blobs are missing on the remote.
+ #
+ # Args:
+ # remote (CASRemote): The remote repository to check
+ # blobs (list): The Digests of the blobs to check
+ #
+ # Returns: List of missing Digest objects
+ #
+ def remote_missing_blobs(self, remote, blobs):
+ missing_blobs = dict()
+ # Limit size of FindMissingBlobs request
+ for required_blobs_group in _grouper(blobs, 512):
+ request = remote_execution_pb2.FindMissingBlobsRequest(instance_name=remote.spec.instance_name)
+
+ for required_digest in required_blobs_group:
+ d = request.blob_digests.add()
+ d.CopyFrom(required_digest)
+
+ response = remote.cas.FindMissingBlobs(request)
+ for missing_digest in response.missing_blob_digests:
+ d = remote_execution_pb2.Digest()
+ d.CopyFrom(missing_digest)
+ missing_blobs[d.hash] = d
+
+ return missing_blobs.values()
+
+ # local_missing_blobs():
+ #
+ # Check local cache for missing blobs.
+ #
+ # Args:
+ # digests (list): The Digests of blobs to check
+ #
+ # Returns: Missing Digest objects
+ #
+ def local_missing_blobs(self, digests):
+ missing_blobs = []
+ for digest in digests:
+ objpath = self.objpath(digest)
+ if not os.path.exists(objpath):
+ missing_blobs.append(digest)
+ return missing_blobs
+
+ # required_blobs_for_directory():
+ #
+ # Generator that returns the Digests of all blobs in the tree specified by
+ # the Digest of the toplevel Directory object.
+ #
+ def required_blobs_for_directory(self, directory_digest, *, excluded_subdirs=None):
+ if not excluded_subdirs:
+ excluded_subdirs = []
+
+ # parse directory, and recursively add blobs
+
+ yield directory_digest
+
+ directory = remote_execution_pb2.Directory()
+
+ with open(self.objpath(directory_digest), 'rb') as f:
+ directory.ParseFromString(f.read())
+
+ for filenode in directory.files:
+ yield filenode.digest
+
+ for dirnode in directory.directories:
+ if dirnode.name not in excluded_subdirs:
+ yield from self.required_blobs_for_directory(dirnode.digest)
+
+ def diff_trees(self, tree_a, tree_b, *, added, removed, modified, path=""):
+ dir_a = remote_execution_pb2.Directory()
+ dir_b = remote_execution_pb2.Directory()
+
+ if tree_a:
+ with open(self.objpath(tree_a), 'rb') as f:
+ dir_a.ParseFromString(f.read())
+ if tree_b:
+ with open(self.objpath(tree_b), 'rb') as f:
+ dir_b.ParseFromString(f.read())
+
+ a = 0
+ b = 0
+ while a < len(dir_a.files) or b < len(dir_b.files):
+ if b < len(dir_b.files) and (a >= len(dir_a.files) or
+ dir_a.files[a].name > dir_b.files[b].name):
+ added.append(os.path.join(path, dir_b.files[b].name))
+ b += 1
+ elif a < len(dir_a.files) and (b >= len(dir_b.files) or
+ dir_b.files[b].name > dir_a.files[a].name):
+ removed.append(os.path.join(path, dir_a.files[a].name))
+ a += 1
+ else:
+ # File exists in both directories
+ if dir_a.files[a].digest.hash != dir_b.files[b].digest.hash:
+ modified.append(os.path.join(path, dir_a.files[a].name))
+ a += 1
+ b += 1
+
+ a = 0
+ b = 0
+ while a < len(dir_a.directories) or b < len(dir_b.directories):
+ if b < len(dir_b.directories) and (a >= len(dir_a.directories) or
+ dir_a.directories[a].name > dir_b.directories[b].name):
+ self.diff_trees(None, dir_b.directories[b].digest,
+ added=added, removed=removed, modified=modified,
+ path=os.path.join(path, dir_b.directories[b].name))
+ b += 1
+ elif a < len(dir_a.directories) and (b >= len(dir_b.directories) or
+ dir_b.directories[b].name > dir_a.directories[a].name):
+ self.diff_trees(dir_a.directories[a].digest, None,
+ added=added, removed=removed, modified=modified,
+ path=os.path.join(path, dir_a.directories[a].name))
+ a += 1
+ else:
+ # Subdirectory exists in both directories
+ if dir_a.directories[a].digest.hash != dir_b.directories[b].digest.hash:
+ self.diff_trees(dir_a.directories[a].digest, dir_b.directories[b].digest,
+ added=added, removed=removed, modified=modified,
+ path=os.path.join(path, dir_a.directories[a].name))
+ a += 1
+ b += 1
+
+ ################################################
+ # Local Private Methods #
+ ################################################
+
+ def _refpath(self, ref):
+ return os.path.join(self.casdir, 'refs', 'heads', ref)
+
+ # _remove_ref()
+ #
+ # Removes a ref.
+ #
+ # This also takes care of pruning away directories which can
+ # be removed after having removed the given ref.
+ #
+ # Args:
+ # ref (str): The ref to remove
+ # basedir (str): Path of base directory the ref is in
+ #
+ # Raises:
+ # (CASCacheError): If the ref didn't exist, or a system error
+ # occurred while removing it
+ #
+ def _remove_ref(self, ref, basedir):
+
+ # Remove the ref itself
+ refpath = os.path.join(basedir, ref)
+
+ try:
+ os.unlink(refpath)
+ except FileNotFoundError as e:
+ raise CASCacheError("Could not find ref '{}'".format(ref)) from e
+
+ # Now remove any leading directories
+
+ components = list(os.path.split(ref))
+ while components:
+ components.pop()
+ refdir = os.path.join(basedir, *components)
+
+ # Break out once we reach the base
+ if refdir == basedir:
+ break
+
+ try:
+ os.rmdir(refdir)
+ except FileNotFoundError:
+ # The parent directory did not exist, but its
+ # parent directory might still be ready to prune
+ pass
+ except OSError as e:
+ if e.errno == errno.ENOTEMPTY:
+ # The parent directory was not empty, so we
+ # cannot prune directories beyond this point
+ break
+
+ # Something went wrong here
+ raise CASCacheError("System error while removing ref '{}': {}".format(ref, e)) from e
+
+ # _commit_directory():
+ #
+ # Adds local directory to content addressable store.
+ #
+ # Adds files, symbolic links and recursively other directories in
+ # a local directory to the content addressable store.
+ #
+ # Args:
+ # path (str): Path to the directory to add.
+ # dir_digest (Digest): An optional Digest object to use.
+ #
+ # Returns:
+ # (Digest): Digest object for the directory added.
+ #
+ def _commit_directory(self, path, *, dir_digest=None):
+ directory = remote_execution_pb2.Directory()
+
+ for name in sorted(os.listdir(path)):
+ full_path = os.path.join(path, name)
+ mode = os.lstat(full_path).st_mode
+ if stat.S_ISDIR(mode):
+ dirnode = directory.directories.add()
+ dirnode.name = name
+ self._commit_directory(full_path, dir_digest=dirnode.digest)
+ elif stat.S_ISREG(mode):
+ filenode = directory.files.add()
+ filenode.name = name
+ self.add_object(path=full_path, digest=filenode.digest)
+ filenode.is_executable = (mode & stat.S_IXUSR) == stat.S_IXUSR
+ elif stat.S_ISLNK(mode):
+ symlinknode = directory.symlinks.add()
+ symlinknode.name = name
+ symlinknode.target = os.readlink(full_path)
+ elif stat.S_ISSOCK(mode):
+ # The process serving the socket can't be cached anyway
+ pass
+ else:
+ raise CASCacheError("Unsupported file type for {}".format(full_path))
+
+ return self.add_object(digest=dir_digest,
+ buffer=directory.SerializeToString())
+
+ def _get_subdir(self, tree, subdir):
+ head, name = os.path.split(subdir)
+ if head:
+ tree = self._get_subdir(tree, head)
+
+ directory = remote_execution_pb2.Directory()
+
+ with open(self.objpath(tree), 'rb') as f:
+ directory.ParseFromString(f.read())
+
+ for dirnode in directory.directories:
+ if dirnode.name == name:
+ return dirnode.digest
+
+ raise CASCacheError("Subdirectory {} not found".format(name))
+
+ def _reachable_refs_dir(self, reachable, tree, update_mtime=False, check_exists=False):
+ if tree.hash in reachable:
+ return
+ try:
+ if update_mtime:
+ os.utime(self.objpath(tree))
+
+ reachable.add(tree.hash)
+
+ directory = remote_execution_pb2.Directory()
+
+ with open(self.objpath(tree), 'rb') as f:
+ directory.ParseFromString(f.read())
+
+ except FileNotFoundError:
+ # Just exit early if the file doesn't exist
+ return
+
+ for filenode in directory.files:
+ if update_mtime:
+ os.utime(self.objpath(filenode.digest))
+ if check_exists:
+ if not os.path.exists(self.objpath(filenode.digest)):
+ raise FileNotFoundError
+ reachable.add(filenode.digest.hash)
+
+ for dirnode in directory.directories:
+ self._reachable_refs_dir(reachable, dirnode.digest, update_mtime=update_mtime, check_exists=check_exists)
+
+ # _temporary_object():
+ #
+ # Create a named temporary file with 0o0644 access rights.
+ #
+ # Returns:
+ # (file): A file object to a named temporary file.
+ @contextlib.contextmanager
+ def _temporary_object(self):
+ with utils._tempnamedfile(dir=self.tmpdir) as f:
+ os.chmod(f.name,
+ stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+ yield f
+
+ # _ensure_blob():
+ #
+ # Fetch and add blob if it's not already local.
+ #
+ # Args:
+ # remote (Remote): The remote to use.
+ # digest (Digest): Digest object for the blob to fetch.
+ #
+ # Returns:
+ # (str): The path of the object
+ #
+ def _ensure_blob(self, remote, digest):
+ objpath = self.objpath(digest)
+ if os.path.exists(objpath):
+ # already in local repository
+ return objpath
+
+ with self._temporary_object() as f:
+ remote._fetch_blob(digest, f)
+
+ added_digest = self.add_object(path=f.name, link_directly=True)
+ assert added_digest.hash == digest.hash
+
+ return objpath
+
+ def _batch_download_complete(self, batch, *, missing_blobs=None):
+ for digest, data in batch.send(missing_blobs=missing_blobs):
+ with self._temporary_object() as f:
+ f.write(data)
+ f.flush()
+
+ added_digest = self.add_object(path=f.name, link_directly=True)
+ assert added_digest.hash == digest.hash
+
+ # Helper function for _fetch_directory().
+ def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue):
+ self._batch_download_complete(batch)
+
+ # All previously scheduled directories are now locally available,
+ # move them to the processing queue.
+ fetch_queue.extend(fetch_next_queue)
+ fetch_next_queue.clear()
+ return _CASBatchRead(remote)
+
+ # Helper function for _fetch_directory().
+ def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
+ in_local_cache = os.path.exists(self.objpath(digest))
+
+ if in_local_cache:
+ # Skip download, already in local cache.
+ pass
+ elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
+ not remote.batch_read_supported):
+ # Too large for batch request, download in independent request.
+ self._ensure_blob(remote, digest)
+ in_local_cache = True
+ else:
+ if not batch.add(digest):
+ # Not enough space left in batch request.
+ # Complete pending batch first.
+ batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
+ batch.add(digest)
+
+ if recursive:
+ if in_local_cache:
+ # Add directory to processing queue.
+ fetch_queue.append(digest)
+ else:
+ # Directory will be available after completing pending batch.
+ # Add directory to deferred processing queue.
+ fetch_next_queue.append(digest)
+
+ return batch
+
+ # _fetch_directory():
+ #
+ # Fetches remote directory and adds it to content addressable store.
+ #
+ # This recursively fetches directory objects but doesn't fetch any
+ # files.
+ #
+ # Args:
+ # remote (Remote): The remote to use.
+ # dir_digest (Digest): Digest object for the directory to fetch.
+ #
+ def _fetch_directory(self, remote, dir_digest):
+ # TODO Use GetTree() if the server supports it
+
+ fetch_queue = [dir_digest]
+ fetch_next_queue = []
+ batch = _CASBatchRead(remote)
+
+ while len(fetch_queue) + len(fetch_next_queue) > 0:
+ if not fetch_queue:
+ batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
+
+ dir_digest = fetch_queue.pop(0)
+
+ objpath = self._ensure_blob(remote, dir_digest)
+
+ directory = remote_execution_pb2.Directory()
+ with open(objpath, 'rb') as f:
+ directory.ParseFromString(f.read())
+
+ for dirnode in directory.directories:
+ batch = self._fetch_directory_node(remote, dirnode.digest, batch,
+ fetch_queue, fetch_next_queue, recursive=True)
+
+ # Fetch final batch
+ self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
+
+ def _fetch_tree(self, remote, digest):
+ # download but do not store the Tree object
+ with utils._tempnamedfile(dir=self.tmpdir) as out:
+ remote._fetch_blob(digest, out)
+
+ tree = remote_execution_pb2.Tree()
+
+ with open(out.name, 'rb') as f:
+ tree.ParseFromString(f.read())
+
+ tree.children.extend([tree.root])
+ for directory in tree.children:
+ dirbuffer = directory.SerializeToString()
+ dirdigest = self.add_object(buffer=dirbuffer)
+ assert dirdigest.size_bytes == len(dirbuffer)
+
+ return dirdigest
+
+ # fetch_blobs():
+ #
+ # Fetch blobs from remote CAS. Returns missing blobs that could not be fetched.
+ #
+ # Args:
+ # remote (CASRemote): The remote repository to fetch from
+ # digests (list): The Digests of blobs to fetch
+ #
+ # Returns: The Digests of the blobs that were not available on the remote CAS
+ #
+ def fetch_blobs(self, remote, digests):
+ missing_blobs = []
+
+ batch = _CASBatchRead(remote)
+
+ for digest in digests:
+ if (digest.size_bytes >= remote.max_batch_total_size_bytes or
+ not remote.batch_read_supported):
+ # Too large for batch request, download in independent request.
+ try:
+ self._ensure_blob(remote, digest)
+ except grpc.RpcError as e:
+ if e.code() == grpc.StatusCode.NOT_FOUND:
+ missing_blobs.append(digest)
+ else:
+ raise CASCacheError("Failed to fetch blob: {}".format(e)) from e
+ else:
+ if not batch.add(digest):
+ # Not enough space left in batch request.
+ # Complete pending batch first.
+ self._batch_download_complete(batch, missing_blobs=missing_blobs)
+
+ batch = _CASBatchRead(remote)
+ batch.add(digest)
+
+ # Complete last pending batch
+ self._batch_download_complete(batch, missing_blobs=missing_blobs)
+
+ return missing_blobs
+
+ # send_blobs():
+ #
+ # Upload blobs to remote CAS.
+ #
+ # Args:
+ # remote (CASRemote): The remote repository to upload to
+ # digests (list): The Digests of Blobs to upload
+ #
+ def send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
+ batch = _CASBatchUpdate(remote)
+
+ for digest in digests:
+ with open(self.objpath(digest), 'rb') as f:
+ assert os.fstat(f.fileno()).st_size == digest.size_bytes
+
+ if (digest.size_bytes >= remote.max_batch_total_size_bytes or
+ not remote.batch_update_supported):
+ # Too large for batch request, upload in independent request.
+ remote._send_blob(digest, f, u_uid=u_uid)
+ else:
+ if not batch.add(digest, f):
+ # Not enough space left in batch request.
+ # Complete pending batch first.
+ batch.send()
+ batch = _CASBatchUpdate(remote)
+ batch.add(digest, f)
+
+ # Send final batch
+ batch.send()
+
+ def _send_directory(self, remote, digest, u_uid=uuid.uuid4()):
+ missing_blobs = self.remote_missing_blobs_for_directory(remote, digest)
+
+ # Upload any blobs missing on the server
+ self.send_blobs(remote, missing_blobs, u_uid)
+
+
+class CASQuota:
+ def __init__(self, context):
+ self.context = context
+ self.cas = context.get_cascache()
+ self.casdir = self.cas.casdir
+ self._config_cache_quota = context.config_cache_quota
+ self._config_cache_quota_string = context.config_cache_quota_string
+ self._cache_size = None # The current cache size, sometimes it's an estimate
+ self._cache_quota = None # The cache quota
+ self._cache_quota_original = None # The cache quota as specified by the user, in bytes
+ self._cache_quota_headroom = None # The headroom in bytes before reaching the quota or full disk
+ self._cache_lower_threshold = None # The target cache size for a cleanup
+ self.available_space = None
+
+ self._message = context.message
+
+ self._remove_callbacks = [] # Callbacks to remove unrequired refs and their remove method
+ self._list_refs_callbacks = [] # Callbacks to all refs
+
+ self._calculate_cache_quota()
+
+ # compute_cache_size()
+ #
+ # Computes the real artifact cache size.
+ #
+ # Returns:
+ # (int): The size of the artifact cache.
+ #
+ def compute_cache_size(self):
+ self._cache_size = utils._get_dir_size(self.casdir)
+ return self._cache_size
+
+ # get_cache_size()
+ #
+ # Fetches the cached size of the cache, this is sometimes
+ # an estimate and periodically adjusted to the real size
+ # when a cache size calculation job runs.
+ #
+ # When it is an estimate, the value is either correct, or
+ # it is greater than the actual cache size.
+ #
+ # Returns:
+ # (int) An approximation of the artifact cache size, in bytes.
+ #
+ def get_cache_size(self):
+
+ # If we don't currently have an estimate, figure out the real cache size.
+ if self._cache_size is None:
+ stored_size = self._read_cache_size()
+ if stored_size is not None:
+ self._cache_size = stored_size
+ else:
+ self.compute_cache_size()
+
+ return self._cache_size
+
+ # set_cache_size()
+ #
+ # Forcefully set the overall cache size.
+ #
+ # This is used to update the size in the main process after
+ # having calculated in a cleanup or a cache size calculation job.
+ #
+ # Args:
+ # cache_size (int): The size to set.
+ # write_to_disk (bool): Whether to write the value to disk.
+ #
+ def set_cache_size(self, cache_size, *, write_to_disk=True):
+
+ assert cache_size is not None
+
+ self._cache_size = cache_size
+ if write_to_disk:
+ self._write_cache_size(self._cache_size)
+
+ # full()
+ #
+ # Checks if the artifact cache is full, either
+ # because the user configured quota has been exceeded
+ # or because the underlying disk is almost full.
+ #
+ # Returns:
+ # (bool): True if the artifact cache is full
+ #
+ def full(self):
+
+ if self.get_cache_size() > self._cache_quota:
+ return True
+
+ _, volume_avail = self._get_cache_volume_size()
+ if volume_avail < self._cache_quota_headroom:
+ return True
+
+ return False
+
+ # add_remove_callbacks()
+ #
+ # This adds tuples of iterators over unrequired objects (currently
+ # artifacts and source refs), and a callback to remove them.
+ #
+ # Args:
+ # list_unrequired (callable): Returns an iterator of (mtime, ref) tuples
+ # over unrequired refs
+ # remove_method (callable): The method used to remove a given ref
+ #
+ def add_remove_callbacks(self, list_unrequired, remove_method):
+ self._remove_callbacks.append((list_unrequired, remove_method))
+
+ def add_list_refs_callback(self, list_callback):
+ self._list_refs_callbacks.append(list_callback)
+
+ ################################################
+ # Local Private Methods #
+ ################################################
+
+ # _read_cache_size()
+ #
+ # Reads and returns the size of the artifact cache that's stored in the
+ # cache's size file
+ #
+ # Returns:
+ # (int): The size of the artifact cache, as recorded in the file
+ #
+ def _read_cache_size(self):
+ size_file_path = os.path.join(self.casdir, CACHE_SIZE_FILE)
+
+ if not os.path.exists(size_file_path):
+ return None
+
+ with open(size_file_path, "r") as f:
+ size = f.read()
+
+ try:
+ num_size = int(size)
+ except ValueError as e:
+ raise CASCacheError("Size '{}' parsed from '{}' was not an integer".format(
+ size, size_file_path)) from e
+
+ return num_size
+
+ # _write_cache_size()
+ #
+ # Writes the given size of the artifact cache to the cache's size file
+ #
+ # Args:
+ # size (int): The size of the artifact cache to record
+ #
+ def _write_cache_size(self, size):
+ assert isinstance(size, int)
+ size_file_path = os.path.join(self.casdir, CACHE_SIZE_FILE)
+ with utils.save_file_atomic(size_file_path, "w", tempdir=self.cas.tmpdir) as f:
+ f.write(str(size))
+
+ # _get_cache_volume_size()
+ #
+ # Get the available space and total space for the volume on
+ # which the artifact cache is located.
+ #
+ # Returns:
+ # (int): The total number of bytes on the volume
+ # (int): The number of available bytes on the volume
+ #
+ # NOTE: We use this stub to allow the test cases
+ # to override what an artifact cache thinks
+ # about its disk size and available bytes.
+ #
+ def _get_cache_volume_size(self):
+ return utils._get_volume_size(self.casdir)
+
+ # _calculate_cache_quota()
+ #
+ # Calculates and sets the cache quota and lower threshold based on the
+ # quota set in Context.
+ # It checks that the quota is both a valid expression, and that there is
+ # enough disk space to satisfy that quota
+ #
+ def _calculate_cache_quota(self):
+ # Headroom intended to give BuildStream a bit of leeway.
+ # This acts as the minimum size of cache_quota and also
+ # is taken from the user requested cache_quota.
+ #
+ if 'BST_TEST_SUITE' in os.environ:
+ self._cache_quota_headroom = 0
+ else:
+ self._cache_quota_headroom = 2e9
+
+ total_size, available_space = self._get_cache_volume_size()
+ cache_size = self.get_cache_size()
+ self.available_space = available_space
+
+ # Ensure system has enough storage for the cache_quota
+ #
+ # If cache_quota is none, set it to the maximum it could possibly be.
+ #
+ # Also check that cache_quota is at least as large as our headroom.
+ #
+ cache_quota = self._config_cache_quota
+ if cache_quota is None:
+ # The user has set no limit, so we may take all the space.
+ cache_quota = min(cache_size + available_space, total_size)
+ if cache_quota < self._cache_quota_headroom: # Check minimum
+ raise LoadError(
+ LoadErrorReason.INVALID_DATA,
+ "Invalid cache quota ({}): BuildStream requires a minimum cache quota of {}.".format(
+ utils._pretty_size(cache_quota),
+ utils._pretty_size(self._cache_quota_headroom)))
+ elif cache_quota > total_size:
+ # A quota greater than the total disk size is certainly an error
+ raise CASCacheError("Your system does not have enough available " +
+ "space to support the cache quota specified.",
+ detail=("You have specified a quota of {quota} total disk space.\n" +
+ "The filesystem containing {local_cache_path} only " +
+ "has {total_size} total disk space.")
+ .format(
+ quota=self._config_cache_quota,
+ local_cache_path=self.casdir,
+ total_size=utils._pretty_size(total_size)),
+ reason='insufficient-storage-for-quota')
+
+ elif cache_quota > cache_size + available_space:
+ # The quota does not fit in the available space, this is a warning
+ if '%' in self._config_cache_quota_string:
+ available = (available_space / total_size) * 100
+ available = '{}% of total disk space'.format(round(available, 1))
+ else:
+ available = utils._pretty_size(available_space)
+
+ self._message(Message(
+ None,
+ MessageType.WARN,
+ "Your system does not have enough available " +
+ "space to support the cache quota specified.",
+ detail=("You have specified a quota of {quota} total disk space.\n" +
+ "The filesystem containing {local_cache_path} only " +
+ "has {available_size} available.")
+ .format(quota=self._config_cache_quota,
+ local_cache_path=self.casdir,
+ available_size=available)))
+
+ # Leave a slight headroom (2e9, i.e. 2GB) below the configured
+ # cache_quota to try and avoid exceptions.
+ #
+ # Of course, we might still end up running out during a build
+ # if we end up writing more than 2G, but hey, this stuff is
+ # already really fuzzy.
+ #
+ self._cache_quota_original = cache_quota
+ self._cache_quota = cache_quota - self._cache_quota_headroom
+ self._cache_lower_threshold = self._cache_quota / 2
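+
+ # Worked example (illustrative): with a configured quota of 10GB and the
+ # default 2GB headroom, the internal quota becomes 8GB and a cleanup will
+ # aim to reduce the cache to 4GB (half of the internal quota).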
+
+ # clean():
+ #
+ # Clean the artifact cache as much as possible.
+ #
+ # Args:
+ # progress (callable): A callback to call when a ref is removed
+ #
+ # Returns:
+ # (int): The size of the cache after having cleaned up
+ #
+ def clean(self, progress=None):
+ context = self.context
+
+ # Some cumulative statistics
+ removed_ref_count = 0
+ space_saved = 0
+
+ total_refs = 0
+ for refs in self._list_refs_callbacks:
+ total_refs += len(list(refs()))
+
+ # Start off with an announcement with as much info as possible
+ volume_size, volume_avail = self._get_cache_volume_size()
+ self._message(Message(
+ None, MessageType.STATUS, "Starting cache cleanup",
+ detail=("Elements required by the current build plan:\n" + "{}\n" +
+ "User specified quota: {} ({})\n" +
+ "Cache usage: {}\n" +
+ "Cache volume: {} total, {} available")
+ .format(
+ total_refs,
+ context.config_cache_quota,
+ utils._pretty_size(self._cache_quota, dec_places=2),
+ utils._pretty_size(self.get_cache_size(), dec_places=2),
+ utils._pretty_size(volume_size, dec_places=2),
+ utils._pretty_size(volume_avail, dec_places=2))))
+
+ # Do a real computation of the cache size once, just in case
+ self.compute_cache_size()
+ usage = CASCacheUsage(self)
+ self._message(Message(None, MessageType.STATUS,
+ "Cache usage recomputed: {}".format(usage)))
+
+ # Collect digests and their remove method
+ all_unrequired_refs = []
+ for (unrequired_refs, remove) in self._remove_callbacks:
+ for (mtime, ref) in unrequired_refs():
+ all_unrequired_refs.append((mtime, ref, remove))
+
+ # Pair refs and their remove method sorted in time order
+ all_unrequired_refs = [(ref, remove) for (_, ref, remove) in sorted(all_unrequired_refs)]
+
+ # Go through unrequired refs and remove them, oldest first
+ made_space = False
+ for (ref, remove) in all_unrequired_refs:
+ size = remove(ref)
+ removed_ref_count += 1
+ space_saved += size
+
+ self._message(Message(
+ None, MessageType.STATUS,
+ "Freed {: <7} {}".format(
+ utils._pretty_size(size, dec_places=2),
+ ref)))
+
+ self.set_cache_size(self._cache_size - size)
+
+ # User callback
+ #
+ # Currently this process is fairly slow, but we should
+ # think about throttling this progress() callback if this
+ # becomes too intense.
+ if progress:
+ progress()
+
+ if self.get_cache_size() < self._cache_lower_threshold:
+ made_space = True
+ break
+
+ if not made_space and self.full():
+ # If too many artifacts are required, and we therefore
+ # can't remove them, we have to abort the build.
+ #
+ # FIXME: Asking the user what to do may be neater
+ #
+ default_conf = os.path.join(os.environ['XDG_CONFIG_HOME'],
+ 'buildstream.conf')
+ detail = ("Aborted after removing {} refs and saving {} disk space.\n"
+ "The remaining {} in the cache is required by the {} references in your build plan\n\n"
+ "There is not enough space to complete the build.\n"
+ "Please increase the cache-quota in {} and/or make more disk space."
+ .format(removed_ref_count,
+ utils._pretty_size(space_saved, dec_places=2),
+ utils._pretty_size(self.get_cache_size(), dec_places=2),
+ total_refs,
+ (context.config_origin or default_conf)))
+
+ raise CASCacheError("Cache too full. Aborting.",
+ detail=detail,
+ reason="cache-too-full")
+
+ # Informational message about the side effects of the cleanup
+ self._message(Message(
+ None, MessageType.INFO, "Cleanup completed",
+ detail=("Removed {} refs and saved {} disk space.\n" +
+ "Cache usage is now: {}")
+ .format(removed_ref_count,
+ utils._pretty_size(space_saved, dec_places=2),
+ utils._pretty_size(self.get_cache_size(), dec_places=2))))
+
+ return self.get_cache_size()
+
+
+def _grouper(iterable, n):
+ while True:
+ try:
+ current = next(iterable)
+ except StopIteration:
+ return
+ yield itertools.chain([current], itertools.islice(iterable, n - 1))
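+
+
+# Example (illustrative): _grouper() takes an *iterator* and lazily yields
+# chunks of at most n items, e.g.:
+#
+#   for group in _grouper(iter(range(5)), 2):
+#       print(list(group))  # [0, 1], then [2, 3], then [4]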
diff --git a/src/buildstream/_cas/casremote.py b/src/buildstream/_cas/casremote.py
new file mode 100644
index 000000000..aac0d2802
--- /dev/null
+++ b/src/buildstream/_cas/casremote.py
@@ -0,0 +1,391 @@
+from collections import namedtuple
+import io
+import os
+import multiprocessing
+import signal
+from urllib.parse import urlparse
+import uuid
+
+import grpc
+
+from .. import _yaml
+from .._protos.google.rpc import code_pb2
+from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
+from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
+from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
+
+from .._exceptions import CASRemoteError, LoadError, LoadErrorReason
+from .. import _signals
+from .. import utils
+
+# The default limit for gRPC messages is 4 MiB.
+# Limit payload to 1 MiB to leave sufficient headroom for metadata.
+_MAX_PAYLOAD_BYTES = 1024 * 1024
+
+
+class CASRemoteSpec(namedtuple('CASRemoteSpec', 'url push server_cert client_key client_cert instance_name')):
+
+ # _new_from_config_node
+ #
+ # Creates a CASRemoteSpec() from a YAML loaded node
+ #
+ @staticmethod
+ def _new_from_config_node(spec_node, basedir=None):
+ _yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert', 'instance-name'])
+ url = _yaml.node_get(spec_node, str, 'url')
+ push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
+ if not url:
+ provenance = _yaml.node_get_provenance(spec_node, 'url')
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: empty artifact cache URL".format(provenance))
+
+ instance_name = _yaml.node_get(spec_node, str, 'instance-name', default_value=None)
+
+ server_cert = _yaml.node_get(spec_node, str, 'server-cert', default_value=None)
+ if server_cert and basedir:
+ server_cert = os.path.join(basedir, server_cert)
+
+ client_key = _yaml.node_get(spec_node, str, 'client-key', default_value=None)
+ if client_key and basedir:
+ client_key = os.path.join(basedir, client_key)
+
+ client_cert = _yaml.node_get(spec_node, str, 'client-cert', default_value=None)
+ if client_cert and basedir:
+ client_cert = os.path.join(basedir, client_cert)
+
+ if client_key and not client_cert:
+ provenance = _yaml.node_get_provenance(spec_node, 'client-key')
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: 'client-key' was specified without 'client-cert'".format(provenance))
+
+ if client_cert and not client_key:
+ provenance = _yaml.node_get_provenance(spec_node, 'client-cert')
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: 'client-cert' was specified without 'client-key'".format(provenance))
+
+ return CASRemoteSpec(url, push, server_cert, client_key, client_cert, instance_name)
+
+
+CASRemoteSpec.__new__.__defaults__ = (None, None, None, None)
+
+
+class BlobNotFound(CASRemoteError):
+
+ def __init__(self, blob, msg):
+ self.blob = blob
+ super().__init__(msg)
+
+
+# Represents a single remote CAS cache.
+#
+class CASRemote():
+ def __init__(self, spec):
+ self.spec = spec
+ self._initialized = False
+ self.channel = None
+ self.instance_name = None
+ self.bytestream = None
+ self.cas = None
+ self.ref_storage = None
+ self.batch_update_supported = None
+ self.batch_read_supported = None
+ self.capabilities = None
+ self.max_batch_total_size_bytes = None
+
+ def init(self):
+ if not self._initialized:
+ url = urlparse(self.spec.url)
+ if url.scheme == 'http':
+ port = url.port or 80
+ self.channel = grpc.insecure_channel('{}:{}'.format(url.hostname, port))
+ elif url.scheme == 'https':
+ port = url.port or 443
+
+ if self.spec.server_cert:
+ with open(self.spec.server_cert, 'rb') as f:
+ server_cert_bytes = f.read()
+ else:
+ server_cert_bytes = None
+
+ if self.spec.client_key:
+ with open(self.spec.client_key, 'rb') as f:
+ client_key_bytes = f.read()
+ else:
+ client_key_bytes = None
+
+ if self.spec.client_cert:
+ with open(self.spec.client_cert, 'rb') as f:
+ client_cert_bytes = f.read()
+ else:
+ client_cert_bytes = None
+
+ credentials = grpc.ssl_channel_credentials(root_certificates=server_cert_bytes,
+ private_key=client_key_bytes,
+ certificate_chain=client_cert_bytes)
+ self.channel = grpc.secure_channel('{}:{}'.format(url.hostname, port), credentials)
+ else:
+ raise CASRemoteError("Unsupported URL: {}".format(self.spec.url))
+
+ self.instance_name = self.spec.instance_name or None
+
+ self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
+ self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
+ self.capabilities = remote_execution_pb2_grpc.CapabilitiesStub(self.channel)
+ self.ref_storage = buildstream_pb2_grpc.ReferenceStorageStub(self.channel)
+
+ self.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
+ try:
+ request = remote_execution_pb2.GetCapabilitiesRequest()
+ if self.instance_name:
+ request.instance_name = self.instance_name
+ response = self.capabilities.GetCapabilities(request)
+ server_max_batch_total_size_bytes = response.cache_capabilities.max_batch_total_size_bytes
+ if 0 < server_max_batch_total_size_bytes < self.max_batch_total_size_bytes:
+ self.max_batch_total_size_bytes = server_max_batch_total_size_bytes
+ except grpc.RpcError as e:
+ # Simply use the defaults for servers that don't implement GetCapabilities()
+ if e.code() != grpc.StatusCode.UNIMPLEMENTED:
+ raise
+
+ # Check whether the server supports BatchReadBlobs()
+ self.batch_read_supported = False
+ try:
+ request = remote_execution_pb2.BatchReadBlobsRequest()
+ if self.instance_name:
+ request.instance_name = self.instance_name
+ response = self.cas.BatchReadBlobs(request)
+ self.batch_read_supported = True
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.UNIMPLEMENTED:
+ raise
+
+ # Check whether the server supports BatchUpdateBlobs()
+ self.batch_update_supported = False
+ try:
+ request = remote_execution_pb2.BatchUpdateBlobsRequest()
+ if self.instance_name:
+ request.instance_name = self.instance_name
+ response = self.cas.BatchUpdateBlobs(request)
+ self.batch_update_supported = True
+ except grpc.RpcError as e:
+ if (e.code() != grpc.StatusCode.UNIMPLEMENTED and
+ e.code() != grpc.StatusCode.PERMISSION_DENIED):
+ raise
+
+ self._initialized = True
+
+ # check_remote():
+ #
+ # Checks whether the given remote spec is usable. This runs in a separate
+ # process to avoid creating gRPC threads in the main BuildStream process.
+ # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
+ #
+ # Args:
+ #     remote_spec (CASRemoteSpec): The spec of the remote to check
+ #     q (multiprocessing.Queue): Queue used to report the result
+ #
+ # Returns:
+ #     (str|None): An error message if the remote is not usable, otherwise None
+ @classmethod
+ def check_remote(cls, remote_spec, q):
+
+ def __check_remote():
+ try:
+ remote = cls(remote_spec)
+ remote.init()
+
+ request = buildstream_pb2.StatusRequest()
+ response = remote.ref_storage.Status(request)
+
+ if remote_spec.push and not response.allow_updates:
+ q.put('CAS server does not allow push')
+ else:
+ # No error
+ q.put(None)
+
+ except grpc.RpcError as e:
+ # str(e) is too verbose for errors reported to the user
+ q.put(e.details())
+
+ except Exception as e: # pylint: disable=broad-except
+ # Whatever happens, we need to return it to the calling process
+ #
+ q.put(str(e))
+
+ p = multiprocessing.Process(target=__check_remote)
+
+ try:
+ # Keep SIGINT blocked in the child process
+ with _signals.blocked([signal.SIGINT], ignore=False):
+ p.start()
+
+ error = q.get()
+ p.join()
+ except KeyboardInterrupt:
+ utils._kill_process_tree(p.pid)
+ raise
+
+ return error
+
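+ # A usage sketch for check_remote() (illustrative; the URL is an assumption):
+ #
+ #     q = multiprocessing.Queue()
+ #     spec = CASRemoteSpec('https://cas.example.com:11001', push=False)
+ #     error = CASRemote.check_remote(spec, q)
+ #     if error is not None:
+ #         raise CASRemoteError("Remote not usable: {}".format(error))
+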
+ # push_message():
+ #
+ # Push the given protobuf message to a remote.
+ #
+ # Args:
+ # message (Message): A protobuf message to push.
+ #
+ # Raises:
+ # (CASRemoteError): if there was an error
+ #
+ def push_message(self, message):
+
+ message_buffer = message.SerializeToString()
+ message_digest = utils._message_digest(message_buffer)
+
+ self.init()
+
+ with io.BytesIO(message_buffer) as b:
+ self._send_blob(message_digest, b)
+
+ return message_digest
+
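+ # For example (a sketch; 'remote' is an initialized CASRemote, and any
+ # protobuf message can be pushed, an empty Digest is used here only for
+ # illustration):
+ #
+ #     digest = remote.push_message(remote_execution_pb2.Digest())
+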
+ ################################################
+ # Local Private Methods #
+ ################################################
+ def _fetch_blob(self, digest, stream):
+ if self.instance_name:
+ resource_name = '/'.join([self.instance_name, 'blobs',
+ digest.hash, str(digest.size_bytes)])
+ else:
+ resource_name = '/'.join(['blobs',
+ digest.hash, str(digest.size_bytes)])
+
+ request = bytestream_pb2.ReadRequest()
+ request.resource_name = resource_name
+ request.read_offset = 0
+ for response in self.bytestream.Read(request):
+ stream.write(response.data)
+ stream.flush()
+
+ assert digest.size_bytes == os.fstat(stream.fileno()).st_size
+
+ def _send_blob(self, digest, stream, u_uid=uuid.uuid4()):
+ if self.instance_name:
+ resource_name = '/'.join([self.instance_name, 'uploads', str(u_uid), 'blobs',
+ digest.hash, str(digest.size_bytes)])
+ else:
+ resource_name = '/'.join(['uploads', str(u_uid), 'blobs',
+ digest.hash, str(digest.size_bytes)])
+
+ def request_stream(resname, instream):
+ offset = 0
+ finished = False
+ remaining = digest.size_bytes
+ while not finished:
+ chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
+ remaining -= chunk_size
+
+ request = bytestream_pb2.WriteRequest()
+ request.write_offset = offset
+ # Read at most _MAX_PAYLOAD_BYTES per chunk from the input stream
+ request.data = instream.read(chunk_size)
+ request.resource_name = resname
+ request.finish_write = remaining <= 0
+
+ yield request
+
+ offset += chunk_size
+ finished = request.finish_write
+
+ response = self.bytestream.Write(request_stream(resource_name, stream))
+
+ assert response.committed_size == digest.size_bytes
+
+
+# Represents a batch of blobs queued for fetching.
+#
+class _CASBatchRead():
+ def __init__(self, remote):
+ self._remote = remote
+ self._max_total_size_bytes = remote.max_batch_total_size_bytes
+ self._request = remote_execution_pb2.BatchReadBlobsRequest()
+ if remote.instance_name:
+ self._request.instance_name = remote.instance_name
+ self._size = 0
+ self._sent = False
+
+ def add(self, digest):
+ assert not self._sent
+
+ new_batch_size = self._size + digest.size_bytes
+ if new_batch_size > self._max_total_size_bytes:
+ # Not enough space left in current batch
+ return False
+
+ request_digest = self._request.digests.add()
+ request_digest.hash = digest.hash
+ request_digest.size_bytes = digest.size_bytes
+ self._size = new_batch_size
+ return True
+
+ def send(self, *, missing_blobs=None):
+ assert not self._sent
+ self._sent = True
+
+ if not self._request.digests:
+ return
+
+ batch_response = self._remote.cas.BatchReadBlobs(self._request)
+
+ for response in batch_response.responses:
+ if response.status.code == code_pb2.NOT_FOUND:
+ if missing_blobs is None:
+ raise BlobNotFound(response.digest.hash, "Failed to download blob {}: {}".format(
+ response.digest.hash, response.status.code))
+ else:
+ missing_blobs.append(response.digest)
+
+ if response.status.code != code_pb2.OK:
+ raise CASRemoteError("Failed to download blob {}: {}".format(
+ response.digest.hash, response.status.code))
+ if response.digest.size_bytes != len(response.data):
+ raise CASRemoteError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
+ response.digest.hash, response.digest.size_bytes, len(response.data)))
+
+ yield (response.digest, response.data)
+
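+# A fetch-loop sketch for _CASBatchRead (illustrative; 'remote' and 'digests'
+# are assumed to be provided by the caller):
+#
+#     batch = _CASBatchRead(remote)
+#     for digest in digests:
+#         if not batch.add(digest):
+#             break  # batch full; send it and start a new one
+#     for digest, data in batch.send():
+#         pass  # write 'data' into the local CAS under 'digest'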
+
+# Represents a batch of blobs queued for upload.
+#
+class _CASBatchUpdate():
+ def __init__(self, remote):
+ self._remote = remote
+ self._max_total_size_bytes = remote.max_batch_total_size_bytes
+ self._request = remote_execution_pb2.BatchUpdateBlobsRequest()
+ if remote.instance_name:
+ self._request.instance_name = remote.instance_name
+ self._size = 0
+ self._sent = False
+
+ def add(self, digest, stream):
+ assert not self._sent
+
+ new_batch_size = self._size + digest.size_bytes
+ if new_batch_size > self._max_total_size_bytes:
+ # Not enough space left in current batch
+ return False
+
+ blob_request = self._request.requests.add()
+ blob_request.digest.hash = digest.hash
+ blob_request.digest.size_bytes = digest.size_bytes
+ blob_request.data = stream.read(digest.size_bytes)
+ self._size = new_batch_size
+ return True
+
+ def send(self):
+ assert not self._sent
+ self._sent = True
+
+ if not self._request.requests:
+ return
+
+ batch_response = self._remote.cas.BatchUpdateBlobs(self._request)
+
+ for response in batch_response.responses:
+ if response.status.code != code_pb2.OK:
+ raise CASRemoteError("Failed to upload blob {}: {}".format(
+ response.digest.hash, response.status.code))
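+
+# An upload sketch for _CASBatchUpdate (illustrative; 'remote' is an
+# initialized CASRemote and 'objpath' is the local path of the blob described
+# by 'digest'):
+#
+#     batch = _CASBatchUpdate(remote)
+#     with open(objpath, 'rb') as f:
+#         if batch.add(digest, f):
+#             batch.send()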
diff --git a/src/buildstream/_cas/casserver.py b/src/buildstream/_cas/casserver.py
new file mode 100644
index 000000000..c08a4d577
--- /dev/null
+++ b/src/buildstream/_cas/casserver.py
@@ -0,0 +1,619 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+from concurrent import futures
+import logging
+import os
+import signal
+import sys
+import tempfile
+import uuid
+import errno
+import threading
+
+import grpc
+from google.protobuf.message import DecodeError
+import click
+
+from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
+from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
+from .._protos.google.rpc import code_pb2
+from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc, \
+ artifact_pb2, artifact_pb2_grpc
+
+from .._exceptions import CASError
+
+from .cascache import CASCache
+
+
+# The default limit for gRPC messages is 4 MiB.
+# Limit payload to 1 MiB to leave sufficient headroom for metadata.
+_MAX_PAYLOAD_BYTES = 1024 * 1024
+
+
+# Trying to push an artifact that is too large
+class ArtifactTooLargeException(Exception):
+ pass
+
+
+# create_server():
+#
+# Create a gRPC CAS artifact server as specified in the Remote Execution API.
+#
+# Args:
+# repo (str): Path to CAS repository
+# enable_push (bool): Whether to allow blob uploads and artifact updates
+#
+def create_server(repo, *, enable_push,
+ max_head_size=int(10e9),
+ min_head_size=int(2e9)):
+ cas = CASCache(os.path.abspath(repo))
+ artifactdir = os.path.join(os.path.abspath(repo), 'artifacts', 'refs')
+
+ # Use max_workers default from Python 3.5+
+ max_workers = (os.cpu_count() or 1) * 5
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers))
+
+ cache_cleaner = _CacheCleaner(cas, max_head_size, min_head_size)
+
+ bytestream_pb2_grpc.add_ByteStreamServicer_to_server(
+ _ByteStreamServicer(cas, cache_cleaner, enable_push=enable_push), server)
+
+ remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
+ _ContentAddressableStorageServicer(cas, cache_cleaner, enable_push=enable_push), server)
+
+ remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(
+ _CapabilitiesServicer(), server)
+
+ buildstream_pb2_grpc.add_ReferenceStorageServicer_to_server(
+ _ReferenceStorageServicer(cas, enable_push=enable_push), server)
+
+ artifact_pb2_grpc.add_ArtifactServiceServicer_to_server(
+ _ArtifactServicer(cas, artifactdir), server)
+
+ return server
+
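+# A minimal usage sketch (illustrative; the repository path and port are
+# assumptions):
+#
+#     server = create_server('/srv/cas', enable_push=True)
+#     server.add_insecure_port('[::]:11001')
+#     server.start()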
+
+@click.command(short_help="CAS Artifact Server")
+@click.option('--port', '-p', type=click.INT, required=True, help="Port number")
+@click.option('--server-key', help="Private server key for TLS (PEM-encoded)")
+@click.option('--server-cert', help="Public server certificate for TLS (PEM-encoded)")
+@click.option('--client-certs', help="Public client certificates for TLS (PEM-encoded)")
+@click.option('--enable-push', default=False, is_flag=True,
+ help="Allow clients to upload blobs and update artifact cache")
+@click.option('--head-room-min', type=click.INT,
+ help="Disk head room minimum in bytes",
+ default=2e9)
+@click.option('--head-room-max', type=click.INT,
+ help="Disk head room maximum in bytes",
+ default=10e9)
+@click.argument('repo')
+def server_main(repo, port, server_key, server_cert, client_certs, enable_push,
+ head_room_min, head_room_max):
+ server = create_server(repo,
+ max_head_size=head_room_max,
+ min_head_size=head_room_min,
+ enable_push=enable_push)
+
+ use_tls = bool(server_key)
+
+ if bool(server_cert) != use_tls:
+ click.echo("ERROR: --server-key and --server-cert are both required for TLS", err=True)
+ sys.exit(-1)
+
+ if client_certs and not use_tls:
+ click.echo("ERROR: --client-certs can only be used with --server-key", err=True)
+ sys.exit(-1)
+
+ if use_tls:
+ # Read public/private key pair
+ with open(server_key, 'rb') as f:
+ server_key_bytes = f.read()
+ with open(server_cert, 'rb') as f:
+ server_cert_bytes = f.read()
+
+ if client_certs:
+ with open(client_certs, 'rb') as f:
+ client_certs_bytes = f.read()
+ else:
+ client_certs_bytes = None
+
+ credentials = grpc.ssl_server_credentials([(server_key_bytes, server_cert_bytes)],
+ root_certificates=client_certs_bytes,
+ require_client_auth=bool(client_certs))
+ server.add_secure_port('[::]:{}'.format(port), credentials)
+ else:
+ server.add_insecure_port('[::]:{}'.format(port))
+
+ # Run artifact server
+ server.start()
+ try:
+ while True:
+ signal.pause()
+ except KeyboardInterrupt:
+ server.stop(0)
+
+
+class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
+ def __init__(self, cas, cache_cleaner, *, enable_push):
+ super().__init__()
+ self.cas = cas
+ self.enable_push = enable_push
+ self.cache_cleaner = cache_cleaner
+
+ def Read(self, request, context):
+ resource_name = request.resource_name
+ client_digest = _digest_from_download_resource_name(resource_name)
+ if client_digest is None:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+ return
+
+ if request.read_offset > client_digest.size_bytes:
+ context.set_code(grpc.StatusCode.OUT_OF_RANGE)
+ return
+
+ try:
+ with open(self.cas.objpath(client_digest), 'rb') as f:
+ if os.fstat(f.fileno()).st_size != client_digest.size_bytes:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+ return
+
+ if request.read_offset > 0:
+ f.seek(request.read_offset)
+
+ remaining = client_digest.size_bytes - request.read_offset
+ while remaining > 0:
+ chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
+ remaining -= chunk_size
+
+ response = bytestream_pb2.ReadResponse()
+ # max. _MAX_PAYLOAD_BYTES per chunk
+ response.data = f.read(chunk_size)
+ yield response
+ except FileNotFoundError:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+
+ def Write(self, request_iterator, context):
+ response = bytestream_pb2.WriteResponse()
+
+ if not self.enable_push:
+ context.set_code(grpc.StatusCode.PERMISSION_DENIED)
+ return response
+
+ offset = 0
+ finished = False
+ resource_name = None
+ with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
+ for request in request_iterator:
+ if finished or request.write_offset != offset:
+ context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
+ return response
+
+ if resource_name is None:
+ # First request
+ resource_name = request.resource_name
+ client_digest = _digest_from_upload_resource_name(resource_name)
+ if client_digest is None:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+ return response
+
+ while True:
+ if client_digest.size_bytes == 0:
+ break
+ try:
+ self.cache_cleaner.clean_up(client_digest.size_bytes)
+ except ArtifactTooLargeException as e:
+ context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
+ context.set_details(str(e))
+ return response
+
+ try:
+ os.posix_fallocate(out.fileno(), 0, client_digest.size_bytes)
+ break
+ except OSError as e:
+ # Multiple uploads can happen at the same time
+ if e.errno != errno.ENOSPC:
+ raise
+
+ elif request.resource_name:
+ # If it is set on subsequent calls, it **must** match the value of the first request.
+ if request.resource_name != resource_name:
+ context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
+ return response
+
+ if (offset + len(request.data)) > client_digest.size_bytes:
+ context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
+ return response
+
+ out.write(request.data)
+ offset += len(request.data)
+ if request.finish_write:
+ if client_digest.size_bytes != offset:
+ context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
+ return response
+ out.flush()
+ digest = self.cas.add_object(path=out.name, link_directly=True)
+ if digest.hash != client_digest.hash:
+ context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
+ return response
+ finished = True
+
+ assert finished
+
+ response.committed_size = offset
+ return response
+
+
+class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
+ def __init__(self, cas, cache_cleaner, *, enable_push):
+ super().__init__()
+ self.cas = cas
+ self.enable_push = enable_push
+ self.cache_cleaner = cache_cleaner
+
+ def FindMissingBlobs(self, request, context):
+ response = remote_execution_pb2.FindMissingBlobsResponse()
+ for digest in request.blob_digests:
+ objpath = self.cas.objpath(digest)
+ try:
+ os.utime(objpath)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ d = response.missing_blob_digests.add()
+ d.hash = digest.hash
+ d.size_bytes = digest.size_bytes
+
+ return response
+
+ def BatchReadBlobs(self, request, context):
+ response = remote_execution_pb2.BatchReadBlobsResponse()
+ batch_size = 0
+
+ for digest in request.digests:
+ batch_size += digest.size_bytes
+ if batch_size > _MAX_PAYLOAD_BYTES:
+ context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
+ return response
+
+ blob_response = response.responses.add()
+ blob_response.digest.hash = digest.hash
+ blob_response.digest.size_bytes = digest.size_bytes
+ try:
+ with open(self.cas.objpath(digest), 'rb') as f:
+ if os.fstat(f.fileno()).st_size != digest.size_bytes:
+ blob_response.status.code = code_pb2.NOT_FOUND
+ continue
+
+ blob_response.data = f.read(digest.size_bytes)
+ except FileNotFoundError:
+ blob_response.status.code = code_pb2.NOT_FOUND
+
+ return response
+
+ def BatchUpdateBlobs(self, request, context):
+ response = remote_execution_pb2.BatchUpdateBlobsResponse()
+
+ if not self.enable_push:
+ context.set_code(grpc.StatusCode.PERMISSION_DENIED)
+ return response
+
+ batch_size = 0
+
+ for blob_request in request.requests:
+ digest = blob_request.digest
+
+ batch_size += digest.size_bytes
+ if batch_size > _MAX_PAYLOAD_BYTES:
+ context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
+ return response
+
+ blob_response = response.responses.add()
+ blob_response.digest.hash = digest.hash
+ blob_response.digest.size_bytes = digest.size_bytes
+
+ if len(blob_request.data) != digest.size_bytes:
+ blob_response.status.code = code_pb2.FAILED_PRECONDITION
+ continue
+
+ try:
+ self.cache_cleaner.clean_up(digest.size_bytes)
+
+ with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
+ out.write(blob_request.data)
+ out.flush()
+ server_digest = self.cas.add_object(path=out.name)
+ if server_digest.hash != digest.hash:
+ blob_response.status.code = code_pb2.FAILED_PRECONDITION
+
+ except ArtifactTooLargeException:
+ blob_response.status.code = code_pb2.RESOURCE_EXHAUSTED
+
+ return response
+
+
+class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
+ def GetCapabilities(self, request, context):
+ response = remote_execution_pb2.ServerCapabilities()
+
+ cache_capabilities = response.cache_capabilities
+ cache_capabilities.digest_function.append(remote_execution_pb2.SHA256)
+ cache_capabilities.action_cache_update_capabilities.update_enabled = False
+ cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
+ cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.CacheCapabilities.ALLOWED
+
+ response.deprecated_api_version.major = 2
+ response.low_api_version.major = 2
+ response.high_api_version.major = 2
+
+ return response
+
+
+class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
+ def __init__(self, cas, *, enable_push):
+ super().__init__()
+ self.cas = cas
+ self.enable_push = enable_push
+
+ def GetReference(self, request, context):
+ response = buildstream_pb2.GetReferenceResponse()
+
+ try:
+ tree = self.cas.resolve_ref(request.key, update_mtime=True)
+ try:
+ self.cas.update_tree_mtime(tree)
+ except FileNotFoundError:
+ self.cas.remove(request.key, defer_prune=True)
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+ return response
+
+ response.digest.hash = tree.hash
+ response.digest.size_bytes = tree.size_bytes
+ except CASError:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+
+ return response
+
+ def UpdateReference(self, request, context):
+ response = buildstream_pb2.UpdateReferenceResponse()
+
+ if not self.enable_push:
+ context.set_code(grpc.StatusCode.PERMISSION_DENIED)
+ return response
+
+ for key in request.keys:
+ self.cas.set_ref(key, request.digest)
+
+ return response
+
+ def Status(self, request, context):
+ response = buildstream_pb2.StatusResponse()
+
+ response.allow_updates = self.enable_push
+
+ return response
+
+
+class _ArtifactServicer(artifact_pb2_grpc.ArtifactServiceServicer):
+
+ def __init__(self, cas, artifactdir):
+ super().__init__()
+ self.cas = cas
+ self.artifactdir = artifactdir
+ os.makedirs(artifactdir, exist_ok=True)
+
+ def GetArtifact(self, request, context):
+ artifact_path = os.path.join(self.artifactdir, request.cache_key)
+ if not os.path.exists(artifact_path):
+ context.abort(grpc.StatusCode.NOT_FOUND, "Artifact proto not found")
+
+ artifact = artifact_pb2.Artifact()
+ with open(artifact_path, 'rb') as f:
+ artifact.ParseFromString(f.read())
+
+ # Now update mtimes of files present.
+ try:
+
+ if str(artifact.files):
+ self.cas.update_tree_mtime(artifact.files)
+
+ if str(artifact.buildtree):
+ # buildtrees might not be there
+ try:
+ self.cas.update_tree_mtime(artifact.buildtree)
+ except FileNotFoundError:
+ pass
+
+ if str(artifact.public_data):
+ os.utime(self.cas.objpath(artifact.public_data))
+
+ for log_file in artifact.logs:
+ os.utime(self.cas.objpath(log_file.digest))
+
+ except FileNotFoundError:
+ os.unlink(artifact_path)
+ context.abort(grpc.StatusCode.NOT_FOUND,
+ "Artifact files incomplete")
+ except DecodeError:
+ context.abort(grpc.StatusCode.NOT_FOUND,
+ "Artifact files not valid")
+
+ return artifact
+
+ def UpdateArtifact(self, request, context):
+ artifact = request.artifact
+
+ # Check that the files specified are in the CAS
+ self._check_directory("files", artifact.files, context)
+
+ # Unset protobuf message fields don't evaluate to False, but they do
+ # stringify to empty strings, hence the str() checks
+ if str(artifact.public_data):
+ self._check_file("public data", artifact.public_data, context)
+
+ for log_file in artifact.logs:
+ self._check_file("log digest", log_file.digest, context)
+
+ # Add the artifact proto to the cas
+ artifact_path = os.path.join(self.artifactdir, request.cache_key)
+ os.makedirs(os.path.dirname(artifact_path), exist_ok=True)
+ with open(artifact_path, 'wb') as f:
+ f.write(artifact.SerializeToString())
+
+ return artifact
+
+ def _check_directory(self, name, digest, context):
+ try:
+ directory = remote_execution_pb2.Directory()
+ with open(self.cas.objpath(digest), 'rb') as f:
+ directory.ParseFromString(f.read())
+ except FileNotFoundError:
+ context.abort(grpc.StatusCode.FAILED_PRECONDITION,
+ "Artifact {} specified but no files found".format(name))
+ except DecodeError:
+ context.abort(grpc.StatusCode.FAILED_PRECONDITION,
+ "Artifact {} specified but directory not found".format(name))
+
+ def _check_file(self, name, digest, context):
+ if not os.path.exists(self.cas.objpath(digest)):
+ context.abort(grpc.StatusCode.FAILED_PRECONDITION,
+ "Artifact {} specified but not found".format(name))
+
+
+def _digest_from_download_resource_name(resource_name):
+ parts = resource_name.split('/')
+
+ # Accept requests from non-conforming BuildStream 1.1.x clients
+ if len(parts) == 2:
+ parts.insert(0, 'blobs')
+
+ if len(parts) != 3 or parts[0] != 'blobs':
+ return None
+
+ try:
+ digest = remote_execution_pb2.Digest()
+ digest.hash = parts[1]
+ digest.size_bytes = int(parts[2])
+ return digest
+ except ValueError:
+ return None
+
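+# For example (a sketch), a conforming download resource name has the form
+# 'blobs/{hash}/{size}' (a bare '{hash}/{size}' from BuildStream 1.1.x clients
+# is also accepted):
+#
+#     digest = _digest_from_download_resource_name(
+#         'blobs/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/0')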
+
+def _digest_from_upload_resource_name(resource_name):
+ parts = resource_name.split('/')
+
+ # Accept requests from non-conforming BuildStream 1.1.x clients
+ if len(parts) == 2:
+ parts.insert(0, 'uploads')
+ parts.insert(1, str(uuid.uuid4()))
+ parts.insert(2, 'blobs')
+
+ if len(parts) < 5 or parts[0] != 'uploads' or parts[2] != 'blobs':
+ return None
+
+ try:
+ uuid_ = uuid.UUID(hex=parts[1])
+ if uuid_.version != 4:
+ return None
+
+ digest = remote_execution_pb2.Digest()
+ digest.hash = parts[3]
+ digest.size_bytes = int(parts[4])
+ return digest
+ except ValueError:
+ return None
+
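+# Similarly (a sketch), an upload resource name has the form
+# 'uploads/{uuid4}/blobs/{hash}/{size}':
+#
+#     digest = _digest_from_upload_resource_name(
+#         'uploads/' + str(uuid.uuid4()) +
+#         '/blobs/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/0')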
+
+class _CacheCleaner:
+
+ __cleanup_cache_lock = threading.Lock()
+
+ def __init__(self, cas, max_head_size, min_head_size=int(2e9)):
+ self.__cas = cas
+ self.__max_head_size = max_head_size
+ self.__min_head_size = min_head_size
+
+ def __has_space(self, object_size):
+ stats = os.statvfs(self.__cas.casdir)
+ free_disk_space = (stats.f_bavail * stats.f_bsize) - self.__min_head_size
+ total_disk_space = (stats.f_blocks * stats.f_bsize) - self.__min_head_size
+
+ if object_size > total_disk_space:
+ raise ArtifactTooLargeException("Artifact of size: {} is too large for "
+ "the filesystem which mounts the remote "
+ "cache".format(object_size))
+
+ return object_size <= free_disk_space
+
+ # clean_up()
+ #
+ # Keep removing Least Recently Pushed (LRP) artifacts in a cache until there
+ # is enough space for the incoming artifact
+ #
+ # Args:
+ # object_size: The size of the object being received in bytes
+ #
+ # Returns:
+ # int: The total bytes removed on the filesystem
+ #
+ def clean_up(self, object_size):
+ if self.__has_space(object_size):
+ return 0
+
+ with _CacheCleaner.__cleanup_cache_lock:
+ if self.__has_space(object_size):
+ # Another thread has done the cleanup for us
+ return 0
+
+ stats = os.statvfs(self.__cas.casdir)
+ target_disk_space = (stats.f_bavail * stats.f_bsize) - self.__max_head_size
+
+ # obtain a list of LRP artifacts
+ LRP_objects = self.__cas.list_objects()
+
+ removed_size = 0 # in bytes
+ last_mtime = 0
+
+ while object_size - removed_size > target_disk_space:
+ try:
+ last_mtime, to_remove = LRP_objects.pop(0) # The first element in the list is the LRP artifact
+ except IndexError:
+ # This exception is raised when there are no more objects left in
+ # LRP_objects, which means the artifact is too large for the
+ # filesystem, so we abort the process
+ raise ArtifactTooLargeException("Artifact of size {} is too large for "
+ "the filesystem which mounts the remote "
+ "cache".format(object_size))
+
+ try:
+ size = os.stat(to_remove).st_size
+ os.unlink(to_remove)
+ removed_size += size
+ except FileNotFoundError:
+ pass
+
+ self.__cas.clean_up_refs_until(last_mtime)
+
+ if removed_size > 0:
+ logging.info("Successfully removed %d bytes from the cache", removed_size)
+ else:
+ logging.info("No artifacts were removed from the cache.")
+
+ return removed_size
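+
+# A usage sketch for _CacheCleaner (illustrative; the sizes are assumptions):
+#
+#     cleaner = _CacheCleaner(cas, max_head_size=int(10e9), min_head_size=int(2e9))
+#     cleaner.clean_up(object_size=4096)  # make room before storing a 4 KiB blob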
diff --git a/src/buildstream/_context.py b/src/buildstream/_context.py
new file mode 100644
index 000000000..151ea636a
--- /dev/null
+++ b/src/buildstream/_context.py
@@ -0,0 +1,766 @@
+#
+# Copyright (C) 2016-2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+import os
+import shutil
+import datetime
+from collections import deque
+from collections.abc import Mapping
+from contextlib import contextmanager
+from . import utils
+from . import _cachekey
+from . import _signals
+from . import _site
+from . import _yaml
+from ._exceptions import LoadError, LoadErrorReason, BstError
+from ._message import Message, MessageType
+from ._profile import Topics, PROFILER
+from ._artifactcache import ArtifactCache
+from ._sourcecache import SourceCache
+from ._cas import CASCache, CASQuota, CASCacheUsage
+from ._workspaces import Workspaces, WorkspaceProjectCache
+from .plugin import Plugin
+from .sandbox import SandboxRemote
+
+
+# Context()
+#
+# The Context object holds all of the user preferences
+# and context for a given invocation of BuildStream.
+#
+# This is a collection of data from configuration files and command
+# line arguments and consists of information such as where to store
+# logs and artifacts, where to perform builds and cache downloaded sources,
+# verbosity levels and basically anything pertaining to the context
+# in which BuildStream was invoked.
+#
+# Args:
+# directory (str): The directory that buildstream was invoked in
+#
+class Context():
+
+ def __init__(self, directory=None):
+
+ # Filename indicating which configuration file was used, or None for the defaults
+ self.config_origin = None
+
+ # The directory under which other directories are based
+ self.cachedir = None
+
+ # The directory where various sources are stored
+ self.sourcedir = None
+
+ # specs for source cache remotes
+ self.source_cache_specs = None
+
+ # The directory where build sandboxes will be created
+ self.builddir = None
+
+ # The directory for CAS
+ self.casdir = None
+
+ # The directory for artifact protos
+ self.artifactdir = None
+
+ # The directory for temporary files
+ self.tmpdir = None
+
+ # Default root location for workspaces
+ self.workspacedir = None
+
+ # The locations from which to push and pull prebuilt artifacts
+ self.artifact_cache_specs = None
+
+ # The global remote execution configuration
+ self.remote_execution_specs = None
+
+ # The directory to store build logs
+ self.logdir = None
+
+ # The abbreviated cache key length to display in the UI
+ self.log_key_length = None
+
+ # Whether debug mode is enabled
+ self.log_debug = None
+
+ # Whether verbose mode is enabled
+ self.log_verbose = None
+
+ # Maximum number of lines to print from build logs
+ self.log_error_lines = None
+
+ # Maximum number of lines to print in the master log for a detailed message
+ self.log_message_lines = None
+
+ # Format string for printing the pipeline at startup time
+ self.log_element_format = None
+
+ # Format string for printing message lines in the master log
+ self.log_message_format = None
+
+ # Maximum number of fetch or refresh tasks
+ self.sched_fetchers = None
+
+ # Maximum number of build tasks
+ self.sched_builders = None
+
+ # Maximum number of push tasks
+ self.sched_pushers = None
+
+ # Maximum number of retries for network tasks
+ self.sched_network_retries = None
+
+ # What to do when a build fails in non interactive mode
+ self.sched_error_action = None
+
+ # Size of the artifact cache in bytes
+ self.config_cache_quota = None
+
+ # User specified cache quota, used for display messages
+ self.config_cache_quota_string = None
+
+ # Whether or not to attempt to pull build trees globally
+ self.pull_buildtrees = None
+
+ # Whether or not to cache build trees on artifact creation
+ self.cache_buildtrees = None
+
+ # Whether directory trees are required for all artifacts in the local cache
+ self.require_artifact_directories = True
+
+ # Whether file contents are required for all artifacts in the local cache
+ self.require_artifact_files = True
+
+ # Whether elements must be rebuilt when their dependencies have changed
+ self._strict_build_plan = None
+
+ # Make sure the XDG vars are set in the environment before loading anything
+ self._init_xdg()
+
+ # Private variables
+ self._cache_key = None
+ self._message_handler = None
+ self._message_depth = deque()
+ self._artifactcache = None
+ self._sourcecache = None
+ self._projects = []
+ self._project_overrides = _yaml.new_empty_node()
+ self._workspaces = None
+ self._workspace_project_cache = WorkspaceProjectCache()
+ self._log_handle = None
+ self._log_filename = None
+ self._cascache = None
+ self._casquota = None
+ self._directory = directory
+
+ # load()
+ #
+ # Loads the configuration files
+ #
+ # Args:
+ # config (filename): The user specified configuration file, if any
+ #
+ # Raises:
+ # LoadError
+ #
+ # This will first load the BuildStream default configuration and then
+ # override that configuration with the configuration file indicated
+ # by *config*, if any was specified.
+ #
+ @PROFILER.profile(Topics.LOAD_CONTEXT, "load")
+ def load(self, config=None):
+ # If a specific config file is not specified, default to trying
+ # a $XDG_CONFIG_HOME/buildstream.conf file
+ #
+ if not config:
+ default_config = os.path.join(os.environ['XDG_CONFIG_HOME'],
+ 'buildstream.conf')
+ if os.path.exists(default_config):
+ config = default_config
+
+ # Load default config
+ #
+ defaults = _yaml.load(_site.default_user_config)
+
+ if config:
+ self.config_origin = os.path.abspath(config)
+ user_config = _yaml.load(config)
+ _yaml.composite(defaults, user_config)
+
+ # Give obsoletion warnings
+ if 'builddir' in defaults:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "builddir is obsolete, use cachedir")
+
+ if 'artifactdir' in defaults:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "artifactdir is obsolete")
+
+ _yaml.node_validate(defaults, [
+ 'cachedir', 'sourcedir', 'builddir', 'logdir', 'scheduler',
+ 'artifacts', 'source-caches', 'logging', 'projects', 'cache', 'prompt',
+ 'workspacedir', 'remote-execution',
+ ])
+
+ for directory in ['cachedir', 'sourcedir', 'logdir', 'workspacedir']:
+ # Allow the ~ tilde expansion and any environment variables in
+ # path specification in the config files.
+ #
+ path = _yaml.node_get(defaults, str, directory)
+ path = os.path.expanduser(path)
+ path = os.path.expandvars(path)
+ path = os.path.normpath(path)
+ setattr(self, directory, path)
+
+ # add directories not set by users
+ self.tmpdir = os.path.join(self.cachedir, 'tmp')
+ self.casdir = os.path.join(self.cachedir, 'cas')
+ self.builddir = os.path.join(self.cachedir, 'build')
+ self.artifactdir = os.path.join(self.cachedir, 'artifacts', 'refs')
+
+ # Move old artifact cas to cas if it exists and create symlink
+ old_casdir = os.path.join(self.cachedir, 'artifacts', 'cas')
+ if (os.path.exists(old_casdir) and not os.path.islink(old_casdir) and
+ not os.path.exists(self.casdir)):
+ os.rename(old_casdir, self.casdir)
+ os.symlink(self.casdir, old_casdir)
+
+ # Cleanup old extract directories
+ old_extractdirs = [os.path.join(self.cachedir, 'artifacts', 'extract'),
+ os.path.join(self.cachedir, 'extract')]
+ for old_extractdir in old_extractdirs:
+ if os.path.isdir(old_extractdir):
+ shutil.rmtree(old_extractdir, ignore_errors=True)
+
+ # Load quota configuration
+ # We need to find the first existing directory in the path of our
+ # cachedir - the cachedir may not have been created yet.
+ cache = _yaml.node_get(defaults, Mapping, 'cache')
+ _yaml.node_validate(cache, ['quota', 'pull-buildtrees', 'cache-buildtrees'])
+
+ self.config_cache_quota_string = _yaml.node_get(cache, str, 'quota')
+ try:
+ self.config_cache_quota = utils._parse_size(self.config_cache_quota_string,
+ self.casdir)
+ except utils.UtilError as e:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}\nPlease specify the value in bytes or as a % of full disk space.\n"
+ "\nValid values are, for example: 800M 10G 1T 50%\n"
+ .format(str(e))) from e
+
+ # Load artifact share configuration
+ self.artifact_cache_specs = ArtifactCache.specs_from_config_node(defaults)
+
+ # Load source cache config
+ self.source_cache_specs = SourceCache.specs_from_config_node(defaults)
+
+ self.remote_execution_specs = SandboxRemote.specs_from_config_node(defaults)
+
+ # Load pull build trees configuration
+ self.pull_buildtrees = _yaml.node_get(cache, bool, 'pull-buildtrees')
+
+ # Load cache build trees configuration
+ self.cache_buildtrees = _node_get_option_str(
+ cache, 'cache-buildtrees', ['always', 'auto', 'never'])
+
+ # Load logging config
+ logging = _yaml.node_get(defaults, Mapping, 'logging')
+ _yaml.node_validate(logging, [
+ 'key-length', 'verbose',
+ 'error-lines', 'message-lines',
+ 'debug', 'element-format', 'message-format'
+ ])
+ self.log_key_length = _yaml.node_get(logging, int, 'key-length')
+ self.log_debug = _yaml.node_get(logging, bool, 'debug')
+ self.log_verbose = _yaml.node_get(logging, bool, 'verbose')
+ self.log_error_lines = _yaml.node_get(logging, int, 'error-lines')
+ self.log_message_lines = _yaml.node_get(logging, int, 'message-lines')
+ self.log_element_format = _yaml.node_get(logging, str, 'element-format')
+ self.log_message_format = _yaml.node_get(logging, str, 'message-format')
+
+ # Load scheduler config
+ scheduler = _yaml.node_get(defaults, Mapping, 'scheduler')
+ _yaml.node_validate(scheduler, [
+ 'on-error', 'fetchers', 'builders',
+ 'pushers', 'network-retries'
+ ])
+ self.sched_error_action = _node_get_option_str(
+ scheduler, 'on-error', ['continue', 'quit', 'terminate'])
+ self.sched_fetchers = _yaml.node_get(scheduler, int, 'fetchers')
+ self.sched_builders = _yaml.node_get(scheduler, int, 'builders')
+ self.sched_pushers = _yaml.node_get(scheduler, int, 'pushers')
+ self.sched_network_retries = _yaml.node_get(scheduler, int, 'network-retries')
+
+ # Load per-projects overrides
+ self._project_overrides = _yaml.node_get(defaults, dict, 'projects', default_value={})
+
+ # Shallow validation of overrides, parts of buildstream which rely
+ # on the overrides are expected to validate elsewhere.
+ for _, overrides in _yaml.node_items(self._project_overrides):
+ _yaml.node_validate(overrides,
+ ['artifacts', 'source-caches', 'options',
+ 'strict', 'default-mirror',
+ 'remote-execution'])
+
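+ # For illustration, a minimal user configuration accepted by load() could
+ # look like this (a sketch, not an exhaustive reference):
+ #
+ #     cachedir: ~/.cache/buildstream
+ #     logging:
+ #       verbose: True
+ #     cache:
+ #       quota: 10G
+ #     scheduler:
+ #       on-error: quit
+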
+ @property
+ def artifactcache(self):
+ if not self._artifactcache:
+ self._artifactcache = ArtifactCache(self)
+
+ return self._artifactcache
+
+ # get_cache_usage()
+ #
+ # Fetches the current usage of the artifact cache
+ #
+ # Returns:
+ # (CASCacheUsage): The current status
+ #
+ def get_cache_usage(self):
+ return CASCacheUsage(self.get_casquota())
+
+ @property
+ def sourcecache(self):
+ if not self._sourcecache:
+ self._sourcecache = SourceCache(self)
+
+ return self._sourcecache
+
+ # add_project():
+ #
+ # Add a project to the context.
+ #
+ # Args:
+ # project (Project): The project to add
+ #
+ def add_project(self, project):
+ if not self._projects:
+ self._workspaces = Workspaces(project, self._workspace_project_cache)
+ self._projects.append(project)
+
+ # get_projects():
+ #
+ # Return the list of projects in the context.
+ #
+ # Returns:
+ # (list): The list of projects
+ #
+ def get_projects(self):
+ return self._projects
+
+ # get_toplevel_project():
+ #
+ # Return the toplevel project, the one which BuildStream was
+ # invoked with as opposed to a junctioned subproject.
+ #
+ # Returns:
+ # (Project): The Project object
+ #
+ def get_toplevel_project(self):
+ return self._projects[0]
+
+ # get_workspaces():
+ #
+ # Return a Workspaces object containing a list of workspaces.
+ #
+ # Returns:
+ # (Workspaces): The Workspaces object
+ #
+ def get_workspaces(self):
+ return self._workspaces
+
+ # get_workspace_project_cache():
+ #
+ # Return the WorkspaceProjectCache object used for this BuildStream invocation
+ #
+ # Returns:
+ # (WorkspaceProjectCache): The WorkspaceProjectCache object
+ #
+ def get_workspace_project_cache(self):
+ return self._workspace_project_cache
+
+ # get_overrides():
+ #
+ # Fetch the override dictionary for the active project. This returns
+ # a node loaded from YAML and as such, values loaded from the returned
+ # node should be loaded using the _yaml.node_get() family of functions.
+ #
+ # Args:
+ # project_name (str): The project name
+ #
+ # Returns:
+ # (Mapping): The overrides dictionary for the specified project
+ #
+ def get_overrides(self, project_name):
+ return _yaml.node_get(self._project_overrides, Mapping, project_name, default_value={})
+
+ # get_strict():
+ #
+ # Fetch whether we are strict or not
+ #
+ # Returns:
+ # (bool): Whether or not to use strict build plan
+ #
+ def get_strict(self):
+ if self._strict_build_plan is None:
+ # Either no override was set, or we have not yet worked this out, so
+ # work out whether we should be strict and then cache the result
+ toplevel = self.get_toplevel_project()
+ overrides = self.get_overrides(toplevel.name)
+ self._strict_build_plan = _yaml.node_get(overrides, bool, 'strict', default_value=True)
+
+ # If it was set by the CLI, it overrides any configuration.
+ # Likewise, if we have already computed this, we simply return the
+ # value cached above.
+ return self._strict_build_plan
+
+ # get_cache_key():
+ #
+ # Returns the cache key, calculating it if necessary
+ #
+ # Returns:
+ # (str): A hex digest cache key for the Context
+ #
+ def get_cache_key(self):
+ if self._cache_key is None:
+
+ # Anything that alters the build goes into the unique key
+ self._cache_key = _cachekey.generate_key(_yaml.new_empty_node())
+
+ return self._cache_key
+
+ # set_message_handler()
+ #
+ # Sets the handler for any status messages propagated through
+ # the context.
+ #
+ # The message handler should have the same signature as
+ # the message() method
+ def set_message_handler(self, handler):
+ self._message_handler = handler
+
+ # silent_messages():
+ #
+ # Returns:
+ # (bool): Whether messages are currently being silenced
+ #
+ def silent_messages(self):
+ for silent in self._message_depth:
+ if silent:
+ return True
+ return False
+
+ # message():
+ #
+ # Proxies a message back to the caller, this is the central
+ # point through which all messages pass.
+ #
+ # Args:
+ # message: A Message object
+ #
+ def message(self, message):
+
+ # Tag message only once
+ if message.depth is None:
+ message.depth = len(list(self._message_depth))
+
+ # If we are recording messages, dump a copy into the open log file.
+ self._record_message(message)
+
+ # Send it off to the log handler (can be the frontend,
+ # or it can be the child task which will propagate
+ # to the frontend)
+ assert self._message_handler
+
+ self._message_handler(message, context=self)
+
+ # silence()
+ #
+ # A context manager to silence messages; this behaves in
+ # the same way as the `silent_nested` argument of the
+ # Context.timed_activity() context manager: especially
+ # important messages will not be silenced.
+ #
+ @contextmanager
+ def silence(self):
+ self._push_message_depth(True)
+ try:
+ yield
+ finally:
+ self._pop_message_depth()
+
+ # timed_activity()
+ #
+ # Context manager for performing timed activities and logging those
+ #
+ # Args:
+ # activity_name (str): The name of the activity
+ # unique_id (int): Optionally, the unique id of the plugin related to the activity
+ # detail (str): An optional detailed message, can be multiline output
+ # silent_nested (bool): If specified, nested messages will be silenced
+ #
+ @contextmanager
+ def timed_activity(self, activity_name, *, unique_id=None, detail=None, silent_nested=False):
+
+ starttime = datetime.datetime.now()
+ stopped_time = None
+
+ def stop_time():
+ nonlocal stopped_time
+ stopped_time = datetime.datetime.now()
+
+ def resume_time():
+ nonlocal stopped_time
+ nonlocal starttime
+ sleep_time = datetime.datetime.now() - stopped_time
+ starttime += sleep_time
+
+ with _signals.suspendable(stop_time, resume_time):
+ try:
+ # Push activity depth for status messages
+ message = Message(unique_id, MessageType.START, activity_name, detail=detail)
+ self.message(message)
+ self._push_message_depth(silent_nested)
+ yield
+
+ except BstError:
+ # Note the failure in status messages and reraise, the scheduler
+ # expects an error when there is an error.
+ elapsed = datetime.datetime.now() - starttime
+ message = Message(unique_id, MessageType.FAIL, activity_name, elapsed=elapsed)
+ self._pop_message_depth()
+ self.message(message)
+ raise
+
+ elapsed = datetime.datetime.now() - starttime
+ message = Message(unique_id, MessageType.SUCCESS, activity_name, elapsed=elapsed)
+ self._pop_message_depth()
+ self.message(message)
+
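+ # A usage sketch for timed_activity() (illustrative):
+ #
+ #     with context.timed_activity("Initializing remote caches", silent_nested=True):
+ #         ...  # nested status messages are silenced while this block runs
+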
+ # recorded_messages()
+ #
+ # Records all messages in a log file while the context manager
+ # is active.
+ #
+ # In addition to automatically writing all messages to the
+ # specified logging file, an open file handle for process stdout
+ # and stderr will be available via the Context.get_log_handle() API,
+ # and the full logfile path will be available via the
+ # Context.get_log_filename() API.
+ #
+ # Args:
+ # filename (str): A logging directory relative filename,
+ # the pid and .log extension will be automatically
+ # appended
+ #
+ # Yields:
+ # (str): The fully qualified log filename
+ #
+ @contextmanager
+ def recorded_messages(self, filename):
+
+ # We don't allow recursion within this context manager, and
+ # we also do not allow it in the main process.
+ assert self._log_handle is None
+ assert self._log_filename is None
+ assert not utils._is_main_process()
+
+ # Create the fully qualified logfile in the log directory,
+ # appending the pid and .log extension at the end.
+ self._log_filename = os.path.join(self.logdir,
+ '{}.{}.log'.format(filename, os.getpid()))
+
+ # Ensure the directory exists first
+ directory = os.path.dirname(self._log_filename)
+ os.makedirs(directory, exist_ok=True)
+
+ with open(self._log_filename, 'a') as logfile:
+
+ # Write one last line to the log and flush it to disk
+ def flush_log():
+
+ # If the process was already in the middle of an I/O operation,
+ # re-entering the I/O stack here will raise a RuntimeError.
+ #
+ # So just try to flush as well as we can at SIGTERM time
+ try:
+ logfile.write('\n\nForcefully terminated\n')
+ logfile.flush()
+ except RuntimeError:
+ os.fsync(logfile.fileno())
+
+ self._log_handle = logfile
+ with _signals.terminator(flush_log):
+ yield self._log_filename
+
+ self._log_handle = None
+ self._log_filename = None
+
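+ # A usage sketch for recorded_messages() (illustrative; the filename is an
+ # assumption, it is resolved relative to the logging directory and may only
+ # be used from a child task process):
+ #
+ #     with context.recorded_messages('builds/hello') as filename:
+ #         ...  # messages emitted here are also written to 'filename'
+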
+ # get_log_handle()
+ #
+ # Fetches the active log handle, this will return the active
+ # log file handle when the Context.recorded_messages() context
+ # manager is active
+ #
+ # Returns:
+ # (file): The active logging file handle, or None
+ #
+ def get_log_handle(self):
+ return self._log_handle
+
+ # get_log_filename()
+ #
+ # Fetches the active log filename, this will return the active
+ # log filename when the Context.recorded_messages() context
+ # manager is active
+ #
+ # Returns:
+ # (str): The active logging filename, or None
+ #
+ def get_log_filename(self):
+ return self._log_filename
+
+ # set_artifact_directories_optional()
+ #
+ # This indicates that the current context (command or configuration)
+ # does not require directory trees of all artifacts to be available in the
+ # local cache.
+ #
+ def set_artifact_directories_optional(self):
+ self.require_artifact_directories = False
+ self.require_artifact_files = False
+
+ # set_artifact_files_optional()
+ #
+ # This indicates that the current context (command or configuration)
+ # does not require file contents of all artifacts to be available in the
+ # local cache.
+ #
+ def set_artifact_files_optional(self):
+ self.require_artifact_files = False
+
+ # _record_message()
+ #
+ # Records the message if recording is enabled
+ #
+ # Args:
+ # message (Message): The message to record
+ #
+ def _record_message(self, message):
+
+ if self._log_handle is None:
+ return
+
+ INDENT = " "
+ EMPTYTIME = "--:--:--"
+ template = "[{timecode: <8}] {type: <7}"
+
+ # If this message is associated with a plugin, print what
+ # we know about the plugin.
+ plugin_name = ""
+ if message.unique_id:
+ template += " {plugin}"
+ plugin = Plugin._lookup(message.unique_id)
+ plugin_name = plugin.name
+
+ template += ": {message}"
+
+ detail = ''
+ if message.detail is not None:
+ template += "\n\n{detail}"
+ detail = message.detail.rstrip('\n')
+ detail = INDENT + INDENT.join(detail.splitlines(True))
+
+ timecode = EMPTYTIME
+ if message.message_type in (MessageType.SUCCESS, MessageType.FAIL):
+ hours, remainder = divmod(int(message.elapsed.total_seconds()), 60**2)
+ minutes, seconds = divmod(remainder, 60)
+ timecode = "{0:02d}:{1:02d}:{2:02d}".format(hours, minutes, seconds)
+
+ text = template.format(timecode=timecode,
+ plugin=plugin_name,
+ type=message.message_type.upper(),
+ message=message.message,
+ detail=detail)
+
+ # Write to the open log file
+ self._log_handle.write('{}\n'.format(text))
+ self._log_handle.flush()
+
+ # _push_message_depth() / _pop_message_depth()
+ #
+ # For status messages, send the depth of timed
+ # activities inside a given task through the message
+ #
+ def _push_message_depth(self, silent_nested):
+ self._message_depth.appendleft(silent_nested)
+
+ def _pop_message_depth(self):
+ assert self._message_depth
+ self._message_depth.popleft()
+
+ # Force the resolved XDG variables into the environment,
+ # this is so that they can be used directly to specify
+ # preferred locations of things from user configuration
+ # files.
+ def _init_xdg(self):
+ if not os.environ.get('XDG_CACHE_HOME'):
+ os.environ['XDG_CACHE_HOME'] = os.path.expanduser('~/.cache')
+ if not os.environ.get('XDG_CONFIG_HOME'):
+ os.environ['XDG_CONFIG_HOME'] = os.path.expanduser('~/.config')
+ if not os.environ.get('XDG_DATA_HOME'):
+ os.environ['XDG_DATA_HOME'] = os.path.expanduser('~/.local/share')
+
+ def get_cascache(self):
+ if self._cascache is None:
+ self._cascache = CASCache(self.cachedir)
+ return self._cascache
+
+ def get_casquota(self):
+ if self._casquota is None:
+ self._casquota = CASQuota(self)
+ return self._casquota
+
+
+# _node_get_option_str()
+#
+# Like _yaml.node_get(), but additionally checks that the fetched value is one
+# of the allowed option strings.
+#
+# Args:
+# node (dict): The dictionary node
+# key (str): The key to get a value for in node
+# allowed_options (iterable): Only accept these values
+#
+# Returns:
+# The value, if found in 'node'.
+#
+# Raises:
+# LoadError, when the value is not of the expected type, or is not found.
+#
+def _node_get_option_str(node, key, allowed_options):
+ result = _yaml.node_get(node, str, key)
+ if result not in allowed_options:
+ provenance = _yaml.node_get_provenance(node, key)
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: {} should be one of: {}".format(
+ provenance, key, ", ".join(allowed_options)))
+ return result
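+
+# For example (a sketch; 'scheduler_node' is an assumed YAML mapping node):
+#
+#     on_error = _node_get_option_str(scheduler_node, 'on-error',
+#                                     ['continue', 'quit', 'terminate'])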
diff --git a/src/buildstream/_elementfactory.py b/src/buildstream/_elementfactory.py
new file mode 100644
index 000000000..d6591bf4c
--- /dev/null
+++ b/src/buildstream/_elementfactory.py
@@ -0,0 +1,63 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from . import _site
+from ._plugincontext import PluginContext
+from .element import Element
+
+
+# A ElementFactory creates Element instances
+# in the context of a given factory
+#
+# Args:
+# plugin_base (PluginBase): The main PluginBase object to work with
+# plugin_origins (list): Data used to search for external Element plugins
+#
+class ElementFactory(PluginContext):
+
+ def __init__(self, plugin_base, *,
+ format_versions={},
+ plugin_origins=None):
+
+ super().__init__(plugin_base, Element, [_site.element_plugins],
+ plugin_origins=plugin_origins,
+ format_versions=format_versions)
+
+ # create():
+ #
+ # Create an Element object, the pipeline uses this to create Element
+ # objects on demand for a given pipeline.
+ #
+ # Args:
+ # context (object): The Context object for processing
+ # project (object): The project object
+ # meta (object): The loaded MetaElement
+ #
+ # Returns: A newly created Element object of the appropriate kind
+ #
+ # Raises:
+ # PluginError (if the kind lookup failed)
+ # LoadError (if the element itself took issue with the config)
+ #
+ def create(self, context, project, meta):
+ element_type, default_config = self.lookup(meta.kind)
+ element = element_type(context, project, meta, default_config)
+ version = self._format_versions.get(meta.kind, 0)
+ self._assert_plugin_format(element, version)
+ return element
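+
+ # A usage sketch (illustrative; 'plugin_base', 'origins', 'context',
+ # 'project' and 'meta' are assumed to be provided by the loader/pipeline):
+ #
+ #     factory = ElementFactory(plugin_base, plugin_origins=origins)
+ #     element = factory.create(context, project, meta)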
diff --git a/src/buildstream/_exceptions.py b/src/buildstream/_exceptions.py
new file mode 100644
index 000000000..f2d34bcba
--- /dev/null
+++ b/src/buildstream/_exceptions.py
@@ -0,0 +1,370 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Tiago Gomes <tiago.gomes@codethink.co.uk>
+
+from enum import Enum
+import os
+
+# Disable pylint warnings for whole file here:
+# pylint: disable=global-statement
+
+# The last raised exception; this is used in test cases only
+_last_exception = None
+_last_task_error_domain = None
+_last_task_error_reason = None
+
+
+# get_last_exception()
+#
+# Fetches the last exception from the main process
+#
+# Used by regression tests
+#
+def get_last_exception():
+ global _last_exception
+
+ le = _last_exception
+ _last_exception = None
+ return le
+
+
+# get_last_task_error()
+#
+# Fetches the last exception from a task
+#
+# Used by regression tests
+#
+def get_last_task_error():
+ if 'BST_TEST_SUITE' not in os.environ:
+ raise BstError("Getting the last task error is only supported when running tests")
+
+ global _last_task_error_domain
+ global _last_task_error_reason
+
+ d = _last_task_error_domain
+ r = _last_task_error_reason
+ _last_task_error_domain = _last_task_error_reason = None
+ return (d, r)
+
+
+# set_last_task_error()
+#
+# Sets the last exception of a task
+#
+# This is set by some internals to inform regression
+# tests about how things failed in a machine readable way
+#
+def set_last_task_error(domain, reason):
+ if 'BST_TEST_SUITE' in os.environ:
+ global _last_task_error_domain
+ global _last_task_error_reason
+
+ _last_task_error_domain = domain
+ _last_task_error_reason = reason
+
+
+class ErrorDomain(Enum):
+ PLUGIN = 1
+ LOAD = 2
+ IMPL = 3
+ PLATFORM = 4
+ SANDBOX = 5
+ ARTIFACT = 6
+ PIPELINE = 7
+ OSTREE = 8
+ UTIL = 9
+ PROG_NOT_FOUND = 12
+ SOURCE = 10
+ ELEMENT = 11
+ APP = 12
+ STREAM = 13
+ VIRTUAL_FS = 14
+ CAS = 15
+
+
+# BstError is an internal base exception class for BuildStream
+# exceptions.
+#
+# The sole purpose of using the base class is to add additional
+# context to exceptions raised by plugins in child tasks, this
+# context can then be communicated back to the main process.
+#
+class BstError(Exception):
+
+ def __init__(self, message, *, detail=None, domain=None, reason=None, temporary=False):
+ global _last_exception
+
+ super().__init__(message)
+
+ # Additional error detail, these are used to construct detail
+ # portions of the logging messages when encountered.
+ #
+ self.detail = detail
+
+ # A sandbox can be created to debug this error
+ self.sandbox = False
+
+ # When this exception occurred during the handling of a job, indicate
+ # whether or not there is any point retrying the job.
+ #
+ self.temporary = temporary
+
+ # Error domain and reason
+ #
+ self.domain = domain
+ self.reason = reason
+
+ # Hold on to the last raised exception for testing purposes
+ if 'BST_TEST_SUITE' in os.environ:
+ _last_exception = self
+
+
+# PluginError
+#
+# Raised on plugin related errors.
+#
+# This exception is raised either by the plugin loading process,
+# or by the base :class:`.Plugin` element itself.
+#
+class PluginError(BstError):
+ def __init__(self, message, reason=None, temporary=False):
+ super().__init__(message, domain=ErrorDomain.PLUGIN, reason=reason, temporary=False)
+
+
+# LoadErrorReason
+#
+# Describes the reason why a :class:`.LoadError` was raised.
+#
+class LoadErrorReason(Enum):
+
+ # A file was not found.
+ MISSING_FILE = 1
+
+ # The parsed data was not valid YAML.
+ INVALID_YAML = 2
+
+ # Data was malformed, a value was not of the expected type, etc
+ INVALID_DATA = 3
+
+ # An error occurred during YAML dictionary composition.
+ #
+ # This can happen by overriding a value with a new differently typed
+ # value, or by overwriting some named value when that was not allowed.
+ ILLEGAL_COMPOSITE = 4
+
+ # A circular dependency chain was detected
+ CIRCULAR_DEPENDENCY = 5
+
+ # A variable could not be resolved. This can happen if your project
+ # has cyclic dependencies in variable declarations, or when substituting
+ # a string which refers to an undefined variable.
+ UNRESOLVED_VARIABLE = 6
+
+ # BuildStream does not support the required project format version
+ UNSUPPORTED_PROJECT = 7
+
+ # Project requires a newer version of a plugin than the one which was loaded
+ UNSUPPORTED_PLUGIN = 8
+
+ # A conditional expression failed to resolve
+ EXPRESSION_FAILED = 9
+
+ # An assertion was intentionally encoded into project YAML
+ USER_ASSERTION = 10
+
+ # A list composition directive did not apply to any underlying list
+ TRAILING_LIST_DIRECTIVE = 11
+
+ # Conflicting junctions in subprojects
+ CONFLICTING_JUNCTION = 12
+
+ # Failure to load a project from a specified junction
+ INVALID_JUNCTION = 13
+
+ # Subproject needs to be fetched
+ SUBPROJECT_FETCH_NEEDED = 14
+
+ # Subproject has no ref
+ SUBPROJECT_INCONSISTENT = 15
+
+ # An invalid symbol name was encountered
+ INVALID_SYMBOL_NAME = 16
+
+ # A project.conf file was missing
+ MISSING_PROJECT_CONF = 17
+
+    # An attempt was made to load a directory instead of a YAML file
+ LOADING_DIRECTORY = 18
+
+ # A project path leads outside of the project directory
+ PROJ_PATH_INVALID = 19
+
+    # A project path points to a file of the wrong kind (e.g. a
+    # socket)
+ PROJ_PATH_INVALID_KIND = 20
+
+ # A recursive include has been encountered.
+ RECURSIVE_INCLUDE = 21
+
+ # A recursive variable has been encountered
+ RECURSIVE_VARIABLE = 22
+
+    # An attempt to set the value of a protected variable
+ PROTECTED_VARIABLE_REDEFINED = 23
+
+
+# LoadError
+#
+# Raised while loading some YAML.
+#
+# Args:
+# reason (LoadErrorReason): machine readable error reason
+# message (str): human readable error explanation
+#
+# This exception is raised when loading or parsing YAML, or when
+# interpreting project YAML
+#
+class LoadError(BstError):
+ def __init__(self, reason, message, *, detail=None):
+ super().__init__(message, detail=detail, domain=ErrorDomain.LOAD, reason=reason)
+
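+# Note that, unlike the other error classes in this module, LoadError takes
+# the machine readable reason as its first positional argument, for example
+# (the message text here is illustrative only):
+#
+#   raise LoadError(LoadErrorReason.INVALID_DATA,
+#                   "Expected a list for the 'depends' field")
+#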
+
+# ImplError
+#
+# Raised when a :class:`.Source` or :class:`.Element` plugin fails to
+# implement a mandatory method
+#
+class ImplError(BstError):
+ def __init__(self, message, reason=None):
+ super().__init__(message, domain=ErrorDomain.IMPL, reason=reason)
+
+
+# PlatformError
+#
+# Raised if the current platform is not supported.
+class PlatformError(BstError):
+ def __init__(self, message, reason=None):
+ super().__init__(message, domain=ErrorDomain.PLATFORM, reason=reason)
+
+
+# SandboxError
+#
+# Raised when errors are encountered by the sandbox implementation
+#
+class SandboxError(BstError):
+ def __init__(self, message, detail=None, reason=None):
+ super().__init__(message, detail=detail, domain=ErrorDomain.SANDBOX, reason=reason)
+
+
+# SourceCacheError
+#
+# Raised when errors are encountered in the source caches
+#
+class SourceCacheError(BstError):
+ def __init__(self, message, detail=None, reason=None):
+        super().__init__(message, detail=detail, domain=ErrorDomain.SOURCE_CACHE, reason=reason)
+
+
+# ArtifactError
+#
+# Raised when errors are encountered in the artifact caches
+#
+class ArtifactError(BstError):
+ def __init__(self, message, *, detail=None, reason=None, temporary=False):
+ super().__init__(message, detail=detail, domain=ErrorDomain.ARTIFACT, reason=reason, temporary=True)
+
+
+# CASError
+#
+# Raised when errors are encountered in the CAS
+#
+class CASError(BstError):
+ def __init__(self, message, *, detail=None, reason=None, temporary=False):
+ super().__init__(message, detail=detail, domain=ErrorDomain.CAS, reason=reason, temporary=True)
+
+
+# CASRemoteError
+#
+# Raised when errors are encountered in the remote CAS
+class CASRemoteError(CASError):
+ pass
+
+
+# CASCacheError
+#
+# Raised when errors are encountered in the local CAS cache
+#
+class CASCacheError(CASError):
+ pass
+
+
+# PipelineError
+#
+# Raised from pipeline operations
+#
+class PipelineError(BstError):
+
+ def __init__(self, message, *, detail=None, reason=None):
+ super().__init__(message, detail=detail, domain=ErrorDomain.PIPELINE, reason=reason)
+
+
+# StreamError
+#
+# Raised when a stream operation fails
+#
+class StreamError(BstError):
+
+ def __init__(self, message=None, *, detail=None, reason=None, terminated=False):
+
+        # The empty string should never appear to a user;
+        # it only allows us to treat this internal error as
+        # a BstError from the frontend.
+ if message is None:
+ message = ""
+
+ super().__init__(message, detail=detail, domain=ErrorDomain.STREAM, reason=reason)
+
+ self.terminated = terminated
+
+
+# AppError
+#
+# Raised from the frontend App directly
+#
+class AppError(BstError):
+ def __init__(self, message, detail=None, reason=None):
+ super().__init__(message, detail=detail, domain=ErrorDomain.APP, reason=reason)
+
+
+# SkipJob
+#
+# Raised from a child process within a job when the job should be
+# considered skipped by the parent process.
+#
+class SkipJob(Exception):
+ pass
+
+
+# ArtifactElementError
+#
+# Raised when errors are encountered by artifact elements
+#
+class ArtifactElementError(BstError):
+ def __init__(self, message, *, detail=None, reason=None):
+ super().__init__(message, detail=detail, domain=ErrorDomain.ELEMENT, reason=reason)
diff --git a/src/buildstream/_frontend/__init__.py b/src/buildstream/_frontend/__init__.py
new file mode 100644
index 000000000..febd4979d
--- /dev/null
+++ b/src/buildstream/_frontend/__init__.py
@@ -0,0 +1,25 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+import os
+from .cli import cli
+
+if "_BST_COMPLETION" not in os.environ:
+ from .profile import Profile
+ from .status import Status
+ from .widget import LogLine
diff --git a/src/buildstream/_frontend/app.py b/src/buildstream/_frontend/app.py
new file mode 100644
index 000000000..d4ea83871
--- /dev/null
+++ b/src/buildstream/_frontend/app.py
@@ -0,0 +1,870 @@
+#
+# Copyright (C) 2016-2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from contextlib import contextmanager
+import os
+import sys
+import traceback
+import datetime
+from textwrap import TextWrapper
+import click
+from click import UsageError
+
+# Import buildstream public symbols
+from .. import Scope
+
+# Import various buildstream internals
+from .._context import Context
+from .._platform import Platform
+from .._project import Project
+from .._exceptions import BstError, StreamError, LoadError, LoadErrorReason, AppError
+from .._message import Message, MessageType, unconditional_messages
+from .._stream import Stream
+from .._versions import BST_FORMAT_VERSION
+from .. import _yaml
+from .._scheduler import ElementJob, JobStatus
+
+# Import frontend assets
+from .profile import Profile
+from .status import Status
+from .widget import LogLine
+
+# Indentation for all logging
+INDENT = 4
+
+
+# App()
+#
+# Main Application State
+#
+# Args:
+# main_options (dict): The main CLI options of the `bst`
+# command, before any subcommand
+#
+class App():
+
+ def __init__(self, main_options):
+
+ #
+ # Public members
+ #
+ self.context = None # The Context object
+ self.stream = None # The Stream object
+ self.project = None # The toplevel Project object
+ self.logger = None # The LogLine object
+ self.interactive = None # Whether we are running in interactive mode
+ self.colors = None # Whether to use colors in logging
+
+ #
+ # Private members
+ #
+ self._session_start = datetime.datetime.now()
+ self._session_name = None
+ self._main_options = main_options # Main CLI options, before any command
+ self._status = None # The Status object
+ self._fail_messages = {} # Failure messages by unique plugin id
+ self._interactive_failures = None # Whether to handle failures interactively
+ self._started = False # Whether a session has started
+
+ # UI Colors Profiles
+ self._content_profile = Profile(fg='yellow')
+ self._format_profile = Profile(fg='cyan', dim=True)
+ self._success_profile = Profile(fg='green')
+ self._error_profile = Profile(fg='red', dim=True)
+ self._detail_profile = Profile(dim=True)
+
+ #
+        # Early initialization
+ #
+ is_a_tty = sys.stdout.isatty() and sys.stderr.isatty()
+
+ # Enable interactive mode if we're attached to a tty
+ if main_options['no_interactive']:
+ self.interactive = False
+ else:
+ self.interactive = is_a_tty
+
+ # Handle errors interactively if we're in interactive mode
+ # and --on-error was not specified on the command line
+ if main_options.get('on_error') is not None:
+ self._interactive_failures = False
+ else:
+ self._interactive_failures = self.interactive
+
+        # Use color output if we're attached to a tty, unless
+        # otherwise specified on the command line
+ if main_options['colors'] is None:
+ self.colors = is_a_tty
+ elif main_options['colors']:
+ self.colors = True
+ else:
+ self.colors = False
+
+ # create()
+ #
+ # Should be used instead of the regular constructor.
+ #
+ # This will select a platform specific App implementation
+ #
+ # Args:
+ # The same args as the App() constructor
+ #
+ @classmethod
+ def create(cls, *args, **kwargs):
+ if sys.platform.startswith('linux'):
+ # Use an App with linux specific features
+ from .linuxapp import LinuxApp # pylint: disable=cyclic-import
+ return LinuxApp(*args, **kwargs)
+ else:
+ # The base App() class is default
+ return App(*args, **kwargs)
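+
+    # For illustration, the CLI entry point constructs the application through
+    # this factory rather than calling the constructor directly (sketch only):
+    #
+    #   app = App.create(main_options)   # a LinuxApp on Linux hosts
+    #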
+
+ # initialized()
+ #
+ # Context manager to initialize the application and optionally run a session
+ # within the context manager.
+ #
+ # This context manager will take care of catching errors from within the
+ # context and report them consistently, so the CLI need not take care of
+ # reporting the errors and exiting with a consistent error status.
+ #
+ # Args:
+ # session_name (str): The name of the session, or None for no session
+ #
+ # Note that the except_ argument may have a subtly different meaning depending
+ # on the activity performed on the Pipeline. In normal circumstances the except_
+ # argument excludes elements from the `elements` list. In a build session, the
+ # except_ elements are excluded from the tracking plan.
+ #
+ # If a session_name is provided, we treat the block as a session, and print
+ # the session header and summary, and time the main session from startup time.
+ #
+ @contextmanager
+ def initialized(self, *, session_name=None):
+ directory = self._main_options['directory']
+ config = self._main_options['config']
+
+ self._session_name = session_name
+
+ #
+ # Load the Context
+ #
+ try:
+ self.context = Context(directory)
+ self.context.load(config)
+ except BstError as e:
+ self._error_exit(e, "Error loading user configuration")
+
+        # Override things in the context from our command line options;
+        # the command line, when used, trumps the config files.
+ #
+ override_map = {
+ 'strict': '_strict_build_plan',
+ 'debug': 'log_debug',
+ 'verbose': 'log_verbose',
+ 'error_lines': 'log_error_lines',
+ 'message_lines': 'log_message_lines',
+ 'on_error': 'sched_error_action',
+ 'fetchers': 'sched_fetchers',
+ 'builders': 'sched_builders',
+ 'pushers': 'sched_pushers',
+ 'network_retries': 'sched_network_retries',
+ 'pull_buildtrees': 'pull_buildtrees',
+ 'cache_buildtrees': 'cache_buildtrees'
+ }
+ for cli_option, context_attr in override_map.items():
+ option_value = self._main_options.get(cli_option)
+ if option_value is not None:
+ setattr(self.context, context_attr, option_value)
+ try:
+ Platform.get_platform()
+ except BstError as e:
+ self._error_exit(e, "Error instantiating platform")
+
+ # Create the logger right before setting the message handler
+ self.logger = LogLine(self.context,
+ self._content_profile,
+ self._format_profile,
+ self._success_profile,
+ self._error_profile,
+ self._detail_profile,
+ indent=INDENT)
+
+ # Propagate pipeline feedback to the user
+ self.context.set_message_handler(self._message_handler)
+
+ # Preflight the artifact cache after initializing logging,
+ # this can cause messages to be emitted.
+ try:
+ self.context.artifactcache.preflight()
+ except BstError as e:
+ self._error_exit(e, "Error instantiating artifact cache")
+
+ #
+ # Load the Project
+ #
+ try:
+ self.project = Project(directory, self.context, cli_options=self._main_options['option'],
+ default_mirror=self._main_options.get('default_mirror'))
+ except LoadError as e:
+
+ # Help users that are new to BuildStream by suggesting 'init'.
+ # We don't want to slow down users that just made a mistake, so
+ # don't stop them with an offer to create a project for them.
+ if e.reason == LoadErrorReason.MISSING_PROJECT_CONF:
+ click.echo("No project found. You can create a new project like so:", err=True)
+ click.echo("", err=True)
+ click.echo(" bst init", err=True)
+
+ self._error_exit(e, "Error loading project")
+
+ except BstError as e:
+ self._error_exit(e, "Error loading project")
+
+ # Now that we have a logger and message handler,
+ # we can override the global exception hook.
+ sys.excepthook = self._global_exception_handler
+
+ # Create the stream right away, we'll need to pass it around
+ self.stream = Stream(self.context, self.project, self._session_start,
+ session_start_callback=self.session_start_cb,
+ interrupt_callback=self._interrupt_handler,
+ ticker_callback=self._tick,
+ job_start_callback=self._job_started,
+ job_complete_callback=self._job_completed)
+
+ # Create our status printer, only available in interactive
+ self._status = Status(self.context,
+ self._content_profile, self._format_profile,
+ self._success_profile, self._error_profile,
+ self.stream, colors=self.colors)
+
+ # Mark the beginning of the session
+ if session_name:
+ self._message(MessageType.START, session_name)
+
+ # Run the body of the session here, once everything is loaded
+ try:
+ yield
+ except BstError as e:
+
+ # Print a nice summary if this is a session
+ if session_name:
+ elapsed = self.stream.elapsed_time
+
+ if isinstance(e, StreamError) and e.terminated: # pylint: disable=no-member
+ self._message(MessageType.WARN, session_name + ' Terminated', elapsed=elapsed)
+ else:
+ self._message(MessageType.FAIL, session_name, elapsed=elapsed)
+
+ # Notify session failure
+ self._notify("{} failed".format(session_name), e)
+
+ if self._started:
+ self._print_summary()
+
+ # Exit with the error
+ self._error_exit(e)
+ except RecursionError:
+ click.echo("RecursionError: Dependency depth is too large. Maximum recursion depth exceeded.",
+ err=True)
+ sys.exit(-1)
+
+ else:
+ # No exceptions occurred, print session time and summary
+ if session_name:
+ self._message(MessageType.SUCCESS, session_name, elapsed=self.stream.elapsed_time)
+ if self._started:
+ self._print_summary()
+
+ # Notify session success
+ self._notify("{} succeeded".format(session_name), "")
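+
+    # For illustration, a typical session as driven by the CLI looks roughly
+    # like this (sketch only; the element name is hypothetical):
+    #
+    #   with app.initialized(session_name="Build"):
+    #       app.stream.build(['hello.bst'])
+    #
+    # Any BstError raised inside the block is reported consistently and the
+    # process exits with a non-zero status.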
+
+ # init_project()
+ #
+ # Initialize a new BuildStream project, either with the explicitly passed options,
+ # or by starting an interactive session if project_name is not specified and the
+ # application is running in interactive mode.
+ #
+ # Args:
+ # project_name (str): The project name, must be a valid symbol name
+ # format_version (int): The project format version, default is the latest version
+ # element_path (str): The subdirectory to store elements in, default is 'elements'
+ # force (bool): Allow overwriting an existing project.conf
+ #
+ def init_project(self, project_name, format_version=BST_FORMAT_VERSION, element_path='elements', force=False):
+ directory = self._main_options['directory']
+ directory = os.path.abspath(directory)
+ project_path = os.path.join(directory, 'project.conf')
+
+ try:
+ # Abort if the project.conf already exists, unless `--force` was specified in `bst init`
+ if not force and os.path.exists(project_path):
+ raise AppError("A project.conf already exists at: {}".format(project_path),
+ reason='project-exists')
+
+ if project_name:
+ # If project name was specified, user interaction is not desired, just
+ # perform some validation and write the project.conf
+ _yaml.assert_symbol_name(None, project_name, 'project name')
+ self._assert_format_version(format_version)
+ self._assert_element_path(element_path)
+
+ elif not self.interactive:
+ raise AppError("Cannot initialize a new project without specifying the project name",
+ reason='unspecified-project-name')
+ else:
+ # Collect the parameters using an interactive session
+ project_name, format_version, element_path = \
+ self._init_project_interactive(project_name, format_version, element_path)
+
+            # Create the directory if it doesn't exist
+ try:
+ os.makedirs(directory, exist_ok=True)
+ except IOError as e:
+ raise AppError("Error creating project directory {}: {}".format(directory, e)) from e
+
+            # Create the elements sub-directory if it doesn't exist
+ elements_path = os.path.join(directory, element_path)
+ try:
+ os.makedirs(elements_path, exist_ok=True)
+ except IOError as e:
+ raise AppError("Error creating elements sub-directory {}: {}"
+ .format(elements_path, e)) from e
+
+            # Don't use ruamel.yaml here, because it doesn't let
+            # us programmatically insert comments or whitespace at
+            # the toplevel.
+ try:
+ with open(project_path, 'w') as f:
+ f.write("# Unique project name\n" +
+ "name: {}\n\n".format(project_name) +
+ "# Required BuildStream format version\n" +
+ "format-version: {}\n\n".format(format_version) +
+ "# Subdirectory where elements are stored\n" +
+ "element-path: {}\n".format(element_path))
+ except IOError as e:
+ raise AppError("Error writing {}: {}".format(project_path, e)) from e
+
+ except BstError as e:
+ self._error_exit(e)
+
+ click.echo("", err=True)
+ click.echo("Created project.conf at: {}".format(project_path), err=True)
+ sys.exit(0)
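+
+    # For illustration, the `bst init` command drives this method roughly as
+    # follows (sketch only; the project name is hypothetical):
+    #
+    #   app.init_project('my-project', element_path='elements')
+    #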
+
+ # shell_prompt():
+ #
+ # Creates a prompt for a shell environment, using ANSI color codes
+ # if they are available in the execution context.
+ #
+ # Args:
+ # element (Element): The Element object to resolve a prompt for
+ #
+ # Returns:
+ # (str): The formatted prompt to display in the shell
+ #
+ def shell_prompt(self, element):
+ _, key, dim = element._get_display_key()
+ element_name = element._get_full_name()
+
+ if self.colors:
+ prompt = self._format_profile.fmt('[') + \
+ self._content_profile.fmt(key, dim=dim) + \
+ self._format_profile.fmt('@') + \
+ self._content_profile.fmt(element_name) + \
+ self._format_profile.fmt(':') + \
+ self._content_profile.fmt('$PWD') + \
+ self._format_profile.fmt(']$') + ' '
+ else:
+ prompt = '[{}@{}:${{PWD}}]$ '.format(key, element_name)
+
+ return prompt
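+
+    # For illustration, with colors disabled the resulting prompt looks like
+    # the following (the cache key and element name are hypothetical):
+    #
+    #   [a1b2c3d4@base/alpine.bst:${PWD}]$
+    #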
+
+ # cleanup()
+ #
+ # Cleans up application state
+ #
+ # This is called by Click at exit time
+ #
+ def cleanup(self):
+ if self.stream:
+ self.stream.cleanup()
+
+ ############################################################
+ # Abstract Class Methods #
+ ############################################################
+
+ # notify()
+ #
+    # Notify the user of something which occurred; this
+    # is intended to grab the user's attention.
+ #
+ # This is guaranteed to only be called in interactive mode
+ #
+ # Args:
+ # title (str): The notification title
+ # text (str): The notification text
+ #
+ def notify(self, title, text):
+ pass
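+
+    # For illustration, a platform specific App subclass might implement this
+    # hook along these lines (a sketch only, assuming 'subprocess' is imported
+    # and a desktop notification tool is available):
+    #
+    #   class MyPlatformApp(App):
+    #       def notify(self, title, text):
+    #           subprocess.call(['notify-send', title, text])
+    #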
+
+ ############################################################
+ # Local Functions #
+ ############################################################
+
+ # Local function for calling the notify() virtual method
+ #
+ def _notify(self, title, text):
+ if self.interactive:
+ self.notify(str(title), str(text))
+
+ # Local message propagator
+ #
+ def _message(self, message_type, message, **kwargs):
+ args = dict(kwargs)
+ self.context.message(
+ Message(None, message_type, message, **args))
+
+ # Exception handler
+ #
+ def _global_exception_handler(self, etype, value, tb):
+
+ # Print the regular BUG message
+ formatted = "".join(traceback.format_exception(etype, value, tb))
+ self._message(MessageType.BUG, str(value),
+ detail=formatted)
+
+ # If the scheduler has started, try to terminate all jobs gracefully,
+ # otherwise exit immediately.
+ if self.stream.running:
+ self.stream.terminate()
+ else:
+ sys.exit(-1)
+
+ #
+ # Render the status area, conditional on some internal state
+ #
+ def _maybe_render_status(self):
+
+        # If we're suspended or terminating, then don't render the status area
+ if self._status and self.stream and \
+ not (self.stream.suspended or self.stream.terminated):
+ self._status.render()
+
+ #
+ # Handle ^C SIGINT interruptions in the scheduling main loop
+ #
+ def _interrupt_handler(self):
+
+ # Only handle ^C interactively in interactive mode
+ if not self.interactive:
+ self._status.clear()
+ self.stream.terminate()
+ return
+
+ # Here we can give the user some choices, like whether they would
+ # like to continue, abort immediately, or only complete processing of
+ # the currently ongoing tasks. We can also print something more
+ # intelligent, like how many tasks remain to complete overall.
+ with self._interrupted():
+ click.echo("\nUser interrupted with ^C\n" +
+ "\n"
+ "Choose one of the following options:\n" +
+ " (c)ontinue - Continue queueing jobs as much as possible\n" +
+ " (q)uit - Exit after all ongoing jobs complete\n" +
+ " (t)erminate - Terminate any ongoing jobs and exit\n" +
+ "\n" +
+ "Pressing ^C again will terminate jobs and exit\n",
+ err=True)
+
+ try:
+ choice = click.prompt("Choice:",
+ value_proc=_prefix_choice_value_proc(['continue', 'quit', 'terminate']),
+ default='continue', err=True)
+ except click.Abort:
+ # Ensure a newline after automatically printed '^C'
+ click.echo("", err=True)
+ choice = 'terminate'
+
+ if choice == 'terminate':
+ click.echo("\nTerminating all jobs at user request\n", err=True)
+ self.stream.terminate()
+ else:
+ if choice == 'quit':
+ click.echo("\nCompleting ongoing tasks before quitting\n", err=True)
+ self.stream.quit()
+ elif choice == 'continue':
+ click.echo("\nContinuing\n", err=True)
+
+ def _tick(self, elapsed):
+ self._maybe_render_status()
+
+ def _job_started(self, job):
+ self._status.add_job(job)
+ self._maybe_render_status()
+
+ def _job_completed(self, job, status):
+ self._status.remove_job(job)
+ self._maybe_render_status()
+
+        # Don't attempt to handle a failure if the user has already opted to
+        # terminate
+ if status == JobStatus.FAIL and not self.stream.terminated:
+
+ if isinstance(job, ElementJob):
+ element = job.element
+ queue = job.queue
+
+ # Get the last failure message for additional context
+ failure = self._fail_messages.get(element._unique_id)
+
+ # XXX This is dangerous, sometimes we get the job completed *before*
+ # the failure message reaches us ??
+ if not failure:
+ self._status.clear()
+ click.echo("\n\n\nBUG: Message handling out of sync, " +
+ "unable to retrieve failure message for element {}\n\n\n\n\n"
+ .format(element), err=True)
+ else:
+ self._handle_failure(element, queue, failure)
+ else:
+ click.echo("\nTerminating all jobs\n", err=True)
+ self.stream.terminate()
+
+ def _handle_failure(self, element, queue, failure):
+
+ # Handle non interactive mode setting of what to do when a job fails.
+ if not self._interactive_failures:
+
+ if self.context.sched_error_action == 'terminate':
+ self.stream.terminate()
+ elif self.context.sched_error_action == 'quit':
+ self.stream.quit()
+ elif self.context.sched_error_action == 'continue':
+ pass
+ return
+
+ # Interactive mode for element failures
+ with self._interrupted():
+
+ summary = ("\n{} failure on element: {}\n".format(failure.action_name, element.name) +
+ "\n" +
+ "Choose one of the following options:\n" +
+ " (c)ontinue - Continue queueing jobs as much as possible\n" +
+ " (q)uit - Exit after all ongoing jobs complete\n" +
+ " (t)erminate - Terminate any ongoing jobs and exit\n" +
+ " (r)etry - Retry this job\n")
+ if failure.logfile:
+ summary += " (l)og - View the full log file\n"
+ if failure.sandbox:
+ summary += " (s)hell - Drop into a shell in the failed build sandbox\n"
+ summary += "\nPressing ^C will terminate jobs and exit\n"
+
+ choices = ['continue', 'quit', 'terminate', 'retry']
+ if failure.logfile:
+ choices += ['log']
+ if failure.sandbox:
+ choices += ['shell']
+
+ choice = ''
+ while choice not in ['continue', 'quit', 'terminate', 'retry']:
+ click.echo(summary, err=True)
+
+ self._notify("BuildStream failure", "{} on element {}"
+ .format(failure.action_name, element.name))
+
+ try:
+ choice = click.prompt("Choice:", default='continue', err=True,
+ value_proc=_prefix_choice_value_proc(choices))
+ except click.Abort:
+ # Ensure a newline after automatically printed '^C'
+ click.echo("", err=True)
+ choice = 'terminate'
+
+ # Handle choices which you can come back from
+ #
+ if choice == 'shell':
+ click.echo("\nDropping into an interactive shell in the failed build sandbox\n", err=True)
+ try:
+ prompt = self.shell_prompt(element)
+ self.stream.shell(element, Scope.BUILD, prompt, isolate=True, usebuildtree='always')
+ except BstError as e:
+ click.echo("Error while attempting to create interactive shell: {}".format(e), err=True)
+ elif choice == 'log':
+ with open(failure.logfile, 'r') as logfile:
+ content = logfile.read()
+ click.echo_via_pager(content)
+
+ if choice == 'terminate':
+ click.echo("\nTerminating all jobs\n", err=True)
+ self.stream.terminate()
+ else:
+ if choice == 'quit':
+ click.echo("\nCompleting ongoing tasks before quitting\n", err=True)
+ self.stream.quit()
+ elif choice == 'continue':
+ click.echo("\nContinuing with other non failing elements\n", err=True)
+ elif choice == 'retry':
+ click.echo("\nRetrying failed job\n", err=True)
+ queue.failed_elements.remove(element)
+ queue.enqueue([element])
+
+ #
+ # Print the session heading if we've loaded a pipeline and there
+ # is going to be a session
+ #
+ def session_start_cb(self):
+ self._started = True
+ if self._session_name:
+ self.logger.print_heading(self.project,
+ self.stream,
+ log_file=self._main_options['log_file'],
+ styling=self.colors)
+
+ #
+ # Print a summary of the queues
+ #
+ def _print_summary(self):
+ click.echo("", err=True)
+ self.logger.print_summary(self.stream,
+ self._main_options['log_file'],
+ styling=self.colors)
+
+ # _error_exit()
+ #
+ # Exit with an error
+ #
+ # This will print the passed error to stderr and exit the program
+ # with -1 status
+ #
+ # Args:
+ # error (BstError): A BstError exception to print
+ # prefix (str): An optional string to prepend to the error message
+ #
+ def _error_exit(self, error, prefix=None):
+ click.echo("", err=True)
+ main_error = str(error)
+ if prefix is not None:
+ main_error = "{}: {}".format(prefix, main_error)
+
+ click.echo(main_error, err=True)
+ if error.detail:
+ indent = " " * INDENT
+ detail = '\n' + indent + indent.join(error.detail.splitlines(True))
+ click.echo(detail, err=True)
+
+ sys.exit(-1)
+
+ #
+ # Handle messages from the pipeline
+ #
+ def _message_handler(self, message, context):
+
+ # Drop status messages from the UI if not verbose, we'll still see
+ # info messages and status messages will still go to the log files.
+ if not context.log_verbose and message.message_type == MessageType.STATUS:
+ return
+
+ # Hold on to the failure messages
+ if message.message_type in [MessageType.FAIL, MessageType.BUG] and message.unique_id is not None:
+ self._fail_messages[message.unique_id] = message
+
+ # Send to frontend if appropriate
+ if self.context.silent_messages() and (message.message_type not in unconditional_messages):
+ return
+
+ if self._status:
+ self._status.clear()
+
+ text = self.logger.render(message)
+ click.echo(text, color=self.colors, nl=False, err=True)
+
+ # Maybe render the status area
+ self._maybe_render_status()
+
+ # Additionally log to a file
+ if self._main_options['log_file']:
+ click.echo(text, file=self._main_options['log_file'], color=False, nl=False)
+
+ @contextmanager
+ def _interrupted(self):
+ self._status.clear()
+ try:
+ with self.stream.suspend():
+ yield
+ finally:
+ self._maybe_render_status()
+
+ # Some validation routines for project initialization
+ #
+ def _assert_format_version(self, format_version):
+ message = "The version must be supported by this " + \
+ "version of buildstream (0 - {})\n".format(BST_FORMAT_VERSION)
+
+ # Validate that it is an integer
+ try:
+ number = int(format_version)
+ except ValueError as e:
+ raise AppError(message, reason='invalid-format-version') from e
+
+ # Validate that the specified version is supported
+ if number < 0 or number > BST_FORMAT_VERSION:
+ raise AppError(message, reason='invalid-format-version')
+
+ def _assert_element_path(self, element_path):
+ message = "The element path cannot be an absolute path or contain any '..' components\n"
+
+ # Validate the path is not absolute
+ if os.path.isabs(element_path):
+ raise AppError(message, reason='invalid-element-path')
+
+ # Validate that the path does not contain any '..' components
+ path = element_path
+ while path:
+ split = os.path.split(path)
+ path = split[0]
+ basename = split[1]
+ if basename == '..':
+ raise AppError(message, reason='invalid-element-path')
+
+ # _init_project_interactive()
+ #
+ # Collect the user input for an interactive session for App.init_project()
+ #
+ # Args:
+ # project_name (str): The project name, must be a valid symbol name
+ # format_version (int): The project format version, default is the latest version
+ # element_path (str): The subdirectory to store elements in, default is 'elements'
+ #
+ # Returns:
+ # project_name (str): The user selected project name
+ # format_version (int): The user selected format version
+ # element_path (str): The user selected element path
+ #
+ def _init_project_interactive(self, project_name, format_version=BST_FORMAT_VERSION, element_path='elements'):
+
+ def project_name_proc(user_input):
+ try:
+ _yaml.assert_symbol_name(None, user_input, 'project name')
+ except LoadError as e:
+ message = "{}\n\n{}\n".format(e, e.detail)
+ raise UsageError(message) from e
+ return user_input
+
+ def format_version_proc(user_input):
+ try:
+ self._assert_format_version(user_input)
+ except AppError as e:
+ raise UsageError(str(e)) from e
+ return user_input
+
+ def element_path_proc(user_input):
+ try:
+ self._assert_element_path(user_input)
+ except AppError as e:
+ raise UsageError(str(e)) from e
+ return user_input
+
+ w = TextWrapper(initial_indent=' ', subsequent_indent=' ', width=79)
+
+ # Collect project name
+ click.echo("", err=True)
+ click.echo(self._content_profile.fmt("Choose a unique name for your project"), err=True)
+ click.echo(self._format_profile.fmt("-------------------------------------"), err=True)
+ click.echo("", err=True)
+ click.echo(self._detail_profile.fmt(
+ w.fill("The project name is a unique symbol for your project and will be used "
+ "to distinguish your project from others in user preferences, namspaceing "
+ "of your project's artifacts in shared artifact caches, and in any case where "
+ "BuildStream needs to distinguish between multiple projects.")), err=True)
+ click.echo("", err=True)
+ click.echo(self._detail_profile.fmt(
+ w.fill("The project name must contain only alphanumeric characters, "
+ "may not start with a digit, and may contain dashes or underscores.")), err=True)
+ click.echo("", err=True)
+ project_name = click.prompt(self._content_profile.fmt("Project name"),
+ value_proc=project_name_proc, err=True)
+ click.echo("", err=True)
+
+ # Collect format version
+ click.echo(self._content_profile.fmt("Select the minimum required format version for your project"), err=True)
+ click.echo(self._format_profile.fmt("-----------------------------------------------------------"), err=True)
+ click.echo("", err=True)
+ click.echo(self._detail_profile.fmt(
+ w.fill("The format version is used to provide users who build your project "
+ "with a helpful error message in the case that they do not have a recent "
+ "enough version of BuildStream supporting all the features which your "
+ "project might use.")), err=True)
+ click.echo("", err=True)
+ click.echo(self._detail_profile.fmt(
+ w.fill("The lowest version allowed is 0, the currently installed version of BuildStream "
+ "supports up to format version {}.".format(BST_FORMAT_VERSION))), err=True)
+
+ click.echo("", err=True)
+ format_version = click.prompt(self._content_profile.fmt("Format version"),
+ value_proc=format_version_proc,
+ default=format_version, err=True)
+ click.echo("", err=True)
+
+ # Collect element path
+ click.echo(self._content_profile.fmt("Select the element path"), err=True)
+ click.echo(self._format_profile.fmt("-----------------------"), err=True)
+ click.echo("", err=True)
+ click.echo(self._detail_profile.fmt(
+ w.fill("The element path is a project subdirectory where element .bst files are stored "
+ "within your project.")), err=True)
+ click.echo("", err=True)
+ click.echo(self._detail_profile.fmt(
+ w.fill("Elements will be displayed in logs as filenames relative to "
+ "the element path, and similarly, dependencies must be expressed as filenames "
+ "relative to the element path.")), err=True)
+ click.echo("", err=True)
+ element_path = click.prompt(self._content_profile.fmt("Element path"),
+ value_proc=element_path_proc,
+ default=element_path, err=True)
+
+ return (project_name, format_version, element_path)
+
+
+#
+# Return a value processor for partial choice matching.
+# The returned value processor will test the passed value against all the
+# items in the 'choices' list. If the value is a prefix of exactly one of the
+# 'choices' items, that item is returned. If no item matches, or if several
+# items match the same input, a 'click.UsageError' exception is raised with a
+# description of the error.
+#
+# Note that Click expects user input errors to be signaled by raising a
+# 'click.UsageError' exception. That way, Click displays an error message and
+# asks for new input.
+#
+def _prefix_choice_value_proc(choices):
+
+ def value_proc(user_input):
+ remaining_candidate = [choice for choice in choices if choice.startswith(user_input)]
+
+ if not remaining_candidate:
+ raise UsageError("Expected one of {}, got {}".format(choices, user_input))
+ elif len(remaining_candidate) == 1:
+ return remaining_candidate[0]
+ else:
+ raise UsageError("Ambiguous input. '{}' can refer to one of {}".format(user_input, remaining_candidate))
+
+ return value_proc
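+
+
+# For illustration (the inputs shown are hypothetical):
+#
+#   proc = _prefix_choice_value_proc(['continue', 'quit', 'terminate'])
+#   proc('t')    # -> 'terminate'
+#   proc('q')    # -> 'quit'
+#   proc('x')    # raises click.UsageError, no choice matches
+#
+# With overlapping choices such as ['try', 'terminate'], an input of 't'
+# would be ambiguous and also raise a click.UsageError.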
diff --git a/src/buildstream/_frontend/cli.py b/src/buildstream/_frontend/cli.py
new file mode 100644
index 000000000..cc8cd5e54
--- /dev/null
+++ b/src/buildstream/_frontend/cli.py
@@ -0,0 +1,1277 @@
+import os
+import sys
+from contextlib import ExitStack
+from functools import partial
+from tempfile import TemporaryDirectory
+
+import click
+from .. import _yaml
+from .._exceptions import BstError, LoadError, AppError
+from .._versions import BST_FORMAT_VERSION
+from .complete import main_bashcomplete, complete_path, CompleteUnhandled
+
+
+##################################################################
+# Override of click's main entry point #
+##################################################################
+
+# search_command()
+#
+# Helper function to get a command and context object
+# for a given command.
+#
+# Args:
+# commands (list): A list of command words following `bst` invocation
+# context (click.Context): An existing toplevel context, or None
+#
+# Returns:
+# context (click.Context): The context of the associated command, or None
+#
+def search_command(args, *, context=None):
+ if context is None:
+ context = cli.make_context('bst', args, resilient_parsing=True)
+
+ # Loop into the deepest command
+ command = cli
+ command_ctx = context
+ for cmd in args:
+ command = command_ctx.command.get_command(command_ctx, cmd)
+ if command is None:
+ return None
+ command_ctx = command.make_context(command.name, [command.name],
+ parent=command_ctx,
+ resilient_parsing=True)
+
+ return command_ctx
+
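+# For illustration, resolving the context of a nested command (sketch only):
+#
+#   command_ctx = search_command(['source', 'fetch'])
+#   if command_ctx:
+#       click.echo(command_ctx.command.get_help(command_ctx))
+#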
+
+# Completion for completing command names as help arguments
+def complete_commands(cmd, args, incomplete):
+ command_ctx = search_command(args[1:])
+ if command_ctx and command_ctx.command and isinstance(command_ctx.command, click.MultiCommand):
+ return [subcommand + " " for subcommand in command_ctx.command.list_commands(command_ctx)
+ if not command_ctx.command.get_command(command_ctx, subcommand).hidden]
+
+ return []
+
+
+# Special completion for completing the bst elements in a project dir
+def complete_target(args, incomplete):
+ """
+ :param args: full list of args typed before the incomplete arg
+ :param incomplete: the incomplete text to autocomplete
+ :return: all the possible user-specified completions for the param
+ """
+
+ from .. import utils
+ project_conf = 'project.conf'
+
+ # First resolve the directory, in case there is an
+ # active --directory/-C option
+ #
+ base_directory = '.'
+ idx = -1
+ try:
+ idx = args.index('-C')
+ except ValueError:
+ try:
+ idx = args.index('--directory')
+ except ValueError:
+ pass
+
+ if idx >= 0 and len(args) > idx + 1:
+ base_directory = args[idx + 1]
+ else:
+ # Check if this directory or any of its parent directories
+ # contain a project config file
+ base_directory, _ = utils._search_upward_for_files(base_directory, [project_conf])
+
+ if base_directory is None:
+ # No project_conf was found in base_directory or its parents, no need
+ # to try loading any project conf and avoid os.path NoneType TypeError.
+ return []
+ else:
+ project_file = os.path.join(base_directory, project_conf)
+ try:
+ project = _yaml.load(project_file)
+ except LoadError:
+            # If there is no project conf in context, just don't
+            # even bother trying to complete anything.
+ return []
+
+ # The project is not required to have an element-path
+ element_directory = _yaml.node_get(project, str, 'element-path', default_value='')
+
+ # If a project was loaded, use its element-path to
+ # adjust our completion's base directory
+ if element_directory:
+ base_directory = os.path.join(base_directory, element_directory)
+
+ complete_list = []
+ for p in complete_path("File", incomplete, base_directory=base_directory):
+ if p.endswith(".bst ") or p.endswith("/"):
+ complete_list.append(p)
+ return complete_list
+
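+# For illustration (the element names shown are hypothetical):
+#
+#   complete_target(['build'], 'ba')
+#   # -> ['base.bst ', 'base/']    (only .bst files and directories are offered)
+#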
+
+def complete_artifact(orig_args, args, incomplete):
+ from .._context import Context
+ ctx = Context()
+
+ config = None
+ if orig_args:
+ for i, arg in enumerate(orig_args):
+ if arg in ('-c', '--config'):
+ try:
+ config = orig_args[i + 1]
+ except IndexError:
+ pass
+ if args:
+ for i, arg in enumerate(args):
+ if arg in ('-c', '--config'):
+ try:
+ config = args[i + 1]
+ except IndexError:
+ pass
+ ctx.load(config)
+
+ # element targets are valid artifact names
+ complete_list = complete_target(args, incomplete)
+ complete_list.extend(ref for ref in ctx.artifactcache.list_artifacts() if ref.startswith(incomplete))
+
+ return complete_list
+
+
+def override_completions(orig_args, cmd, cmd_param, args, incomplete):
+ """
+ :param orig_args: original, non-completion args
+ :param cmd_param: command definition
+ :param args: full list of args typed before the incomplete arg
+ :param incomplete: the incomplete text to autocomplete
+ :return: all the possible user-specified completions for the param
+ """
+
+ if cmd.name == 'help':
+ return complete_commands(cmd, args, incomplete)
+
+ # We can't easily extend click's data structures without
+ # modifying click itself, so just do some weak special casing
+ # right here and select which parameters we want to handle specially.
+ if isinstance(cmd_param.type, click.Path):
+ if (cmd_param.name == 'elements' or
+ cmd_param.name == 'element' or
+ cmd_param.name == 'except_' or
+ cmd_param.opts == ['--track'] or
+ cmd_param.opts == ['--track-except']):
+ return complete_target(args, incomplete)
+ if cmd_param.name == 'artifacts':
+ return complete_artifact(orig_args, args, incomplete)
+
+ raise CompleteUnhandled()
+
+
+def override_main(self, args=None, prog_name=None, complete_var=None,
+ standalone_mode=True, **extra):
+
+ # Hook for the Bash completion. This only activates if the Bash
+ # completion is actually enabled, otherwise this is quite a fast
+ # noop.
+ if main_bashcomplete(self, prog_name, partial(override_completions, args)):
+
+        # If we're running tests we can't just go calling exit()
+        # from the main process.
+ #
+ # The below is a quicker exit path for the sake
+ # of making completions respond faster.
+ if 'BST_TEST_SUITE' not in os.environ:
+ sys.stdout.flush()
+ sys.stderr.flush()
+ os._exit(0)
+
+ # Regular client return for test cases
+ return
+
+ original_main(self, args=args, prog_name=prog_name, complete_var=None,
+ standalone_mode=standalone_mode, **extra)
+
+
+original_main = click.BaseCommand.main
+click.BaseCommand.main = override_main
+
+
+##################################################################
+# Main Options #
+##################################################################
+def print_version(ctx, param, value):
+ if not value or ctx.resilient_parsing:
+ return
+
+ from .. import __version__
+ click.echo(__version__)
+ ctx.exit()
+
+
+@click.group(context_settings=dict(help_option_names=['-h', '--help']))
+@click.option('--version', is_flag=True, callback=print_version,
+ expose_value=False, is_eager=True)
+@click.option('--config', '-c',
+ type=click.Path(exists=True, dir_okay=False, readable=True),
+ help="Configuration file to use")
+@click.option('--directory', '-C', default=os.getcwd(),
+ type=click.Path(file_okay=False, readable=True),
+ help="Project directory (default: current directory)")
+@click.option('--on-error', default=None,
+ type=click.Choice(['continue', 'quit', 'terminate']),
+ help="What to do when an error is encountered")
+@click.option('--fetchers', type=click.INT, default=None,
+ help="Maximum simultaneous download tasks")
+@click.option('--builders', type=click.INT, default=None,
+ help="Maximum simultaneous build tasks")
+@click.option('--pushers', type=click.INT, default=None,
+ help="Maximum simultaneous upload tasks")
+@click.option('--network-retries', type=click.INT, default=None,
+ help="Maximum retries for network tasks")
+@click.option('--no-interactive', is_flag=True, default=False,
+ help="Force non interactive mode, otherwise this is automatically decided")
+@click.option('--verbose/--no-verbose', default=None,
+ help="Be extra verbose")
+@click.option('--debug/--no-debug', default=None,
+ help="Print debugging output")
+@click.option('--error-lines', type=click.INT, default=None,
+ help="Maximum number of lines to show from a task log")
+@click.option('--message-lines', type=click.INT, default=None,
+ help="Maximum number of lines to show in a detailed message")
+@click.option('--log-file',
+ type=click.File(mode='w', encoding='UTF-8'),
+ help="A file to store the main log (allows storing the main log while in interactive mode)")
+@click.option('--colors/--no-colors', default=None,
+ help="Force enable/disable ANSI color codes in output")
+@click.option('--strict/--no-strict', default=None, is_flag=True,
+ help="Elements must be rebuilt when their dependencies have changed")
+@click.option('--option', '-o', type=click.Tuple([str, str]), multiple=True, metavar='OPTION VALUE',
+ help="Specify a project option")
+@click.option('--default-mirror', default=None,
+ help="The mirror to fetch from first, before attempting other mirrors")
+@click.option('--pull-buildtrees', is_flag=True, default=None,
+ help="Include an element's build tree when pulling remote element artifacts")
+@click.option('--cache-buildtrees', default=None,
+ type=click.Choice(['always', 'auto', 'never']),
+ help="Cache artifact build tree content on creation")
+@click.pass_context
+def cli(context, **kwargs):
+ """Build and manipulate BuildStream projects
+
+ Most of the main options override options in the
+ user preferences configuration file.
+ """
+
+ from .app import App
+
+ # Create the App, giving it the main arguments
+ context.obj = App.create(dict(kwargs))
+ context.call_on_close(context.obj.cleanup)
+
+
+##################################################################
+# Help Command #
+##################################################################
+@cli.command(name="help", short_help="Print usage information",
+ context_settings={"help_option_names": []})
+@click.argument("command", nargs=-1, metavar='COMMAND')
+@click.pass_context
+def help_command(ctx, command):
+ """Print usage information about a given command
+ """
+ command_ctx = search_command(command, context=ctx.parent)
+ if not command_ctx:
+ click.echo("Not a valid command: '{} {}'"
+ .format(ctx.parent.info_name, " ".join(command)), err=True)
+ sys.exit(-1)
+
+ click.echo(command_ctx.command.get_help(command_ctx), err=True)
+
+ # Hint about available sub commands
+ if isinstance(command_ctx.command, click.MultiCommand):
+ detail = " "
+ if command:
+ detail = " {} ".format(" ".join(command))
+ click.echo("\nFor usage on a specific command: {} help{}COMMAND"
+ .format(ctx.parent.info_name, detail), err=True)
+
+
+##################################################################
+# Init Command #
+##################################################################
+@cli.command(short_help="Initialize a new BuildStream project")
+@click.option('--project-name', type=click.STRING,
+ help="The project name to use")
+@click.option('--format-version', type=click.INT, default=BST_FORMAT_VERSION,
+ help="The required format version (default: {})".format(BST_FORMAT_VERSION))
+@click.option('--element-path', type=click.Path(), default="elements",
+ help="The subdirectory to store elements in (default: elements)")
+@click.option('--force', '-f', default=False, is_flag=True,
+ help="Allow overwriting an existing project.conf")
+@click.pass_obj
+def init(app, project_name, format_version, element_path, force):
+ """Initialize a new BuildStream project
+
+ Creates a new BuildStream project.conf in the project
+ directory.
+
+ Unless `--project-name` is specified, this will be an
+ interactive session.
+ """
+ app.init_project(project_name, format_version, element_path, force)
+
+
+##################################################################
+# Build Command #
+##################################################################
+@cli.command(short_help="Build elements in a pipeline")
+@click.option('--all', 'all_', default=False, is_flag=True,
+ help="Build elements that would not be needed for the current build plan")
+@click.option('--track', 'track_', multiple=True,
+ type=click.Path(readable=False),
+ help="Specify elements to track during the build. Can be used "
+ "repeatedly to specify multiple elements")
+@click.option('--track-all', default=False, is_flag=True,
+ help="Track all elements in the pipeline")
+@click.option('--track-except', multiple=True,
+ type=click.Path(readable=False),
+ help="Except certain dependencies from tracking")
+@click.option('--track-cross-junctions', '-J', default=False, is_flag=True,
+ help="Allow tracking to cross junction boundaries")
+@click.option('--track-save', default=False, is_flag=True,
+ help="Deprecated: This is ignored")
+@click.option('--remote', '-r', default=None,
+ help="The URL of the remote cache (defaults to the first configured cache)")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def build(app, elements, all_, track_, track_save, track_all, track_except, track_cross_junctions, remote):
+ """Build elements in a pipeline
+
+ Specifying no elements will result in building the default targets
+ of the project. If no default targets are configured, all project
+ elements will be built.
+
+ When this command is executed from a workspace directory, the default
+ is to build the workspace element.
+ """
+
+ if (track_except or track_cross_junctions) and not (track_ or track_all):
+ click.echo("ERROR: The --track-except and --track-cross-junctions options "
+ "can only be used with --track or --track-all", err=True)
+ sys.exit(-1)
+
+ if track_save:
+ click.echo("WARNING: --track-save is deprecated, saving is now unconditional", err=True)
+
+ with app.initialized(session_name="Build"):
+ ignore_junction_targets = False
+
+ if not elements:
+ elements = app.project.get_default_targets()
+ # Junction elements cannot be built, exclude them from default targets
+ ignore_junction_targets = True
+
+ if track_all:
+ track_ = elements
+
+ app.stream.build(elements,
+ track_targets=track_,
+ track_except=track_except,
+ track_cross_junctions=track_cross_junctions,
+ ignore_junction_targets=ignore_junction_targets,
+ build_all=all_,
+ remote=remote)
+
+
+##################################################################
+# Show Command #
+##################################################################
+@cli.command(short_help="Show elements in the pipeline")
+@click.option('--except', 'except_', multiple=True,
+ type=click.Path(readable=False),
+ help="Except certain dependencies")
+@click.option('--deps', '-d', default='all',
+ type=click.Choice(['none', 'plan', 'run', 'build', 'all']),
+ help='The dependencies to show (default: all)')
+@click.option('--order', default="stage",
+ type=click.Choice(['stage', 'alpha']),
+ help='Staging or alphabetic ordering of dependencies')
+@click.option('--format', '-f', 'format_', metavar='FORMAT', default=None,
+ type=click.STRING,
+ help='Format string for each element')
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def show(app, elements, deps, except_, order, format_):
+ """Show elements in the pipeline
+
+ Specifying no elements will result in showing the default targets
+ of the project. If no default targets are configured, all project
+ elements will be shown.
+
+ When this command is executed from a workspace directory, the default
+ is to show the workspace element.
+
+ By default this will show all of the dependencies of the
+ specified target element.
+
+ Specify `--deps` to control which elements to show:
+
+ \b
+ none: No dependencies, just the element itself
+ plan: Dependencies required for a build plan
+ run: Runtime dependencies, including the element itself
+ build: Build time dependencies, excluding the element itself
+ all: All dependencies
+
+ \b
+ FORMAT
+ ~~~~~~
+ The --format option controls what should be printed for each element,
+ the following symbols can be used in the format string:
+
+ \b
+ %{name} The element name
+ %{key} The abbreviated cache key (if all sources are consistent)
+ %{full-key} The full cache key (if all sources are consistent)
+ %{state} cached, buildable, waiting or inconsistent
+ %{config} The element configuration
+ %{vars} Variable configuration
+ %{env} Environment settings
+ %{public} Public domain data
+ %{workspaced} If the element is workspaced
+ %{workspace-dirs} A list of workspace directories
+ %{deps} A list of all dependencies
+ %{build-deps} A list of build dependencies
+ %{runtime-deps} A list of runtime dependencies
+
+ The value of the %{symbol} without the leading '%' character is understood
+ as a pythonic formatting string, so python formatting features apply,
+    example:
+
+ \b
+ bst show target.bst --format \\
+ 'Name: %{name: ^20} Key: %{key: ^8} State: %{state}'
+
+ If you want to use a newline in a format string in bash, use the '$' modifier:
+
+ \b
+ bst show target.bst --format \\
+ $'---------- %{name} ----------\\n%{vars}'
+ """
+ with app.initialized():
+ # Do not require artifact directory trees or file contents to be present for `bst show`
+ app.context.set_artifact_directories_optional()
+
+ if not elements:
+ elements = app.project.get_default_targets()
+
+ dependencies = app.stream.load_selection(elements,
+ selection=deps,
+ except_targets=except_)
+
+ if order == "alpha":
+ dependencies = sorted(dependencies)
+
+ if not format_:
+ format_ = app.context.log_element_format
+
+ report = app.logger.show_pipeline(dependencies, format_)
+ click.echo(report, color=app.colors)
+
+
+##################################################################
+# Shell Command #
+##################################################################
+@cli.command(short_help="Shell into an element's sandbox environment")
+@click.option('--build', '-b', 'build_', is_flag=True, default=False,
+ help='Stage dependencies and sources to build')
+@click.option('--sysroot', '-s', default=None,
+ type=click.Path(exists=True, file_okay=False, readable=True),
+ help="An existing sysroot")
+@click.option('--mount', type=click.Tuple([click.Path(exists=True), str]), multiple=True,
+ metavar='HOSTPATH PATH',
+ help="Mount a file or directory into the sandbox")
+@click.option('--isolate', is_flag=True, default=False,
+ help='Create an isolated build sandbox')
+@click.option('--use-buildtree', '-t', 'cli_buildtree', type=click.Choice(['ask', 'try', 'always', 'never']),
+ default='ask',
+              help="Defaults to 'ask'; if set to 'always', fail if a build tree is not available")
+@click.argument('element', required=False,
+ type=click.Path(readable=False))
+@click.argument('command', type=click.STRING, nargs=-1)
+@click.pass_obj
+def shell(app, element, sysroot, mount, isolate, build_, cli_buildtree, command):
+ """Run a command in the target element's sandbox environment
+
+ When this command is executed from a workspace directory, the default
+ is to shell into the workspace element.
+
+ This will stage a temporary sysroot for running the target
+ element, assuming it has already been built and all required
+ artifacts are in the local cache.
+
+ Use '--' to separate a command from the options to bst,
+ otherwise bst may respond to them instead. e.g.
+
+ \b
+ bst shell example.bst -- df -h
+
+ Use the --build option to create a temporary sysroot for
+ building the element instead.
+
+ Use the --sysroot option with an existing failed build
+ directory or with a checkout of the given target, in order
+ to use a specific sysroot.
+
+ If no COMMAND is specified, the default is to attempt
+ to run an interactive shell.
+ """
+ from ..element import Scope
+ from .._project import HostMount
+ from .._pipeline import PipelineSelection
+
+ if build_:
+ scope = Scope.BUILD
+ else:
+ scope = Scope.RUN
+
+ use_buildtree = None
+
+ with app.initialized():
+ if not element:
+ element = app.project.get_default_target()
+ if not element:
+ raise AppError('Missing argument "ELEMENT".')
+
+ dependencies = app.stream.load_selection((element,), selection=PipelineSelection.NONE,
+ use_artifact_config=True)
+ element = dependencies[0]
+ prompt = app.shell_prompt(element)
+ mounts = [
+ HostMount(path, host_path)
+ for host_path, path in mount
+ ]
+
+ cached = element._cached_buildtree()
+ buildtree_exists = element._buildtree_exists()
+ if cli_buildtree in ("always", "try"):
+ use_buildtree = cli_buildtree
+ if not cached and buildtree_exists and use_buildtree == "always":
+ click.echo("WARNING: buildtree is not cached locally, will attempt to pull from available remotes",
+ err=True)
+ else:
+            # If the value defaulted to 'ask' and we are in non-interactive mode, don't
+            # consider the buildtree; this is the default behaviour of the command
+ if app.interactive and cli_buildtree == "ask":
+ if cached and bool(click.confirm('Do you want to use the cached buildtree?')):
+ use_buildtree = "always"
+ elif buildtree_exists:
+ try:
+ choice = click.prompt("Do you want to pull & use a cached buildtree?",
+ type=click.Choice(['try', 'always', 'never']),
+ err=True, show_choices=True)
+ except click.Abort:
+ click.echo('Aborting', err=True)
+ sys.exit(-1)
+
+ if choice != "never":
+ use_buildtree = choice
+
+ # Raise warning if the element is cached in a failed state
+ if use_buildtree and element._cached_failure():
+ click.echo("WARNING: using a buildtree from a failed build.", err=True)
+
+ try:
+ exitcode = app.stream.shell(element, scope, prompt,
+ directory=sysroot,
+ mounts=mounts,
+ isolate=isolate,
+ command=command,
+ usebuildtree=use_buildtree)
+ except BstError as e:
+ raise AppError("Error launching shell: {}".format(e), detail=e.detail) from e
+
+ # If there were no errors, we return the shell's exit code here.
+ sys.exit(exitcode)
+
+
+##################################################################
+# Source Command #
+##################################################################
+@cli.group(short_help="Manipulate sources for an element")
+def source():
+ """Manipulate sources for an element"""
+
+
+##################################################################
+# Source Fetch Command #
+##################################################################
+@source.command(name="fetch", short_help="Fetch sources in a pipeline")
+@click.option('--except', 'except_', multiple=True,
+ type=click.Path(readable=False),
+ help="Except certain dependencies from fetching")
+@click.option('--deps', '-d', default='plan',
+ type=click.Choice(['none', 'plan', 'all']),
+ help='The dependencies to fetch (default: plan)')
+@click.option('--track', 'track_', default=False, is_flag=True,
+ help="Track new source references before fetching")
+@click.option('--track-cross-junctions', '-J', default=False, is_flag=True,
+ help="Allow tracking to cross junction boundaries")
+@click.option('--remote', '-r', default=None,
+ help="The URL of the remote source cache (defaults to the first configured cache)")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def source_fetch(app, elements, deps, track_, except_, track_cross_junctions, remote):
+ """Fetch sources required to build the pipeline
+
+ Specifying no elements will result in fetching the default targets
+ of the project. If no default targets are configured, all project
+ elements will be fetched.
+
+ When this command is executed from a workspace directory, the default
+ is to fetch the workspace element.
+
+ By default this will only try to fetch sources which are
+ required for the build plan of the specified target element,
+ omitting sources for any elements which are already built
+ and available in the artifact cache.
+
+ Specify `--deps` to control which sources to fetch:
+
+ \b
+ none: No dependencies, just the element itself
+ plan: Only dependencies required for the build plan
+ all: All dependencies
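+
+ For example (the element name below is purely illustrative):
+
+ \b
+ bst source fetch --deps all hello.bst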
+ """
+ from .._pipeline import PipelineSelection
+
+ if track_cross_junctions and not track_:
+ click.echo("ERROR: The --track-cross-junctions option can only be used with --track", err=True)
+ sys.exit(-1)
+
+ if track_ and deps == PipelineSelection.PLAN:
+ click.echo("WARNING: --track specified for tracking of a build plan\n\n"
+ "Since tracking modifies the build plan, all elements will be tracked.", err=True)
+ deps = PipelineSelection.ALL
+
+ with app.initialized(session_name="Fetch"):
+ if not elements:
+ elements = app.project.get_default_targets()
+
+ app.stream.fetch(elements,
+ selection=deps,
+ except_targets=except_,
+ track_targets=track_,
+ track_cross_junctions=track_cross_junctions,
+ remote=remote)
+
+
+##################################################################
+# Source Track Command #
+##################################################################
+@source.command(name="track", short_help="Track new source references")
+@click.option('--except', 'except_', multiple=True,
+ type=click.Path(readable=False),
+ help="Except certain dependencies from tracking")
+@click.option('--deps', '-d', default='none',
+ type=click.Choice(['none', 'all']),
+ help='The dependencies to track (default: none)')
+@click.option('--cross-junctions', '-J', default=False, is_flag=True,
+ help="Allow crossing junction boundaries")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def source_track(app, elements, deps, except_, cross_junctions):
+ """Consults the specified tracking branches for new versions available
+ to build and updates the project with any newly available references.
+
+ Specifying no elements will result in tracking the default targets
+ of the project. If no default targets are configured, all project
+ elements will be tracked.
+
+ When this command is executed from a workspace directory, the default
+ is to track the workspace element.
+
+ By default this will track just the specified elements, but you can also
+ update a whole tree of dependencies in one go.
+
+ Specify `--deps` to control which sources to track:
+
+ \b
+ none: No dependencies, just the specified elements
+ all: All dependencies of all specified elements
+ """
+ with app.initialized(session_name="Track"):
+ if not elements:
+ elements = app.project.get_default_targets()
+
+ # Substitute 'none' for 'redirect' so that element redirections
+ # will be done
+ if deps == 'none':
+ deps = 'redirect'
+ app.stream.track(elements,
+ selection=deps,
+ except_targets=except_,
+ cross_junctions=cross_junctions)
+
+
+##################################################################
+# Source Checkout Command #
+##################################################################
+@source.command(name='checkout', short_help='Checkout sources for an element')
+@click.option('--force', '-f', default=False, is_flag=True,
+ help="Allow files to be overwritten")
+@click.option('--except', 'except_', multiple=True,
+ type=click.Path(readable=False),
+ help="Except certain dependencies")
+@click.option('--deps', '-d', default='none',
+ type=click.Choice(['build', 'none', 'run', 'all']),
+ help='The dependencies whose sources to checkout (default: none)')
+@click.option('--fetch', 'fetch_', default=False, is_flag=True,
+ help='Fetch elements if they are not fetched')
+@click.option('--tar', 'tar', default=False, is_flag=True,
+ help='Create a tarball from the element\'s sources instead of a '
+ 'file tree.')
+@click.option('--include-build-scripts', 'build_scripts', is_flag=True,
+ help="Include build scripts in the checked out sources")
+@click.argument('element', required=False, type=click.Path(readable=False))
+@click.argument('location', type=click.Path(), required=False)
+@click.pass_obj
+def source_checkout(app, element, location, force, deps, fetch_, except_,
+ tar, build_scripts):
+ """Checkout sources of an element to the specified location
+
+ When this command is executed from a workspace directory, the default
+ is to checkout the sources of the workspace element.
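+
+ For example (illustrative element and location names):
+
+ \b
+ bst source checkout hello.bst path/to/sources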
+ """
+ if not element and not location:
+ click.echo("ERROR: LOCATION is not specified", err=True)
+ sys.exit(-1)
+
+ if element and not location:
+ # Nasty hack to get around click's optional args
+ location = element
+ element = None
+
+ with app.initialized():
+ if not element:
+ element = app.project.get_default_target()
+ if not element:
+ raise AppError('Missing argument "ELEMENT".')
+
+ app.stream.source_checkout(element,
+ location=location,
+ force=force,
+ deps=deps,
+ fetch=fetch_,
+ except_targets=except_,
+ tar=tar,
+ include_build_scripts=build_scripts)
+
+
+##################################################################
+# Workspace Command #
+##################################################################
+@cli.group(short_help="Manipulate developer workspaces")
+def workspace():
+ """Manipulate developer workspaces"""
+
+
+##################################################################
+# Workspace Open Command #
+##################################################################
+@workspace.command(name='open', short_help="Open a new workspace")
+@click.option('--no-checkout', default=False, is_flag=True,
+ help="Do not checkout the source, only link to the given directory")
+@click.option('--force', '-f', default=False, is_flag=True,
+ help="The workspace will be created even if the directory in which it will be created is not empty " +
+ "or if a workspace for that element already exists")
+@click.option('--track', 'track_', default=False, is_flag=True,
+ help="Track and fetch new source references before checking out the workspace")
+@click.option('--directory', type=click.Path(file_okay=False), default=None,
+ help="Only for use when a single Element is given: Set the directory to use to create the workspace")
+@click.argument('elements', nargs=-1, type=click.Path(readable=False), required=True)
+@click.pass_obj
+def workspace_open(app, no_checkout, force, track_, directory, elements):
+ """Open a workspace for manual source modification"""
+
+ with app.initialized():
+ app.stream.workspace_open(elements,
+ no_checkout=no_checkout,
+ track_first=track_,
+ force=force,
+ custom_dir=directory)
+
+
+##################################################################
+# Workspace Close Command #
+##################################################################
+@workspace.command(name='close', short_help="Close workspaces")
+@click.option('--remove-dir', default=False, is_flag=True,
+ help="Remove the path that contains the closed workspace")
+@click.option('--all', '-a', 'all_', default=False, is_flag=True,
+ help="Close all open workspaces")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def workspace_close(app, remove_dir, all_, elements):
+ """Close a workspace"""
+
+ removed_required_element = False
+
+ with app.initialized():
+ if not (all_ or elements):
+ # NOTE: I may need to revisit this when implementing multiple projects
+ # opening one workspace.
+ element = app.project.get_default_target()
+ if element:
+ elements = (element,)
+ else:
+ raise AppError('No elements specified')
+
+ # Early exit if we specified `all` and there are no workspaces
+ if all_ and not app.stream.workspace_exists():
+ click.echo('No open workspaces to close', err=True)
+ sys.exit(0)
+
+ if all_:
+ elements = [element_name for element_name, _ in app.context.get_workspaces().list()]
+
+ elements = app.stream.redirect_element_names(elements)
+
+ # Check that the workspaces in question exist, and that it's safe to
+ # remove them.
+ nonexisting = []
+ for element_name in elements:
+ if not app.stream.workspace_exists(element_name):
+ nonexisting.append(element_name)
+ if nonexisting:
+ raise AppError("Workspace does not exist", detail="\n".join(nonexisting))
+
+ for element_name in elements:
+ app.stream.workspace_close(element_name, remove_dir=remove_dir)
+ if app.stream.workspace_is_required(element_name):
+ removed_required_element = True
+
+ # This message is echoed last, as it's most relevant to the next
+ # thing the user will type.
+ if removed_required_element:
+ click.echo(
+ "Removed '{}', therefore you can no longer run BuildStream "
+ "commands from the current directory.".format(element_name), err=True)
+
+
+##################################################################
+# Workspace Reset Command #
+##################################################################
+@workspace.command(name='reset', short_help="Reset a workspace to its original state")
+@click.option('--soft', default=False, is_flag=True,
+ help="Reset workspace state without affecting its contents")
+@click.option('--track', 'track_', default=False, is_flag=True,
+ help="Track and fetch the latest source before resetting")
+@click.option('--all', '-a', 'all_', default=False, is_flag=True,
+ help="Reset all open workspaces")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def workspace_reset(app, soft, track_, all_, elements):
+ """Reset a workspace to its original state"""
+
+ # Check that the workspaces in question exist
+ with app.initialized():
+
+ if not (all_ or elements):
+ element = app.project.get_default_target()
+ if element:
+ elements = (element,)
+ else:
+ raise AppError('No elements specified to reset')
+
+ if all_ and not app.stream.workspace_exists():
+ raise AppError("No open workspaces to reset")
+
+ if all_:
+ elements = tuple(element_name for element_name, _ in app.context.get_workspaces().list())
+
+ app.stream.workspace_reset(elements, soft=soft, track_first=track_)
+
+
+##################################################################
+# Workspace List Command #
+##################################################################
+@workspace.command(name='list', short_help="List open workspaces")
+@click.pass_obj
+def workspace_list(app):
+ """List open workspaces"""
+
+ with app.initialized():
+ app.stream.workspace_list()
+
+
+#############################################################
+# Artifact Commands #
+#############################################################
+@cli.group(short_help="Manipulate cached artifacts")
+def artifact():
+ """Manipulate cached artifacts"""
+
+
+#####################################################################
+# Artifact Checkout Command #
+#####################################################################
+@artifact.command(name='checkout', short_help="Checkout contents of an artifact")
+@click.option('--force', '-f', default=False, is_flag=True,
+ help="Allow files to be overwritten")
+@click.option('--deps', '-d', default=None,
+ type=click.Choice(['run', 'build', 'none']),
+ help='The dependencies to checkout (default: run)')
+@click.option('--integrate/--no-integrate', default=None, is_flag=True,
+ help="Whether to run integration commands")
+@click.option('--hardlinks', default=False, is_flag=True,
+ help="Checkout hardlinks instead of copying if possible")
+@click.option('--tar', default=None, metavar='LOCATION',
+ type=click.Path(),
+ help="Create a tarball from the artifact contents instead "
+ "of a file tree. If LOCATION is '-', the tarball "
+ "will be dumped to the standard output.")
+@click.option('--directory', default=None,
+ type=click.Path(file_okay=False),
+ help="The directory to checkout the artifact to")
+@click.argument('element', required=False,
+ type=click.Path(readable=False))
+@click.pass_obj
+def artifact_checkout(app, force, deps, integrate, hardlinks, tar, directory, element):
+ """Checkout contents of an artifact
+
+ When this command is executed from a workspace directory, the default
+ is to checkout the artifact of the workspace element.
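+
+ For example (illustrative element and path names):
+
+ \b
+ bst artifact checkout --directory path/to/checkout hello.bst
+ bst artifact checkout --tar hello.tar hello.bst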
+ """
+ from ..element import Scope
+
+ if hardlinks and tar is not None:
+ click.echo("ERROR: options --hardlinks and --tar conflict", err=True)
+ sys.exit(-1)
+
+ if tar is None and directory is None:
+ click.echo("ERROR: One of --directory or --tar must be provided", err=True)
+ sys.exit(-1)
+
+ if tar is not None and directory is not None:
+ click.echo("ERROR: options --directory and --tar conflict", err=True)
+ sys.exit(-1)
+
+ if tar is not None:
+ location = tar
+ tar = True
+ else:
+ location = os.getcwd() if directory is None else directory
+ tar = False
+
+ if deps == "build":
+ scope = Scope.BUILD
+ elif deps == "none":
+ scope = Scope.NONE
+ else:
+ scope = Scope.RUN
+
+ with app.initialized():
+ if not element:
+ element = app.project.get_default_target()
+ if not element:
+ raise AppError('Missing argument "ELEMENT".')
+
+ app.stream.checkout(element,
+ location=location,
+ force=force,
+ scope=scope,
+ integrate=True if integrate is None else integrate,
+ hardlinks=hardlinks,
+ tar=tar)
+
+
+################################################################
+# Artifact Pull Command #
+################################################################
+@artifact.command(name="pull", short_help="Pull a built artifact")
+@click.option('--deps', '-d', default='none',
+ type=click.Choice(['none', 'all']),
+ help='The dependency artifacts to pull (default: none)')
+@click.option('--remote', '-r', default=None,
+ help="The URL of the remote cache (defaults to the first configured cache)")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def artifact_pull(app, elements, deps, remote):
+ """Pull a built artifact from the configured remote artifact cache.
+
+ Specifying no elements will result in pulling the default targets
+ of the project. If no default targets are configured, all project
+ elements will be pulled.
+
+ When this command is executed from a workspace directory, the default
+ is to pull the workspace element.
+
+ By default the artifact will be pulled from one of the configured caches
+ if possible, following the usual priority order. If the `--remote` flag
+ is given, only the specified cache will be queried.
+
+ Specify `--deps` to control which artifacts to pull:
+
+ \b
+ none: No dependencies, just the element itself
+ all: All dependencies
+ """
+
+ with app.initialized(session_name="Pull"):
+ ignore_junction_targets = False
+
+ if not elements:
+ elements = app.project.get_default_targets()
+ # Junction elements cannot be pulled, exclude them from default targets
+ ignore_junction_targets = True
+
+ app.stream.pull(elements, selection=deps, remote=remote,
+ ignore_junction_targets=ignore_junction_targets)
+
+
+##################################################################
+# Artifact Push Command #
+##################################################################
+@artifact.command(name="push", short_help="Push a built artifact")
+@click.option('--deps', '-d', default='none',
+ type=click.Choice(['none', 'all']),
+ help='The dependencies to push (default: none)')
+@click.option('--remote', '-r', default=None,
+ help="The URL of the remote cache (defaults to the first configured cache)")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def artifact_push(app, elements, deps, remote):
+ """Push a built artifact to a remote artifact cache.
+
+ Specifying no elements will result in pushing the default targets
+ of the project. If no default targets are configured, all project
+ elements will be pushed.
+
+ When this command is executed from a workspace directory, the default
+ is to push the workspace element.
+
+ The default destination is the highest priority configured cache. You can
+ override this by passing a different cache URL with the `--remote` flag.
+
+ If bst has been configured to include build trees on artifact pulls,
+ an attempt will be made to pull any required build trees, so that
+ partial artifacts are not skipped when pushing.
+
+ Specify `--deps` to control which artifacts to push:
+
+ \b
+ none: No dependencies, just the element itself
+ all: All dependencies
+ """
+ with app.initialized(session_name="Push"):
+ ignore_junction_targets = False
+
+ if not elements:
+ elements = app.project.get_default_targets()
+ # Junction elements cannot be pushed, exclude them from default targets
+ ignore_junction_targets = True
+
+ app.stream.push(elements, selection=deps, remote=remote,
+ ignore_junction_targets=ignore_junction_targets)
+
+
+################################################################
+# Artifact Log Command #
+################################################################
+@artifact.command(name='log', short_help="Show logs of artifacts")
+@click.argument('artifacts', type=click.Path(), nargs=-1)
+@click.pass_obj
+def artifact_log(app, artifacts):
+ """Show logs of artifacts.
+
+ Note that 'artifacts' can be element references like "hello.bst", and they
+ can also be artifact references. You may use shell-style wildcards for
+ either.
+
+ Here are some examples of element references:
+
+ \b
+ - `hello.bst`
+ - `*.bst`
+
+ Note that element references must end with '.bst' to distinguish them from
+ artifact references. Anything that does not end in '.bst' is an artifact
+ ref.
+
+ Artifact references follow the format `<project_name>/<element>/<key>`.
+ Note that 'element' is without the `.bst` extension.
+
+ Here are some examples of artifact references:
+
+ \b
+ - `myproject/hello/*`
+ - `myproject/*`
+ - `*`
+ - `myproject/hello/827637*`
+ - `myproject/he*/827637*`
+ - `myproject/he??o/827637*`
+ - `m*/h*/8276376b077eda104c812e6ec2f488c7c9eea211ce572c83d734c10bf241209f`
+
+ """
+ # Note that the backticks in the above docstring are important for the
+ # generated docs. When sphinx is generating rst output from the help output
+ # of this command, the asterisks will be interpreted as emphasis tokens if
+ # they are not somehow escaped.
+
+ with app.initialized():
+ logsdirs = app.stream.artifact_log(artifacts)
+
+ with ExitStack() as stack:
+ extractdirs = []
+ for logsdir in logsdirs:
+ # NOTE: If reading the logs feels unresponsive, here would be a good place
+ # to provide progress information.
+ td = stack.enter_context(TemporaryDirectory())
+ logsdir.export_files(td, can_link=True)
+ extractdirs.append(td)
+
+ for extractdir in extractdirs:
+ for log in (os.path.join(extractdir, log) for log in os.listdir(extractdir)):
+ # NOTE: Should click gain the ability to pass files to the pager this can be optimised.
+ with open(log) as f:
+ data = f.read()
+ click.echo_via_pager(data)
+
+
+###################################################################
+# Artifact Delete Command #
+###################################################################
+@artifact.command(name='delete', short_help="Remove artifacts from the local cache")
+@click.option('--no-prune', 'no_prune', default=False, is_flag=True,
+ help="Do not prune the local cache of unreachable refs")
+@click.argument('artifacts', type=click.Path(), nargs=-1)
+@click.pass_obj
+def artifact_delete(app, artifacts, no_prune):
+ """Remove artifacts from the local cache"""
+ with app.initialized():
+ app.stream.artifact_delete(artifacts, no_prune)
+
+
+##################################################################
+# DEPRECATED Commands #
+##################################################################
+
+# XXX: The following commands are now obsolete, but they are kept
+# here along with all the options so that we can provide nice error
+# messages when they are called.
+# Also, note that these commands are hidden from the top-level help.
+
+##################################################################
+# Fetch Command #
+##################################################################
+@cli.command(short_help="COMMAND OBSOLETE - Fetch sources in a pipeline", hidden=True)
+@click.option('--except', 'except_', multiple=True,
+ type=click.Path(readable=False),
+ help="Except certain dependencies from fetching")
+@click.option('--deps', '-d', default='plan',
+ type=click.Choice(['none', 'plan', 'all']),
+ help='The dependencies to fetch (default: plan)')
+@click.option('--track', 'track_', default=False, is_flag=True,
+ help="Track new source references before fetching")
+@click.option('--track-cross-junctions', '-J', default=False, is_flag=True,
+ help="Allow tracking to cross junction boundaries")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def fetch(app, elements, deps, track_, except_, track_cross_junctions):
+ click.echo("This command is now obsolete. Use `bst source fetch` instead.", err=True)
+ sys.exit(1)
+
+
+##################################################################
+# Track Command #
+##################################################################
+@cli.command(short_help="COMMAND OBSOLETE - Track new source references", hidden=True)
+@click.option('--except', 'except_', multiple=True,
+ type=click.Path(readable=False),
+ help="Except certain dependencies from tracking")
+@click.option('--deps', '-d', default='none',
+ type=click.Choice(['none', 'all']),
+ help='The dependencies to track (default: none)')
+@click.option('--cross-junctions', '-J', default=False, is_flag=True,
+ help="Allow crossing junction boundaries")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def track(app, elements, deps, except_, cross_junctions):
+ click.echo("This command is now obsolete. Use `bst source track` instead.", err=True)
+ sys.exit(1)
+
+
+##################################################################
+# Checkout Command #
+##################################################################
+@cli.command(short_help="COMMAND OBSOLETE - Checkout a built artifact", hidden=True)
+@click.option('--force', '-f', default=False, is_flag=True,
+ help="Allow files to be overwritten")
+@click.option('--deps', '-d', default='run',
+ type=click.Choice(['run', 'build', 'none']),
+ help='The dependencies to checkout (default: run)')
+@click.option('--integrate/--no-integrate', default=True, is_flag=True,
+ help="Whether to run integration commands")
+@click.option('--hardlinks', default=False, is_flag=True,
+ help="Checkout hardlinks instead of copies (handle with care)")
+@click.option('--tar', default=False, is_flag=True,
+ help="Create a tarball from the artifact contents instead "
+ "of a file tree. If LOCATION is '-', the tarball "
+ "will be dumped to the standard output.")
+@click.argument('element', required=False,
+ type=click.Path(readable=False))
+@click.argument('location', type=click.Path(), required=False)
+@click.pass_obj
+def checkout(app, element, location, force, deps, integrate, hardlinks, tar):
+ click.echo("This command is now obsolete. Use `bst artifact checkout` instead " +
+ "and use the --directory option to specify LOCATION", err=True)
+ sys.exit(1)
+
+
+################################################################
+# Pull Command #
+################################################################
+@cli.command(short_help="COMMAND OBSOLETE - Pull a built artifact", hidden=True)
+@click.option('--deps', '-d', default='none',
+ type=click.Choice(['none', 'all']),
+ help='The dependency artifacts to pull (default: none)')
+@click.option('--remote', '-r',
+ help="The URL of the remote cache (defaults to the first configured cache)")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def pull(app, elements, deps, remote):
+ click.echo("This command is now obsolete. Use `bst artifact pull` instead.", err=True)
+ sys.exit(1)
+
+
+##################################################################
+# Push Command #
+##################################################################
+@cli.command(short_help="COMMAND OBSOLETE - Push a built artifact", hidden=True)
+@click.option('--deps', '-d', default='none',
+ type=click.Choice(['none', 'all']),
+ help='The dependencies to push (default: none)')
+@click.option('--remote', '-r', default=None,
+ help="The URL of the remote cache (defaults to the first configured cache)")
+@click.argument('elements', nargs=-1,
+ type=click.Path(readable=False))
+@click.pass_obj
+def push(app, elements, deps, remote):
+ click.echo("This command is now obsolete. Use `bst artifact push` instead.", err=True)
+ sys.exit(1)
diff --git a/src/buildstream/_frontend/complete.py b/src/buildstream/_frontend/complete.py
new file mode 100644
index 000000000..bf9324812
--- /dev/null
+++ b/src/buildstream/_frontend/complete.py
@@ -0,0 +1,338 @@
+#
+# Copyright (c) 2014 by Armin Ronacher.
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# This module was forked from the python click library. The original
+# copyright notice from the Click library and the following disclaimer
+# are included as per their LICENSE requirements.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+import collections.abc
+import copy
+import os
+
+import click
+from click.core import MultiCommand, Option, Argument
+from click.parser import split_arg_string
+
+WORDBREAK = '='
+
+COMPLETION_SCRIPT = '''
+%(complete_func)s() {
+ local IFS=$'\n'
+ COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
+ COMP_CWORD=$COMP_CWORD \\
+ %(autocomplete_var)s=complete $1 ) )
+ return 0
+}
+
+complete -F %(complete_func)s -o nospace %(script_names)s
+'''
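+
+# Illustrative sketch of how the rendered script is expected to behave once
+# sourced (the shell function name below is an example, not taken from this
+# module): bash hands the current command line to the function via
+# COMP_WORDS/COMP_CWORD, the function re-invokes the program with the
+# autocomplete variable set, and the program prints one completion candidate
+# per line for bash to place into COMPREPLY.
+#
+#   _bst_completion() {
+#       local IFS=$'\n'
+#       COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \
+#                      COMP_CWORD=$COMP_CWORD \
+#                      _BST_COMPLETION=complete $1 ) )
+#       return 0
+#   }
+#   complete -F _bst_completion -o nospace bst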
+
+
+# An exception for our custom completion handler to
+# indicate that it does not want to handle completion
+# for this parameter
+#
+class CompleteUnhandled(Exception):
+ pass
+
+
+def complete_path(path_type, incomplete, base_directory='.'):
+ """Helper method for implementing the completions() method
+ for File and Path parameter types.
+ """
+
+ # Try listing the files in the relative or absolute path
+ # specified in `incomplete` minus the last path component,
+ # otherwise list files starting from the current working directory.
+ entries = []
+ base_path = ''
+
+ # This is getting a bit messy
+ listed_base_directory = False
+
+ if os.path.sep in incomplete:
+ split = incomplete.rsplit(os.path.sep, 1)
+ base_path = split[0]
+
+ # If there was nothing on the left of the last separator,
+ # we are completing files in the filesystem root
+ base_path = os.path.join(base_directory, base_path)
+ else:
+ incomplete_base_path = os.path.join(base_directory, incomplete)
+ if os.path.isdir(incomplete_base_path):
+ base_path = incomplete_base_path
+
+ try:
+ if base_path:
+ if os.path.isdir(base_path):
+ entries = [os.path.join(base_path, e) for e in os.listdir(base_path)]
+ else:
+ entries = os.listdir(base_directory)
+ listed_base_directory = True
+ except OSError:
+ # If for any reason the os reports an error from os.listdir(), just
+ # ignore this and avoid a stack trace
+ pass
+
+ base_directory_slash = base_directory
+ if not base_directory_slash.endswith(os.sep):
+ base_directory_slash += os.sep
+ base_directory_len = len(base_directory_slash)
+
+ def entry_is_dir(entry):
+ if listed_base_directory:
+ entry = os.path.join(base_directory, entry)
+ return os.path.isdir(entry)
+
+ def fix_path(path):
+
+ # Append slashes to any entries which are directories, or
+ # spaces for other files since they cannot be further completed
+ if entry_is_dir(path) and not path.endswith(os.sep):
+ path = path + os.sep
+ else:
+ path = path + " "
+
+ # Remove the artificial leading path portion which
+ # may have been prepended for search purposes.
+ if path.startswith(base_directory_slash):
+ path = path[base_directory_len:]
+
+ return path
+
+ return [
+ # Return an appropriate path for each entry
+ fix_path(e) for e in sorted(entries)
+
+ # Filter out non directory elements when searching for a directory,
+ # the opposite is fine, however.
+ if not (path_type == 'Directory' and not entry_is_dir(e))
+ ]
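+
+
+# A rough illustration of the helper above (assuming, purely for the sake of
+# example, that a ./src directory exists in the current working directory):
+#
+#   complete_path('Directory', 'src/')
+#
+# would list the entries under ./src, filter out regular files because a
+# 'Directory' completion was requested, append a trailing path separator to
+# each directory, and strip the artificial leading './' again before
+# returning the candidates.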
+
+
+# Instead of delegating completions to the param type,
+# hard code all of buildstream's completions here.
+#
+# This whole module should be removed in favor of more
+# generic code in click once this issue is resolved:
+# https://github.com/pallets/click/issues/780
+#
+def get_param_type_completion(param_type, incomplete):
+
+ if isinstance(param_type, click.Choice):
+ return [c + " " for c in param_type.choices]
+ elif isinstance(param_type, click.File):
+ return complete_path("File", incomplete)
+ elif isinstance(param_type, click.Path):
+ return complete_path(param_type.path_type, incomplete)
+
+ return []
+
+
+def resolve_ctx(cli, prog_name, args):
+ """
+ Parse into a hierarchy of contexts. Contexts are connected through the parent variable.
+ :param cli: command definition
+ :param prog_name: the program that is running
+ :param args: full list of args typed before the incomplete arg
+ :return: the final context/command parsed
+ """
+ ctx = cli.make_context(prog_name, args, resilient_parsing=True)
+ args_remaining = ctx.protected_args + ctx.args
+ while ctx is not None and args_remaining:
+ if isinstance(ctx.command, MultiCommand):
+ cmd = ctx.command.get_command(ctx, args_remaining[0])
+ if cmd is None:
+ return None
+ ctx = cmd.make_context(args_remaining[0], args_remaining[1:], parent=ctx, resilient_parsing=True)
+ args_remaining = ctx.protected_args + ctx.args
+ else:
+ ctx = ctx.parent
+
+ return ctx
+
+
+def start_of_option(param_str):
+ """
+ :param param_str: param_str to check
+ :return: whether or not this is the start of an option declaration (i.e. starts "-" or "--")
+ """
+ return param_str and param_str[:1] == '-'
+
+
+def is_incomplete_option(all_args, cmd_param):
+ """
+ :param all_args: the full original list of args supplied
+ :param cmd_param: the current command parameter
+ :return: whether or not the last option declaration (i.e. starts "-" or "--") is incomplete and
+ corresponds to this cmd_param. In other words whether this cmd_param option can still accept
+ values
+ """
+ if cmd_param.is_flag:
+ return False
+ last_option = None
+ for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):
+ if index + 1 > cmd_param.nargs:
+ break
+ if start_of_option(arg_str):
+ last_option = arg_str
+
+ return bool(last_option and last_option in cmd_param.opts)
+
+
+def is_incomplete_argument(current_params, cmd_param):
+ """
+ :param current_params: the current params and values for this argument as already entered
+ :param cmd_param: the current command parameter
+ :return: whether or not the last argument is incomplete and corresponds to this cmd_param. In
+ other words, whether or not this cmd_param argument can still accept values
+ """
+ current_param_values = current_params[cmd_param.name]
+ if current_param_values is None:
+ return True
+ if cmd_param.nargs == -1:
+ return True
+ if isinstance(current_param_values, collections.abc.Iterable) \
+ and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:
+ return True
+ return False
+
+
+def get_user_autocompletions(args, incomplete, cmd, cmd_param, override):
+ """
+ :param args: full list of args typed before the incomplete arg
+ :param incomplete: the incomplete text of the arg to autocomplete
+ :param cmd: command definition
+ :param cmd_param: parameter definition
+ :param override: a callable (cmd_param, args, incomplete) that will be
+ called to override default completion based on parameter type. Should raise
+ 'CompleteUnhandled' if it could not find a completion.
+ :return: all the possible user-specified completions for the param
+ """
+
+ # Use the type specific default completions unless it was overridden
+ try:
+ return override(cmd=cmd,
+ cmd_param=cmd_param,
+ args=args,
+ incomplete=incomplete)
+ except CompleteUnhandled:
+ return get_param_type_completion(cmd_param.type, incomplete) or []
+
+
+def get_choices(cli, prog_name, args, incomplete, override):
+ """
+ :param cli: command definition
+ :param prog_name: the program that is running
+ :param args: full list of args typed before the incomplete arg
+ :param incomplete: the incomplete text of the arg to autocomplete
+ :param override: a callable (cmd_param, args, incomplete) that will be
+ called to override default completion based on parameter type. Should raise
+ 'CompleteUnhandled' if it could not find a completion.
+ :return: all the possible completions for the incomplete
+ """
+ all_args = copy.deepcopy(args)
+
+ ctx = resolve_ctx(cli, prog_name, args)
+ if ctx is None:
+ return
+
+ # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse
+ # without the '='
+ if start_of_option(incomplete) and WORDBREAK in incomplete:
+ partition_incomplete = incomplete.partition(WORDBREAK)
+ all_args.append(partition_incomplete[0])
+ incomplete = partition_incomplete[2]
+ elif incomplete == WORDBREAK:
+ incomplete = ''
+
+ choices = []
+ found_param = False
+ if start_of_option(incomplete):
+ # completions for options
+ for param in ctx.command.params:
+ if isinstance(param, Option):
+ choices.extend([param_opt + " " for param_opt in param.opts + param.secondary_opts
+ if param_opt not in all_args or param.multiple])
+ found_param = True
+ if not found_param:
+ # completion for option values by choices
+ for cmd_param in ctx.command.params:
+ if isinstance(cmd_param, Option) and is_incomplete_option(all_args, cmd_param):
+ choices.extend(get_user_autocompletions(all_args, incomplete, ctx.command, cmd_param, override))
+ found_param = True
+ break
+ if not found_param:
+ # completion for argument values by choices
+ for cmd_param in ctx.command.params:
+ if isinstance(cmd_param, Argument) and is_incomplete_argument(ctx.params, cmd_param):
+ choices.extend(get_user_autocompletions(all_args, incomplete, ctx.command, cmd_param, override))
+ found_param = True
+ break
+
+ if not found_param and isinstance(ctx.command, MultiCommand):
+ # completion for any subcommands
+ choices.extend([cmd + " " for cmd in ctx.command.list_commands(ctx)
+ if not ctx.command.get_command(ctx, cmd).hidden])
+
+ if not start_of_option(incomplete) and ctx.parent is not None \
+ and isinstance(ctx.parent.command, MultiCommand) and ctx.parent.command.chain:
+ # completion for chained commands
+ visible_commands = [cmd for cmd in ctx.parent.command.list_commands(ctx.parent)
+ if not ctx.parent.command.get_command(ctx.parent, cmd).hidden]
+ remaining_commands = set(visible_commands) - set(ctx.parent.protected_args)
+ choices.extend([cmd + " " for cmd in remaining_commands])
+
+ for item in choices:
+ if item.startswith(incomplete):
+ yield item
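+
+
+# An illustrative walk through the priority ordering above, using
+# hypothetical command lines for a program named `bst`:
+#
+#   get_choices(cli, 'bst', ['artifact', 'checkout'], '--d', override)
+#
+# yields option completions such as '--deps ' and '--directory ', while
+#
+#   get_choices(cli, 'bst', ['artifact', 'checkout', '--deps'], '', override)
+#
+# yields the declared choices for --deps ('run ', 'build ', 'none '),
+# assuming the override callable raises CompleteUnhandled so that the
+# default parameter type completions are used.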
+
+
+def do_complete(cli, prog_name, override):
+ cwords = split_arg_string(os.environ['COMP_WORDS'])
+ cword = int(os.environ['COMP_CWORD'])
+ args = cwords[1:cword]
+ try:
+ incomplete = cwords[cword]
+ except IndexError:
+ incomplete = ''
+
+ for item in get_choices(cli, prog_name, args, incomplete, override):
+ click.echo(item)
+
+
+# Main function, called from main.py at startup
+#
+def main_bashcomplete(cmd, prog_name, override):
+ """Internal handler for the bash completion support."""
+
+ if '_BST_COMPLETION' in os.environ:
+ do_complete(cmd, prog_name, override)
+ return True
+
+ return False
diff --git a/src/buildstream/_frontend/linuxapp.py b/src/buildstream/_frontend/linuxapp.py
new file mode 100644
index 000000000..0444dc7b4
--- /dev/null
+++ b/src/buildstream/_frontend/linuxapp.py
@@ -0,0 +1,64 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+import os
+import click
+
+from .app import App
+
+
+# This trick is currently only supported on some terminals,
+# avoid using it where it can cause garbage to be printed
+# to the terminal.
+#
+def _osc_777_supported():
+
+ term = os.environ.get('TERM')
+
+ if term and (term.startswith('xterm') or term.startswith('vte')):
+
+ # Since vte version 4600, upstream silently ignores
+ # the OSC 777 without printing garbage to the terminal.
+ #
+ # For distros like Fedora who have patched vte, this
+ # will trigger a desktop notification and bring attention
+ # to the terminal.
+ #
+ vte_version = os.environ.get('VTE_VERSION')
+ try:
+ vte_version_int = int(vte_version)
+ except (ValueError, TypeError):
+ return False
+
+ if vte_version_int >= 4600:
+ return True
+
+ return False
+
+
+# A linux specific App implementation
+#
+class LinuxApp(App):
+
+ def notify(self, title, text):
+
+ # Currently we only try this notification method
+ # of sending an escape sequence to the terminal
+ #
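+ # For example (illustrative values), notify("BuildStream", "Build complete")
+ # would write the sequence "\033]777;notify;BuildStream;Build complete\007"
+ # to stderr, which supporting terminals surface as a desktop notification.
+ #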
+ if _osc_777_supported():
+ click.echo("\033]777;notify;{};{}\007".format(title, text), err=True)
diff --git a/src/buildstream/_frontend/profile.py b/src/buildstream/_frontend/profile.py
new file mode 100644
index 000000000..dda0f7ffe
--- /dev/null
+++ b/src/buildstream/_frontend/profile.py
@@ -0,0 +1,77 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+import re
+import copy
+import click
+
+
+# Profile()
+#
+# A class for formatting text with ansi color codes
+#
+# Kwargs:
+# The same keyword arguments which can be used with click.style()
+#
+class Profile():
+ def __init__(self, **kwargs):
+ self._kwargs = dict(kwargs)
+
+ # fmt()
+ #
+ # Format some text with ansi color codes
+ #
+ # Args:
+ # text (str): The text to format
+ #
+ # Kwargs:
+ # Keyword arguments to apply on top of the base click.style()
+ # arguments
+ #
+ def fmt(self, text, **kwargs):
+ kwargs = dict(kwargs)
+ fmtargs = copy.copy(self._kwargs)
+ fmtargs.update(kwargs)
+ return click.style(text, **fmtargs)
+
+ # fmt_subst()
+ #
+ # Substitute a variable of the %{varname} form, formatting
+ # only the substituted text with the given click.style() configurations
+ #
+ # Args:
+ # text (str): The text to format, with possible variables
+ # varname (str): The variable name to substitute
+ # value (str): The value to substitute the variable with
+ #
+ # Kwargs:
+ # Keyword arguments to apply on top of the base click.style()
+ # arguments
+ #
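+ # A minimal usage sketch (the values below are illustrative only):
+ #
+ #   profile = Profile(dim=True)
+ #   profile.fmt_subst("elapsed: %{elapsed}", "elapsed", "00:01:02", fg='yellow')
+ #
+ # Only the substituted "00:01:02" is wrapped in color codes; the surrounding
+ # "elapsed: " text is returned by this method unstyled.
+ #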
+ def fmt_subst(self, text, varname, value, **kwargs):
+
+ def subst_callback(match):
+ # Extract and format the "{(varname)...}" portion of the match
+ inner_token = match.group(1)
+ formatted = inner_token.format(**{varname: value})
+
+ # Colorize after the pythonic format formatting, which may have padding
+ return self.fmt(formatted, **kwargs)
+
+ # Lazy regex, after our word, match anything that does not have '%'
+ return re.sub(r"%(\{(" + varname + r")[^%]*\})", subst_callback, text)
diff --git a/src/buildstream/_frontend/status.py b/src/buildstream/_frontend/status.py
new file mode 100644
index 000000000..91f47221a
--- /dev/null
+++ b/src/buildstream/_frontend/status.py
@@ -0,0 +1,523 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+import os
+import sys
+import curses
+import click
+
+# Import a widget internal for formatting time codes
+from .widget import TimeCode
+from .._scheduler import ElementJob
+
+
+# Status()
+#
+# A widget for formatting overall status.
+#
+# Note that the render() and clear() methods in this class are
+# simply noops in the case that the application is not connected
+# to a terminal, or if the terminal does not support ANSI escape codes.
+#
+# Args:
+# context (Context): The Context
+# content_profile (Profile): Formatting profile for content text
+# format_profile (Profile): Formatting profile for formatting text
+# success_profile (Profile): Formatting profile for success text
+# error_profile (Profile): Formatting profile for error text
+# stream (Stream): The Stream
+# colors (bool): Whether to print the ANSI color codes in the output
+#
+class Status():
+
+ # Table of the terminal capabilities we require and use
+ _TERM_CAPABILITIES = {
+ 'move_up': 'cuu1',
+ 'move_x': 'hpa',
+ 'clear_eol': 'el'
+ }
+
+ def __init__(self, context,
+ content_profile, format_profile,
+ success_profile, error_profile,
+ stream, colors=False):
+
+ self._context = context
+ self._content_profile = content_profile
+ self._format_profile = format_profile
+ self._success_profile = success_profile
+ self._error_profile = error_profile
+ self._stream = stream
+ self._jobs = []
+ self._last_lines = 0 # Number of status lines we last printed to console
+ self._spacing = 1
+ self._colors = colors
+ self._header = _StatusHeader(context,
+ content_profile, format_profile,
+ success_profile, error_profile,
+ stream)
+
+ self._term_width, _ = click.get_terminal_size()
+ self._alloc_lines = 0
+ self._alloc_columns = None
+ self._line_length = 0
+ self._need_alloc = True
+ self._term_caps = self._init_terminal()
+
+ # add_job()
+ #
+ # Adds a job to track in the status area
+ #
+ # Args:
+ #    job (Job): The job to track
+ #
+ def add_job(self, job):
+ elapsed = self._stream.elapsed_time
+ job = _StatusJob(self._context, job, self._content_profile, self._format_profile, elapsed)
+ self._jobs.append(job)
+ self._need_alloc = True
+
+ # remove_job()
+ #
+ # Removes a job currently being tracked in the status area
+ #
+ # Args:
+ #    job (Job): The job to stop tracking
+ #
+ def remove_job(self, job):
+ action_name = job.action_name
+ if not isinstance(job, ElementJob):
+ element = None
+ else:
+ element = job.element
+
+ self._jobs = [
+ job for job in self._jobs
+ if not (job.element is element and
+ job.action_name == action_name)
+ ]
+ self._need_alloc = True
+
+ # clear()
+ #
+ # Clear the status area, it is necessary to call
+ # this before printing anything to the console if
+ # a status area is in use.
+ #
+ # To print some logging to the output and then restore
+ # the status, use the following:
+ #
+ # status.clear()
+ # ... print something to console ...
+ # status.render()
+ #
+ def clear(self):
+
+ if not self._term_caps:
+ return
+
+ for _ in range(self._last_lines):
+ self._move_up()
+ self._clear_line()
+ self._last_lines = 0
+
+ # render()
+ #
+ # Render the status area.
+ #
+ # If you are not printing a line in addition to rendering
+ # the status area, for instance in a timeout, then it is
+ # not necessary to call clear().
+ def render(self):
+
+ if not self._term_caps:
+ return
+
+ elapsed = self._stream.elapsed_time
+
+ self.clear()
+ self._check_term_width()
+ self._allocate()
+
+ # Nothing to render, early return
+ if self._alloc_lines == 0:
+ return
+
+ # Before rendering the actual lines, we need to add some line
+ # feeds for the number of lines we intend to print first, and
+ # then move the cursor back up to the first line
+ for _ in range(self._alloc_lines + self._header.lines):
+ click.echo('', err=True)
+ for _ in range(self._alloc_lines + self._header.lines):
+ self._move_up()
+
+ # Render the one line header
+ text = self._header.render(self._term_width, elapsed)
+ click.echo(text, color=self._colors, err=True)
+
+ # Now we have the number of columns, and an allocation for
+ # alignment of each column
+ n_columns = len(self._alloc_columns)
+ for line in self._job_lines(n_columns):
+ text = ''
+ for job in line:
+ column = line.index(job)
+ text += job.render(self._alloc_columns[column] - job.size, elapsed)
+
+ # Add spacing between columns
+ if column < (n_columns - 1):
+ text += ' ' * self._spacing
+
+ # Print the line
+ click.echo(text, color=self._colors, err=True)
+
+ # Track what we printed last, for the next clear
+ self._last_lines = self._alloc_lines + self._header.lines
+
+ ###################################################
+ # Private Methods #
+ ###################################################
+
+ # _init_terminal()
+ #
+ # Initialize the terminal and return the resolved terminal
+ # capabilities dictionary.
+ #
+ # Returns:
+ # (dict|None): The resolved terminal capabilities dictionary,
+ # or None if the terminal does not support all
+ # of the required capabilities.
+ #
+ def _init_terminal(self):
+
+ # We need both output streams to be connected to a terminal
+ if not (sys.stdout.isatty() and sys.stderr.isatty()):
+ return None
+
+ # Initialize the terminal; curses might decide it doesn't
+ # support this terminal
+ try:
+ curses.setupterm(os.environ.get('TERM', 'dumb'))
+ except curses.error:
+ return None
+
+ term_caps = {}
+
+ # Resolve the string capabilities we need for the capability
+ # names we need.
+ #
+ for capname, capval in self._TERM_CAPABILITIES.items():
+ code = curses.tigetstr(capval)
+
+ # If any of the required capabilities resolve empty strings or None,
+ # then we don't have the capabilities we need for a status bar on
+ # this terminal.
+ if not code:
+ return None
+
+ # Decode sequences as latin1, as they are always 8-bit bytes,
+ # so when b'\xff' is returned, this must be decoded to u'\xff'.
+ #
+ # This technique is employed by the python blessings library
+ # as well, and should provide better compatibility with most
+ # terminals.
+ #
+ term_caps[capname] = code.decode('latin1')
+
+ return term_caps
+
+ def _check_term_width(self):
+ term_width, _ = click.get_terminal_size()
+ if self._term_width != term_width:
+ self._term_width = term_width
+ self._need_alloc = True
+
+ def _move_up(self):
+ assert self._term_caps is not None
+
+ # Explicitly move to beginning of line, fixes things up
+ # when there was a ^C or ^Z printed to the terminal.
+ move_x = curses.tparm(self._term_caps['move_x'].encode('latin1'), 0)
+ move_x = move_x.decode('latin1')
+
+ move_up = curses.tparm(self._term_caps['move_up'].encode('latin1'))
+ move_up = move_up.decode('latin1')
+
+ click.echo(move_x + move_up, nl=False, err=True)
+
+ def _clear_line(self):
+ assert self._term_caps is not None
+
+ clear_eol = curses.tparm(self._term_caps['clear_eol'].encode('latin1'))
+ clear_eol = clear_eol.decode('latin1')
+ click.echo(clear_eol, nl=False, err=True)
+
+ def _allocate(self):
+ if not self._need_alloc:
+ return
+
+ # State when there is no jobs to display
+ alloc_lines = 0
+ alloc_columns = []
+ line_length = 0
+
+ # Test for the widest width which fits columnized jobs
+ for columns in reversed(range(len(self._jobs))):
+ alloc_lines, alloc_columns = self._allocate_columns(columns + 1)
+
+ # If the sum of column widths with spacing in between
+ # fits into the terminal width, this is a good allocation.
+ line_length = sum(alloc_columns) + (columns * self._spacing)
+ if line_length < self._term_width:
+ break
+
+ self._alloc_lines = alloc_lines
+ self._alloc_columns = alloc_columns
+ self._line_length = line_length
+ self._need_alloc = False
+
+ def _job_lines(self, columns):
+ for i in range(0, len(self._jobs), columns):
+ yield self._jobs[i:i + columns]
+
+ # Returns the number of lines needed, and an array of integers
+ # representing the maximum length in characters for each column,
+ # given the current list of jobs to render.
+ #
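+ # A worked example (with illustrative numbers): given four jobs whose
+ # rendered sizes are 12, 20, 8 and 15 characters and a terminal width of
+ # 40, _allocate() tries 4 columns (12+20+8+15 + 3 spacing = 58) and then
+ # 3 columns (15+20+8 + 2 = 45), both of which overflow, and settles on
+ # 2 columns of widths [12, 20] (12+20 + 1 = 33), laid out over 2 lines.
+ #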
+ def _allocate_columns(self, columns):
+ column_widths = [0 for _ in range(columns)]
+ lines = 0
+ for line in self._job_lines(columns):
+ line_len = len(line)
+ lines += 1
+ for col in range(columns):
+ if col < line_len:
+ job = line[col]
+ column_widths[col] = max(column_widths[col], job.size)
+
+ return lines, column_widths
+
+
+# _StatusHeader()
+#
+# A delegate object for rendering the header part of the Status() widget
+#
+# Args:
+# context (Context): The Context
+# content_profile (Profile): Formatting profile for content text
+# format_profile (Profile): Formatting profile for formatting text
+# success_profile (Profile): Formatting profile for success text
+# error_profile (Profile): Formatting profile for error text
+# stream (Stream): The Stream
+#
+class _StatusHeader():
+
+ def __init__(self, context,
+ content_profile, format_profile,
+ success_profile, error_profile,
+ stream):
+
+ #
+ # Public members
+ #
+ self.lines = 3
+
+ #
+ # Private members
+ #
+ self._content_profile = content_profile
+ self._format_profile = format_profile
+ self._success_profile = success_profile
+ self._error_profile = error_profile
+ self._stream = stream
+ self._time_code = TimeCode(context, content_profile, format_profile)
+ self._context = context
+
+ def render(self, line_length, elapsed):
+ project = self._context.get_toplevel_project()
+ line_length = max(line_length, 80)
+
+ #
+ # Line 1: Session time, project name, session / total elements
+ #
+ # ========= 00:00:00 project-name (143/387) =========
+ #
+ session = str(len(self._stream.session_elements))
+ total = str(len(self._stream.total_elements))
+
+ size = 0
+ text = ''
+ size += len(total) + len(session) + 4 # Size for (N/N) with a leading space
+ size += 8 # Size of time code
+ size += len(project.name) + 1
+ text += self._time_code.render_time(elapsed)
+ text += ' ' + self._content_profile.fmt(project.name)
+ text += ' ' + self._format_profile.fmt('(') + \
+ self._content_profile.fmt(session) + \
+ self._format_profile.fmt('/') + \
+ self._content_profile.fmt(total) + \
+ self._format_profile.fmt(')')
+
+ line1 = self._centered(text, size, line_length, '=')
+
+ #
+ # Line 2: Dynamic list of queue status reports
+ #
+ # (Fetched:0 117 0)→ (Built:4 0 0)
+ #
+ size = 0
+ text = ''
+
+ # Format and calculate size for each queue progress
+ for queue in self._stream.queues:
+
+ # Add spacing
+ if self._stream.queues.index(queue) > 0:
+ size += 2
+ text += self._format_profile.fmt('→ ')
+
+ queue_text, queue_size = self._render_queue(queue)
+ size += queue_size
+ text += queue_text
+
+ line2 = self._centered(text, size, line_length, ' ')
+
+ #
+ # Line 3: Cache usage percentage report
+ #
+ # ~~~~~~ cache: 69% ~~~~~~
+ #
+ usage = self._context.get_cache_usage()
+ usage_percent = '{}%'.format(usage.used_percent)
+
+ size = 21
+ size += len(usage_percent)
+ if usage.used_percent >= 95:
+ formatted_usage_percent = self._error_profile.fmt(usage_percent)
+ elif usage.used_percent >= 80:
+ formatted_usage_percent = self._content_profile.fmt(usage_percent)
+ else:
+ formatted_usage_percent = self._success_profile.fmt(usage_percent)
+
+ text = self._format_profile.fmt("~~~~~~ ") + \
+ self._content_profile.fmt('cache') + \
+ self._format_profile.fmt(': ') + \
+ formatted_usage_percent + \
+ self._format_profile.fmt(' ~~~~~~')
+ line3 = self._centered(text, size, line_length, ' ')
+
+ return line1 + '\n' + line2 + '\n' + line3
+
+ ###################################################
+ # Private Methods #
+ ###################################################
+ def _render_queue(self, queue):
+ processed = str(len(queue.processed_elements))
+ skipped = str(len(queue.skipped_elements))
+ failed = str(len(queue.failed_elements))
+
+ size = 5 # Space for the formatting '(', ':', ' ', ' ' and ')'
+ size += len(queue.complete_name)
+ size += len(processed) + len(skipped) + len(failed)
+ text = self._format_profile.fmt("(") + \
+ self._content_profile.fmt(queue.complete_name) + \
+ self._format_profile.fmt(":") + \
+ self._success_profile.fmt(processed) + ' ' + \
+ self._content_profile.fmt(skipped) + ' ' + \
+ self._error_profile.fmt(failed) + \
+ self._format_profile.fmt(")")
+
+ return (text, size)
+
+ def _centered(self, text, size, line_length, fill):
+ remaining = line_length - size
+ remaining -= 2
+
+ final_text = self._format_profile.fmt(fill * (remaining // 2)) + ' '
+ final_text += text
+ final_text += ' ' + self._format_profile.fmt(fill * (remaining // 2))
+
+ return final_text
+
+
+# _StatusJob()
+#
+# A delegate object for rendering a job in the status area
+#
+# Args:
+# context (Context): The Context
+# job (Job): The job being processed
+# content_profile (Profile): Formatting profile for content text
+# format_profile (Profile): Formatting profile for formatting text
+# elapsed (datetime): The offset into the session when this job is created
+#
+class _StatusJob():
+
+ def __init__(self, context, job, content_profile, format_profile, elapsed):
+ action_name = job.action_name
+ if not isinstance(job, ElementJob):
+ element = None
+ else:
+ element = job.element
+
+ #
+ # Public members
+ #
+ self.element = element # The Element
+ self.action_name = action_name # The action name
+ self.size = None # The number of characters required to render
+ self.full_name = element._get_full_name() if element else action_name
+
+ #
+ # Private members
+ #
+ self._offset = elapsed
+ self._content_profile = content_profile
+ self._format_profile = format_profile
+ self._time_code = TimeCode(context, content_profile, format_profile)
+
+ # Calculate the size needed to display
+ self.size = 10 # Size of time code with brackets
+ self.size += len(action_name)
+ self.size += len(self.full_name)
+ self.size += 3 # '[' + ':' + ']'
+
+ # render()
+ #
+ # Render the Job, return a rendered string
+ #
+ # Args:
+ # padding (int): Amount of padding to print in order to align with columns
+ # elapsed (datetime): The session elapsed time offset
+ #
+ def render(self, padding, elapsed):
+ text = self._format_profile.fmt('[') + \
+ self._time_code.render_time(elapsed - self._offset) + \
+ self._format_profile.fmt(']')
+
+ # Add padding after the display name, before terminating ']'
+ name = self.full_name + (' ' * padding)
+ text += self._format_profile.fmt('[') + \
+ self._content_profile.fmt(self.action_name) + \
+ self._format_profile.fmt(':') + \
+ self._content_profile.fmt(name) + \
+ self._format_profile.fmt(']')
+
+ return text
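A note on the size bookkeeping used throughout the status widgets above: each widget tracks a hand-counted `size` alongside the formatted `text` because the Profile formatters appear to wrap their output in terminal escape sequences, so `len()` of the formatted string overstates the visible width. The snippet below is a minimal standalone sketch of that idea, not part of the patch; it uses `click.style` directly and an illustrative `centered()` helper in place of the real Profile/`_centered()` code.

    import click

    visible = "cache: 69%"                        # the text the user actually sees
    formatted = click.style(visible, fg="green")  # the text that gets printed

    # The escape codes inflate the length, so padding must be computed
    # from the visible width, not from len(formatted)
    assert len(formatted) > len(visible)

    def centered(text, size, line_length, fill):
        # Mirror of the _centered() logic above: pad with 'fill' characters
        # based on the separately tracked visible size
        remaining = line_length - size - 2
        pad = fill * (remaining // 2)
        return pad + ' ' + text + ' ' + pad

    print(centered(formatted, len(visible), 40, '~'))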
diff --git a/src/buildstream/_frontend/widget.py b/src/buildstream/_frontend/widget.py
new file mode 100644
index 000000000..cfe3a06e9
--- /dev/null
+++ b/src/buildstream/_frontend/widget.py
@@ -0,0 +1,806 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+import datetime
+import os
+from collections import defaultdict, OrderedDict
+from contextlib import ExitStack
+from mmap import mmap
+import re
+import textwrap
+from ruamel import yaml
+import click
+
+from .profile import Profile
+from .. import Element, Consistency, Scope
+from .. import _yaml
+from .. import __version__ as bst_version
+from .._exceptions import ImplError
+from .._message import MessageType
+from ..plugin import Plugin
+
+
+# These messages are printed a bit differently
+ERROR_MESSAGES = [MessageType.FAIL, MessageType.ERROR, MessageType.BUG]
+
+
+# Widget()
+#
+# An abstract class for printing output columns in our text UI.
+#
+# Args:
+#    context (Context): The Context
+#    content_profile (Profile): The profile to use for rendering content
+#    format_profile (Profile): The profile to use for rendering formatting
+#
+class Widget():
+
+ def __init__(self, context, content_profile, format_profile):
+
+ # The context
+ self.context = context
+
+ # The content profile
+ self.content_profile = content_profile
+
+ # The formatting profile
+ self.format_profile = format_profile
+
+ # render()
+ #
+ # Renders a string to be printed in the UI
+ #
+ # Args:
+ # message (Message): A message to print
+ #
+ # Returns:
+ # (str): The string this widget prints for the given message
+ #
+ def render(self, message):
+ raise ImplError("{} does not implement render()".format(type(self).__name__))
+
+
+# Used to add spacing between columns
+class Space(Widget):
+
+ def render(self, message):
+ return ' '
+
+
+# Used to add fixed text between columns
+class FixedText(Widget):
+
+ def __init__(self, context, text, content_profile, format_profile):
+ super(FixedText, self).__init__(context, content_profile, format_profile)
+ self.text = text
+
+ def render(self, message):
+ return self.format_profile.fmt(self.text)
+
+
+# Used to add the wallclock time this message was created at
+class WallclockTime(Widget):
+ def __init__(self, context, content_profile, format_profile, output_format=False):
+ self._output_format = output_format
+ super(WallclockTime, self).__init__(context, content_profile, format_profile)
+
+ def render(self, message):
+
+ fields = [self.content_profile.fmt("{:02d}".format(x)) for x in
+ [message.creation_time.hour,
+ message.creation_time.minute,
+ message.creation_time.second,
+ ]
+ ]
+ text = self.format_profile.fmt(":").join(fields)
+
+ if self._output_format == 'us':
+ text += self.content_profile.fmt(".{:06d}".format(message.creation_time.microsecond))
+
+ return text
+
+
+# A widget for rendering the debugging column
+class Debug(Widget):
+
+ def render(self, message):
+ unique_id = 0 if message.unique_id is None else message.unique_id
+
+ text = self.format_profile.fmt('pid:')
+ text += self.content_profile.fmt("{: <5}".format(message.pid))
+ text += self.format_profile.fmt(" id:")
+ text += self.content_profile.fmt("{:0>3}".format(unique_id))
+
+ return text
+
+
+# A widget for rendering the time codes
+class TimeCode(Widget):
+ def __init__(self, context, content_profile, format_profile, microseconds=False):
+ self._microseconds = microseconds
+ super(TimeCode, self).__init__(context, content_profile, format_profile)
+
+ def render(self, message):
+ return self.render_time(message.elapsed)
+
+ def render_time(self, elapsed):
+ if elapsed is None:
+ fields = [
+ self.content_profile.fmt('--')
+ for i in range(3)
+ ]
+ else:
+ hours, remainder = divmod(int(elapsed.total_seconds()), 60 * 60)
+ minutes, seconds = divmod(remainder, 60)
+ fields = [
+ self.content_profile.fmt("{0:02d}".format(field))
+ for field in [hours, minutes, seconds]
+ ]
+
+ text = self.format_profile.fmt(':').join(fields)
+
+ if self._microseconds:
+ if elapsed is not None:
+ text += self.content_profile.fmt(".{0:06d}".format(elapsed.microseconds))
+ else:
+ text += self.content_profile.fmt(".------")
+ return text
+
+
+# A widget for rendering the MessageType
+class TypeName(Widget):
+
+ _action_colors = {
+ MessageType.DEBUG: "cyan",
+ MessageType.STATUS: "cyan",
+ MessageType.INFO: "magenta",
+ MessageType.WARN: "yellow",
+ MessageType.START: "blue",
+ MessageType.SUCCESS: "green",
+ MessageType.FAIL: "red",
+ MessageType.SKIPPED: "yellow",
+ MessageType.ERROR: "red",
+ MessageType.BUG: "red",
+ }
+
+ def render(self, message):
+ return self.content_profile.fmt("{: <7}"
+ .format(message.message_type.upper()),
+ bold=True, dim=True,
+ fg=self._action_colors[message.message_type])
+
+
+# A widget for displaying the Element name
+class ElementName(Widget):
+
+ def render(self, message):
+ action_name = message.action_name
+ element_id = message.task_id or message.unique_id
+ if element_id is not None:
+ plugin = Plugin._lookup(element_id)
+ name = plugin._get_full_name()
+ name = '{: <30}'.format(name)
+ else:
+ name = 'core activity'
+ name = '{: <30}'.format(name)
+
+ if not action_name:
+ action_name = "Main"
+
+ return self.content_profile.fmt("{: >8}".format(action_name.lower())) + \
+ self.format_profile.fmt(':') + self.content_profile.fmt(name)
+
+
+# A widget for displaying the primary message text
+class MessageText(Widget):
+
+ def render(self, message):
+ return message.message
+
+
+# A widget for formatting the element cache key
+class CacheKey(Widget):
+
+ def __init__(self, context, content_profile, format_profile, err_profile):
+ super(CacheKey, self).__init__(context, content_profile, format_profile)
+
+ self._err_profile = err_profile
+ self._key_length = context.log_key_length
+
+ def render(self, message):
+
+ element_id = message.task_id or message.unique_id
+ if not self._key_length:
+ return ""
+
+ if element_id is None:
+ return ' ' * self._key_length
+
+ missing = False
+ key = ' ' * self._key_length
+ plugin = Plugin._lookup(element_id)
+ if isinstance(plugin, Element):
+ _, key, missing = plugin._get_display_key()
+
+ if message.message_type in ERROR_MESSAGES:
+ text = self._err_profile.fmt(key)
+ else:
+ text = self.content_profile.fmt(key, dim=missing)
+
+ return text
+
+
+# A widget for formatting the log file
+class LogFile(Widget):
+
+ def __init__(self, context, content_profile, format_profile, err_profile):
+ super(LogFile, self).__init__(context, content_profile, format_profile)
+
+ self._err_profile = err_profile
+ self._logdir = context.logdir
+
+ def render(self, message, abbrev=True):
+
+ if message.logfile and message.scheduler:
+ logfile = message.logfile
+
+ if abbrev and self._logdir != "" and logfile.startswith(self._logdir):
+ logfile = logfile[len(self._logdir):]
+ logfile = logfile.lstrip(os.sep)
+
+ if message.message_type in ERROR_MESSAGES:
+ text = self._err_profile.fmt(logfile)
+ else:
+ text = self.content_profile.fmt(logfile, dim=True)
+ else:
+ text = ''
+
+ return text
+
+
+# START and SUCCESS messages are expected to have no useful
+# information in the message text, so we display the logfile name for
+# these messages, and the message text for other types.
+#
+class MessageOrLogFile(Widget):
+ def __init__(self, context, content_profile, format_profile, err_profile):
+ super(MessageOrLogFile, self).__init__(context, content_profile, format_profile)
+ self._message_widget = MessageText(context, content_profile, format_profile)
+ self._logfile_widget = LogFile(context, content_profile, format_profile, err_profile)
+
+ def render(self, message):
+ # Show the log file only in the main start/success messages
+ if message.logfile and message.scheduler and \
+ message.message_type in [MessageType.START, MessageType.SUCCESS]:
+ text = self._logfile_widget.render(message)
+ else:
+ text = self._message_widget.render(message)
+ return text
+
+
+# LogLine
+#
+# A widget for formatting a log line
+#
+# Args:
+# context (Context): The Context
+# content_profile (Profile): Formatting profile for content text
+# format_profile (Profile): Formatting profile for formatting text
+# success_profile (Profile): Formatting profile for success text
+# error_profile (Profile): Formatting profile for error text
+# detail_profile (Profile): Formatting profile for detail text
+# indent (int): Number of spaces to use for general indentation
+#
+class LogLine(Widget):
+
+ def __init__(self, context,
+ content_profile,
+ format_profile,
+ success_profile,
+ err_profile,
+ detail_profile,
+ indent=4):
+ super(LogLine, self).__init__(context, content_profile, format_profile)
+
+ self._columns = []
+ self._failure_messages = defaultdict(list)
+ self._success_profile = success_profile
+ self._err_profile = err_profile
+ self._detail_profile = detail_profile
+ self._indent = ' ' * indent
+ self._log_lines = context.log_error_lines
+ self._message_lines = context.log_message_lines
+ self._resolved_keys = None
+
+ self._space_widget = Space(context, content_profile, format_profile)
+ self._logfile_widget = LogFile(context, content_profile, format_profile, err_profile)
+
+ if context.log_debug:
+ self._columns.extend([
+ Debug(context, content_profile, format_profile)
+ ])
+
+ self.logfile_variable_names = {
+ "elapsed": TimeCode(context, content_profile, format_profile, microseconds=False),
+ "elapsed-us": TimeCode(context, content_profile, format_profile, microseconds=True),
+ "wallclock": WallclockTime(context, content_profile, format_profile),
+ "wallclock-us": WallclockTime(context, content_profile, format_profile, output_format='us'),
+ "key": CacheKey(context, content_profile, format_profile, err_profile),
+ "element": ElementName(context, content_profile, format_profile),
+ "action": TypeName(context, content_profile, format_profile),
+ "message": MessageOrLogFile(context, content_profile, format_profile, err_profile)
+ }
+ logfile_tokens = self._parse_logfile_format(context.log_message_format, content_profile, format_profile)
+ self._columns.extend(logfile_tokens)
+
+ # show_pipeline()
+ #
+ # Display a list of elements in the specified format.
+ #
+    # The formatting string is the one currently documented for `bst show`;
+    # it is used both in pipeline session headings and to implement `bst show`.
+ #
+ # Args:
+ # dependencies (list of Element): A list of Element objects
+    #    format_ (str): A formatting string, as specified by `bst show`
+ #
+ # Returns:
+ # (str): The formatted list of elements
+ #
+ def show_pipeline(self, dependencies, format_):
+ report = ''
+ p = Profile()
+
+ for element in dependencies:
+ line = format_
+
+ full_key, cache_key, dim_keys = element._get_display_key()
+
+ line = p.fmt_subst(line, 'name', element._get_full_name(), fg='blue', bold=True)
+ line = p.fmt_subst(line, 'key', cache_key, fg='yellow', dim=dim_keys)
+ line = p.fmt_subst(line, 'full-key', full_key, fg='yellow', dim=dim_keys)
+
+ consistency = element._get_consistency()
+ if consistency == Consistency.INCONSISTENT:
+ line = p.fmt_subst(line, 'state', "no reference", fg='red')
+ else:
+ if element._cached_failure():
+ line = p.fmt_subst(line, 'state', "failed", fg='red')
+ elif element._cached_success():
+ line = p.fmt_subst(line, 'state', "cached", fg='magenta')
+ elif consistency == Consistency.RESOLVED and not element._source_cached():
+ line = p.fmt_subst(line, 'state', "fetch needed", fg='red')
+ elif element._buildable():
+ line = p.fmt_subst(line, 'state', "buildable", fg='green')
+ else:
+ line = p.fmt_subst(line, 'state', "waiting", fg='blue')
+
+ # Element configuration
+ if "%{config" in format_:
+ config = _yaml.node_sanitize(element._Element__config)
+ line = p.fmt_subst(
+ line, 'config',
+ yaml.round_trip_dump(config, default_flow_style=False, allow_unicode=True))
+
+ # Variables
+ if "%{vars" in format_:
+ variables = _yaml.node_sanitize(element._Element__variables.flat)
+ line = p.fmt_subst(
+ line, 'vars',
+ yaml.round_trip_dump(variables, default_flow_style=False, allow_unicode=True))
+
+ # Environment
+ if "%{env" in format_:
+ environment = _yaml.node_sanitize(element._Element__environment)
+ line = p.fmt_subst(
+ line, 'env',
+ yaml.round_trip_dump(environment, default_flow_style=False, allow_unicode=True))
+
+ # Public
+ if "%{public" in format_:
+ environment = _yaml.node_sanitize(element._Element__public)
+ line = p.fmt_subst(
+ line, 'public',
+ yaml.round_trip_dump(environment, default_flow_style=False, allow_unicode=True))
+
+ # Workspaced
+ if "%{workspaced" in format_:
+ line = p.fmt_subst(
+ line, 'workspaced',
+ '(workspaced)' if element._get_workspace() else '', fg='yellow')
+
+ # Workspace-dirs
+ if "%{workspace-dirs" in format_:
+ workspace = element._get_workspace()
+ if workspace is not None:
+ path = workspace.get_absolute_path()
+ if path.startswith("~/"):
+ path = os.path.join(os.getenv('HOME', '/root'), path[2:])
+ line = p.fmt_subst(line, 'workspace-dirs', "Workspace: {}".format(path))
+ else:
+ line = p.fmt_subst(
+ line, 'workspace-dirs', '')
+
+ # Dependencies
+ if "%{deps" in format_:
+ deps = [e.name for e in element.dependencies(Scope.ALL, recurse=False)]
+ line = p.fmt_subst(
+ line, 'deps',
+ yaml.safe_dump(deps, default_style=None).rstrip('\n'))
+
+ # Build Dependencies
+ if "%{build-deps" in format_:
+ build_deps = [e.name for e in element.dependencies(Scope.BUILD, recurse=False)]
+ line = p.fmt_subst(
+ line, 'build-deps',
+ yaml.safe_dump(build_deps, default_style=False).rstrip('\n'))
+
+ # Runtime Dependencies
+ if "%{runtime-deps" in format_:
+ runtime_deps = [e.name for e in element.dependencies(Scope.RUN, recurse=False)]
+ line = p.fmt_subst(
+ line, 'runtime-deps',
+ yaml.safe_dump(runtime_deps, default_style=False).rstrip('\n'))
+
+ report += line + '\n'
+
+ return report.rstrip('\n')
+
+ # print_heading()
+ #
+    # A message to be printed at program startup, summarizing the
+    # BuildStream version, user configuration, project options,
+    # loaded plugins and the pipeline to be processed.
+ #
+ # Args:
+ # project (Project): The toplevel project we were invoked from
+ # stream (Stream): The stream
+ # log_file (file): An optional file handle for additional logging
+ # styling (bool): Whether to enable ansi escape codes in the output
+ #
+ def print_heading(self, project, stream, *, log_file, styling=False):
+ context = self.context
+ starttime = datetime.datetime.now()
+ text = ''
+
+ self._resolved_keys = {element: element._get_cache_key() for element in stream.session_elements}
+
+ # Main invocation context
+ text += '\n'
+ text += self.content_profile.fmt("BuildStream Version {}\n".format(bst_version), bold=True)
+ values = OrderedDict()
+ values["Session Start"] = starttime.strftime('%A, %d-%m-%Y at %H:%M:%S')
+ values["Project"] = "{} ({})".format(project.name, project.directory)
+ values["Targets"] = ", ".join([t.name for t in stream.targets])
+ values["Cache Usage"] = str(context.get_cache_usage())
+ text += self._format_values(values)
+
+ # User configurations
+ text += '\n'
+ text += self.content_profile.fmt("User Configuration\n", bold=True)
+ values = OrderedDict()
+ values["Configuration File"] = \
+ "Default Configuration" if not context.config_origin else context.config_origin
+ values["Cache Directory"] = context.cachedir
+ values["Log Files"] = context.logdir
+ values["Source Mirrors"] = context.sourcedir
+ values["Build Area"] = context.builddir
+ values["Strict Build Plan"] = "Yes" if context.get_strict() else "No"
+ values["Maximum Fetch Tasks"] = context.sched_fetchers
+ values["Maximum Build Tasks"] = context.sched_builders
+ values["Maximum Push Tasks"] = context.sched_pushers
+ values["Maximum Network Retries"] = context.sched_network_retries
+ text += self._format_values(values)
+ text += '\n'
+
+ # Project Options
+ values = OrderedDict()
+ project.options.printable_variables(values)
+ if values:
+ text += self.content_profile.fmt("Project Options\n", bold=True)
+ text += self._format_values(values)
+ text += '\n'
+
+ # Plugins
+ text += self._format_plugins(project.first_pass_config.element_factory.loaded_dependencies,
+ project.first_pass_config.source_factory.loaded_dependencies)
+ if project.config.element_factory and project.config.source_factory:
+ text += self._format_plugins(project.config.element_factory.loaded_dependencies,
+ project.config.source_factory.loaded_dependencies)
+
+ # Pipeline state
+ text += self.content_profile.fmt("Pipeline\n", bold=True)
+ text += self.show_pipeline(stream.total_elements, context.log_element_format)
+ text += '\n'
+
+ # Separator line before following output
+ text += self.format_profile.fmt("=" * 79 + '\n')
+
+ click.echo(text, color=styling, nl=False, err=True)
+ if log_file:
+ click.echo(text, file=log_file, color=False, nl=False)
+
+ # print_summary()
+ #
+ # Print a summary of activities at the end of a session
+ #
+ # Args:
+ # stream (Stream): The Stream
+ # log_file (file): An optional file handle for additional logging
+ # styling (bool): Whether to enable ansi escape codes in the output
+ #
+ def print_summary(self, stream, log_file, styling=False):
+
+        # Early silent return if there are no queues; this can only
+        # happen when the stream returned early due to an inconsistent
+        # pipeline state.
+ if not stream.queues:
+ return
+
+ text = ''
+
+ assert self._resolved_keys is not None
+ elements = sorted(e for (e, k) in self._resolved_keys.items() if k != e._get_cache_key())
+ if elements:
+ text += self.content_profile.fmt("Resolved key Summary\n", bold=True)
+ text += self.show_pipeline(elements, self.context.log_element_format)
+ text += "\n\n"
+
+ if self._failure_messages:
+ values = OrderedDict()
+
+ for element, messages in sorted(self._failure_messages.items(), key=lambda x: x[0].name):
+ for queue in stream.queues:
+ if any(el.name == element.name for el in queue.failed_elements):
+ values[element.name] = ''.join(self._render(v) for v in messages)
+ if values:
+ text += self.content_profile.fmt("Failure Summary\n", bold=True)
+ text += self._format_values(values, style_value=False)
+
+ text += self.content_profile.fmt("Pipeline Summary\n", bold=True)
+ values = OrderedDict()
+
+ values['Total'] = self.content_profile.fmt(str(len(stream.total_elements)))
+ values['Session'] = self.content_profile.fmt(str(len(stream.session_elements)))
+
+ processed_maxlen = 1
+ skipped_maxlen = 1
+ failed_maxlen = 1
+ for queue in stream.queues:
+ processed_maxlen = max(len(str(len(queue.processed_elements))), processed_maxlen)
+ skipped_maxlen = max(len(str(len(queue.skipped_elements))), skipped_maxlen)
+ failed_maxlen = max(len(str(len(queue.failed_elements))), failed_maxlen)
+
+ for queue in stream.queues:
+ processed = str(len(queue.processed_elements))
+ skipped = str(len(queue.skipped_elements))
+ failed = str(len(queue.failed_elements))
+
+ processed_align = ' ' * (processed_maxlen - len(processed))
+ skipped_align = ' ' * (skipped_maxlen - len(skipped))
+ failed_align = ' ' * (failed_maxlen - len(failed))
+
+ status_text = self.content_profile.fmt("processed ") + \
+ self._success_profile.fmt(processed) + \
+ self.format_profile.fmt(', ') + processed_align
+
+ status_text += self.content_profile.fmt("skipped ") + \
+ self.content_profile.fmt(skipped) + \
+ self.format_profile.fmt(', ') + skipped_align
+
+ status_text += self.content_profile.fmt("failed ") + \
+ self._err_profile.fmt(failed) + ' ' + failed_align
+ values["{} Queue".format(queue.action_name)] = status_text
+
+ text += self._format_values(values, style_value=False)
+
+ click.echo(text, color=styling, nl=False, err=True)
+ if log_file:
+ click.echo(text, file=log_file, color=False, nl=False)
+
+ ###################################################
+ # Widget Abstract Methods #
+ ###################################################
+
+ def render(self, message):
+
+ # Track logfiles for later use
+ element_id = message.task_id or message.unique_id
+ if message.message_type in ERROR_MESSAGES and element_id is not None:
+ plugin = Plugin._lookup(element_id)
+ self._failure_messages[plugin].append(message)
+
+ return self._render(message)
+
+ ###################################################
+ # Private Methods #
+ ###################################################
+ def _parse_logfile_format(self, format_string, content_profile, format_profile):
+ logfile_tokens = []
+ while format_string:
+ if format_string.startswith("%%"):
+ logfile_tokens.append(FixedText(self.context, "%", content_profile, format_profile))
+ format_string = format_string[2:]
+ continue
+ m = re.search(r"^%\{([^\}]+)\}", format_string)
+ if m is not None:
+ variable = m.group(1)
+ format_string = format_string[m.end(0):]
+ if variable not in self.logfile_variable_names:
+ raise Exception("'{0}' is not a valid log variable name.".format(variable))
+ logfile_tokens.append(self.logfile_variable_names[variable])
+ else:
+ m = re.search("^[^%]+", format_string)
+ if m is not None:
+ text = FixedText(self.context, m.group(0), content_profile, format_profile)
+ format_string = format_string[m.end(0):]
+ logfile_tokens.append(text)
+ else:
+ # No idea what to do now
+ raise Exception("'{0}' could not be parsed into a valid logging format.".format(format_string))
+ return logfile_tokens
+
+ def _render(self, message):
+
+ # Render the column widgets first
+ text = ''
+ for widget in self._columns:
+ text += widget.render(message)
+
+ text += '\n'
+
+ extra_nl = False
+
+ # Now add some custom things
+ if message.detail:
+
+ # Identify frontend messages, we never abbreviate these
+ frontend_message = not (message.task_id or message.unique_id)
+
+ # Split and truncate message detail down to message_lines lines
+ lines = message.detail.splitlines(True)
+
+ n_lines = len(lines)
+ abbrev = False
+ if message.message_type not in ERROR_MESSAGES \
+ and not frontend_message and n_lines > self._message_lines:
+ lines = lines[0:self._message_lines]
+ if self._message_lines > 0:
+ abbrev = True
+ else:
+ lines[n_lines - 1] = lines[n_lines - 1].rstrip('\n')
+
+ detail = self._indent + self._indent.join(lines)
+
+ text += '\n'
+ if message.message_type in ERROR_MESSAGES:
+ text += self._err_profile.fmt(detail, bold=True)
+ else:
+ text += self._detail_profile.fmt(detail)
+
+ if abbrev:
+ text += self._indent + \
+ self.content_profile.fmt('Message contains {} additional lines'
+ .format(n_lines - self._message_lines), dim=True)
+ text += '\n'
+
+ extra_nl = True
+
+ if message.scheduler and message.message_type == MessageType.FAIL:
+ text += '\n'
+
+ if self.context is not None and not self.context.log_verbose:
+ text += self._indent + self._err_profile.fmt("Log file: ")
+ text += self._indent + self._logfile_widget.render(message) + '\n'
+ elif self._log_lines > 0:
+ text += self._indent + self._err_profile.fmt("Printing the last {} lines from log file:"
+ .format(self._log_lines)) + '\n'
+ text += self._indent + self._logfile_widget.render(message, abbrev=False) + '\n'
+ text += self._indent + self._err_profile.fmt("=" * 70) + '\n'
+
+ log_content = self._read_last_lines(message.logfile)
+ log_content = textwrap.indent(log_content, self._indent)
+ text += self._detail_profile.fmt(log_content)
+ text += '\n'
+ text += self._indent + self._err_profile.fmt("=" * 70) + '\n'
+ extra_nl = True
+
+ if extra_nl:
+ text += '\n'
+
+ return text
+
+ def _read_last_lines(self, logfile):
+ with ExitStack() as stack:
+ # mmap handles low-level memory details, allowing for
+ # faster searches
+ f = stack.enter_context(open(logfile, 'r+'))
+ log = stack.enter_context(mmap(f.fileno(), os.path.getsize(f.name)))
+
+ count = 0
+ end = log.size() - 1
+
+ while count < self._log_lines and end >= 0:
+ location = log.rfind(b'\n', 0, end)
+ count += 1
+
+ # If location is -1 (none found), this will print the
+ # first character because of the later +1
+ end = location
+
+ # end+1 is correct whether or not a newline was found at
+ # that location. If end is -1 (seek before beginning of file)
+            # then we get the first character. If end is a newline position,
+ # we discard it and only want to print the beginning of the next
+ # line.
+ lines = log[(end + 1):].splitlines()
+ return '\n'.join([line.decode('utf-8') for line in lines]).rstrip()
+
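The `_read_last_lines()` helper above walks backwards through the log with `mmap.rfind()` rather than reading the whole file, which keeps the failure summaries cheap even for large logs. Below is a standalone sketch of the same tail-reading idea, not from the patch; like the original it assumes the log file is non-empty, since an empty file cannot be mmapped.

    import os
    from mmap import mmap

    def tail(path, n_lines):
        # Scan backwards for newlines so only the tail of the file is decoded
        with open(path, 'r+') as f, mmap(f.fileno(), os.path.getsize(path)) as log:
            end = log.size() - 1
            count = 0
            while count < n_lines and end >= 0:
                end = log.rfind(b'\n', 0, end)
                count += 1
            # end is either -1 (start of file) or the newline just before
            # the last n_lines lines
            return log[end + 1:].decode('utf-8').rstrip()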
+ def _format_plugins(self, element_plugins, source_plugins):
+ text = ""
+
+ if not (element_plugins or source_plugins):
+ return text
+
+ text += self.content_profile.fmt("Loaded Plugins\n", bold=True)
+
+ if element_plugins:
+ text += self.format_profile.fmt(" Element Plugins\n")
+ for plugin in element_plugins:
+ text += self.content_profile.fmt(" - {}\n".format(plugin))
+
+ if source_plugins:
+ text += self.format_profile.fmt(" Source Plugins\n")
+ for plugin in source_plugins:
+ text += self.content_profile.fmt(" - {}\n".format(plugin))
+
+ text += '\n'
+
+ return text
+
+ # _format_values()
+ #
+ # Formats an indented dictionary of titles / values, ensuring
+ # the values are aligned.
+ #
+ # Args:
+ # values: A dictionary, usually an OrderedDict()
+ # style_value: Whether to use the content profile for the values
+ #
+ # Returns:
+ # (str): The formatted values
+ #
+ def _format_values(self, values, style_value=True):
+ text = ''
+ max_key_len = 0
+ for key, value in values.items():
+ max_key_len = max(len(key), max_key_len)
+
+ for key, value in values.items():
+ if isinstance(value, str) and '\n' in value:
+ text += self.format_profile.fmt(" {}:\n".format(key))
+ text += textwrap.indent(value, self._indent)
+ continue
+
+ text += self.format_profile.fmt(" {}: {}".format(key, ' ' * (max_key_len - len(key))))
+ if style_value:
+ text += self.content_profile.fmt(str(value))
+ else:
+ text += str(value)
+ text += '\n'
+
+ return text
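The `%{...}` tokens consumed by `_parse_logfile_format()` above drive the configurable log line layout: the format string is scanned left to right and split into literal text and variable tokens, each variable naming one of the column widgets registered in `logfile_variable_names`. The following is a standalone sketch of the same scanning approach, not taken from the patch, using a simplified token representation instead of widget instances.

    import re

    def parse_format(format_string, known_variables):
        tokens = []
        while format_string:
            # A literal percent sign is written as '%%'
            if format_string.startswith("%%"):
                tokens.append(("text", "%"))
                format_string = format_string[2:]
                continue
            # A '%{name}' token selects a named column widget
            m = re.match(r"%\{([^\}]+)\}", format_string)
            if m is not None:
                name = m.group(1)
                if name not in known_variables:
                    raise ValueError("unknown log variable: {}".format(name))
                tokens.append(("variable", name))
                format_string = format_string[m.end():]
                continue
            # Anything else up to the next '%' is literal text
            m = re.match(r"[^%]+", format_string)
            if m is None:
                raise ValueError("cannot parse: {}".format(format_string))
            tokens.append(("text", m.group(0)))
            format_string = format_string[m.end():]
        return tokens

    print(parse_format("[%{elapsed}] %{message}", {"elapsed", "message"}))
    # [('text', '['), ('variable', 'elapsed'), ('text', '] '), ('variable', 'message')]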
diff --git a/src/buildstream/_fuse/__init__.py b/src/buildstream/_fuse/__init__.py
new file mode 100644
index 000000000..a5e882634
--- /dev/null
+++ b/src/buildstream/_fuse/__init__.py
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .hardlinks import SafeHardlinks
diff --git a/src/buildstream/_fuse/fuse.py b/src/buildstream/_fuse/fuse.py
new file mode 100644
index 000000000..4ff6b9903
--- /dev/null
+++ b/src/buildstream/_fuse/fuse.py
@@ -0,0 +1,1006 @@
+# This is an embedded copy of fuse.py taken from the following upstream commit:
+#
+# https://github.com/terencehonles/fusepy/commit/0eafeb557e0e70926ed9450008ef17057d302391
+#
+# Our local modifications are recorded in the Git history of this repo.
+
+# Copyright (c) 2012 Terence Honles <terence@honles.com> (maintainer)
+# Copyright (c) 2008 Giorgos Verigakis <verigak@gmail.com> (author)
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# pylint: skip-file
+
+from __future__ import print_function, absolute_import, division
+
+from ctypes import *
+from ctypes.util import find_library
+from errno import *
+from os import strerror
+from platform import machine, system
+from signal import signal, SIGINT, SIG_DFL
+from stat import S_IFDIR
+from traceback import print_exc
+
+import logging
+
+try:
+ from functools import partial
+except ImportError:
+ # http://docs.python.org/library/functools.html#functools.partial
+ def partial(func, *args, **keywords):
+ def newfunc(*fargs, **fkeywords):
+ newkeywords = keywords.copy()
+ newkeywords.update(fkeywords)
+ return func(*(args + fargs), **newkeywords)
+
+ newfunc.func = func
+ newfunc.args = args
+ newfunc.keywords = keywords
+ return newfunc
+
+try:
+ basestring
+except NameError:
+ basestring = str
+
+class c_timespec(Structure):
+ _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
+
+class c_utimbuf(Structure):
+ _fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
+
+class c_stat(Structure):
+ pass # Platform dependent
+
+_system = system()
+_machine = machine()
+
+if _system == 'Darwin':
+ _libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL) # libfuse dependency
+ _libfuse_path = (find_library('fuse4x') or find_library('osxfuse') or
+ find_library('fuse'))
+else:
+ _libfuse_path = find_library('fuse')
+
+if not _libfuse_path:
+ raise EnvironmentError('Unable to find libfuse')
+else:
+ _libfuse = CDLL(_libfuse_path)
+
+if _system == 'Darwin' and hasattr(_libfuse, 'macfuse_version'):
+ _system = 'Darwin-MacFuse'
+
+
+if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
+ ENOTSUP = 45
+ c_dev_t = c_int32
+ c_fsblkcnt_t = c_ulong
+ c_fsfilcnt_t = c_ulong
+ c_gid_t = c_uint32
+ c_mode_t = c_uint16
+ c_off_t = c_int64
+ c_pid_t = c_int32
+ c_uid_t = c_uint32
+ setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
+ c_size_t, c_int, c_uint32)
+ getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
+ c_size_t, c_uint32)
+ if _system == 'Darwin':
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('st_mode', c_mode_t),
+ ('st_nlink', c_uint16),
+ ('st_ino', c_uint64),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('st_rdev', c_dev_t),
+ ('st_atimespec', c_timespec),
+ ('st_mtimespec', c_timespec),
+ ('st_ctimespec', c_timespec),
+ ('st_birthtimespec', c_timespec),
+ ('st_size', c_off_t),
+ ('st_blocks', c_int64),
+ ('st_blksize', c_int32),
+ ('st_flags', c_int32),
+ ('st_gen', c_int32),
+ ('st_lspare', c_int32),
+ ('st_qspare', c_int64)]
+ else:
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('st_ino', c_uint32),
+ ('st_mode', c_mode_t),
+ ('st_nlink', c_uint16),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('st_rdev', c_dev_t),
+ ('st_atimespec', c_timespec),
+ ('st_mtimespec', c_timespec),
+ ('st_ctimespec', c_timespec),
+ ('st_size', c_off_t),
+ ('st_blocks', c_int64),
+ ('st_blksize', c_int32)]
+elif _system == 'Linux':
+ ENOTSUP = 95
+ c_dev_t = c_ulonglong
+ c_fsblkcnt_t = c_ulonglong
+ c_fsfilcnt_t = c_ulonglong
+ c_gid_t = c_uint
+ c_mode_t = c_uint
+ c_off_t = c_longlong
+ c_pid_t = c_int
+ c_uid_t = c_uint
+ setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
+ c_size_t, c_int)
+
+ getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
+ c_size_t)
+
+ if _machine == 'x86_64':
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('st_ino', c_ulong),
+ ('st_nlink', c_ulong),
+ ('st_mode', c_mode_t),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('__pad0', c_int),
+ ('st_rdev', c_dev_t),
+ ('st_size', c_off_t),
+ ('st_blksize', c_long),
+ ('st_blocks', c_long),
+ ('st_atimespec', c_timespec),
+ ('st_mtimespec', c_timespec),
+ ('st_ctimespec', c_timespec)]
+ elif _machine == 'mips':
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('__pad1_1', c_ulong),
+ ('__pad1_2', c_ulong),
+ ('__pad1_3', c_ulong),
+ ('st_ino', c_ulong),
+ ('st_mode', c_mode_t),
+ ('st_nlink', c_ulong),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('st_rdev', c_dev_t),
+ ('__pad2_1', c_ulong),
+ ('__pad2_2', c_ulong),
+ ('st_size', c_off_t),
+ ('__pad3', c_ulong),
+ ('st_atimespec', c_timespec),
+ ('__pad4', c_ulong),
+ ('st_mtimespec', c_timespec),
+ ('__pad5', c_ulong),
+ ('st_ctimespec', c_timespec),
+ ('__pad6', c_ulong),
+ ('st_blksize', c_long),
+ ('st_blocks', c_long),
+ ('__pad7_1', c_ulong),
+ ('__pad7_2', c_ulong),
+ ('__pad7_3', c_ulong),
+ ('__pad7_4', c_ulong),
+ ('__pad7_5', c_ulong),
+ ('__pad7_6', c_ulong),
+ ('__pad7_7', c_ulong),
+ ('__pad7_8', c_ulong),
+ ('__pad7_9', c_ulong),
+ ('__pad7_10', c_ulong),
+ ('__pad7_11', c_ulong),
+ ('__pad7_12', c_ulong),
+ ('__pad7_13', c_ulong),
+ ('__pad7_14', c_ulong)]
+ elif _machine == 'ppc':
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('st_ino', c_ulonglong),
+ ('st_mode', c_mode_t),
+ ('st_nlink', c_uint),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('st_rdev', c_dev_t),
+ ('__pad2', c_ushort),
+ ('st_size', c_off_t),
+ ('st_blksize', c_long),
+ ('st_blocks', c_longlong),
+ ('st_atimespec', c_timespec),
+ ('st_mtimespec', c_timespec),
+ ('st_ctimespec', c_timespec)]
+ elif _machine == 'ppc64' or _machine == 'ppc64le':
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('st_ino', c_ulong),
+ ('st_nlink', c_ulong),
+ ('st_mode', c_mode_t),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('__pad', c_uint),
+ ('st_rdev', c_dev_t),
+ ('st_size', c_off_t),
+ ('st_blksize', c_long),
+ ('st_blocks', c_long),
+ ('st_atimespec', c_timespec),
+ ('st_mtimespec', c_timespec),
+ ('st_ctimespec', c_timespec)]
+ elif _machine == 'aarch64':
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('st_ino', c_ulong),
+ ('st_mode', c_mode_t),
+ ('st_nlink', c_uint),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('st_rdev', c_dev_t),
+ ('__pad1', c_ulong),
+ ('st_size', c_off_t),
+ ('st_blksize', c_int),
+ ('__pad2', c_int),
+ ('st_blocks', c_long),
+ ('st_atimespec', c_timespec),
+ ('st_mtimespec', c_timespec),
+ ('st_ctimespec', c_timespec)]
+ else:
+ # i686, use as fallback for everything else
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('__pad1', c_ushort),
+ ('__st_ino', c_ulong),
+ ('st_mode', c_mode_t),
+ ('st_nlink', c_uint),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('st_rdev', c_dev_t),
+ ('__pad2', c_ushort),
+ ('st_size', c_off_t),
+ ('st_blksize', c_long),
+ ('st_blocks', c_longlong),
+ ('st_atimespec', c_timespec),
+ ('st_mtimespec', c_timespec),
+ ('st_ctimespec', c_timespec),
+ ('st_ino', c_ulonglong)]
+else:
+ raise NotImplementedError('{} is not supported.'.format(_system))
+
+
+class c_statvfs(Structure):
+ _fields_ = [
+ ('f_bsize', c_ulong),
+ ('f_frsize', c_ulong),
+ ('f_blocks', c_fsblkcnt_t),
+ ('f_bfree', c_fsblkcnt_t),
+ ('f_bavail', c_fsblkcnt_t),
+ ('f_files', c_fsfilcnt_t),
+ ('f_ffree', c_fsfilcnt_t),
+ ('f_favail', c_fsfilcnt_t),
+ ('f_fsid', c_ulong),
+ #('unused', c_int),
+ ('f_flag', c_ulong),
+ ('f_namemax', c_ulong)]
+
+if _system == 'FreeBSD':
+ c_fsblkcnt_t = c_uint64
+ c_fsfilcnt_t = c_uint64
+ setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
+ c_size_t, c_int)
+
+ getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
+ c_size_t)
+
+ class c_statvfs(Structure):
+ _fields_ = [
+ ('f_bavail', c_fsblkcnt_t),
+ ('f_bfree', c_fsblkcnt_t),
+ ('f_blocks', c_fsblkcnt_t),
+ ('f_favail', c_fsfilcnt_t),
+ ('f_ffree', c_fsfilcnt_t),
+ ('f_files', c_fsfilcnt_t),
+ ('f_bsize', c_ulong),
+ ('f_flag', c_ulong),
+ ('f_frsize', c_ulong)]
+
+class fuse_file_info(Structure):
+ _fields_ = [
+ ('flags', c_int),
+ ('fh_old', c_ulong),
+ ('writepage', c_int),
+ ('direct_io', c_uint, 1),
+ ('keep_cache', c_uint, 1),
+ ('flush', c_uint, 1),
+ ('padding', c_uint, 29),
+ ('fh', c_uint64),
+ ('lock_owner', c_uint64)]
+
+class fuse_context(Structure):
+ _fields_ = [
+ ('fuse', c_voidp),
+ ('uid', c_uid_t),
+ ('gid', c_gid_t),
+ ('pid', c_pid_t),
+ ('private_data', c_voidp)]
+
+_libfuse.fuse_get_context.restype = POINTER(fuse_context)
+
+
+class fuse_operations(Structure):
+ _fields_ = [
+ ('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
+ ('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
+ ('getdir', c_voidp), # Deprecated, use readdir
+ ('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
+ ('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
+ ('unlink', CFUNCTYPE(c_int, c_char_p)),
+ ('rmdir', CFUNCTYPE(c_int, c_char_p)),
+ ('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
+ ('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
+ ('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
+ ('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
+ ('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
+ ('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)),
+ ('utime', c_voidp), # Deprecated, use utimens
+ ('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
+
+ ('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
+ c_off_t, POINTER(fuse_file_info))),
+
+ ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
+ c_off_t, POINTER(fuse_file_info))),
+
+ ('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
+ ('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
+ ('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
+ ('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
+ ('setxattr', setxattr_t),
+ ('getxattr', getxattr_t),
+ ('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
+ ('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
+ ('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
+
+ ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp,
+ CFUNCTYPE(c_int, c_voidp, c_char_p,
+ POINTER(c_stat), c_off_t),
+ c_off_t, POINTER(fuse_file_info))),
+
+ ('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
+
+ ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int,
+ POINTER(fuse_file_info))),
+
+ ('init', CFUNCTYPE(c_voidp, c_voidp)),
+ ('destroy', CFUNCTYPE(c_voidp, c_voidp)),
+ ('access', CFUNCTYPE(c_int, c_char_p, c_int)),
+
+ ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t,
+ POINTER(fuse_file_info))),
+
+ ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t,
+ POINTER(fuse_file_info))),
+
+ ('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
+ POINTER(fuse_file_info))),
+
+ ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info),
+ c_int, c_voidp)),
+
+ ('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
+ ('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong))),
+ ('flag_nullpath_ok', c_uint, 1),
+ ('flag_nopath', c_uint, 1),
+ ('flag_utime_omit_ok', c_uint, 1),
+ ('flag_reserved', c_uint, 29),
+ ]
+
+
+def time_of_timespec(ts):
+ return ts.tv_sec + ts.tv_nsec / 10 ** 9
+
+def set_st_attrs(st, attrs):
+ for key, val in attrs.items():
+ if key in ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime'):
+ timespec = getattr(st, key + 'spec', None)
+ if timespec is None:
+ continue
+ timespec.tv_sec = int(val)
+ timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9)
+ elif hasattr(st, key):
+ setattr(st, key, val)
+
+
+def fuse_get_context():
+ 'Returns a (uid, gid, pid) tuple'
+
+ ctxp = _libfuse.fuse_get_context()
+ ctx = ctxp.contents
+ return ctx.uid, ctx.gid, ctx.pid
+
+
+class FuseOSError(OSError):
+ def __init__(self, errno):
+ super(FuseOSError, self).__init__(errno, strerror(errno))
+
+
+class FUSE(object):
+ '''
+ This class is the lower level interface and should not be subclassed under
+ normal use. Its methods are called by fuse.
+
+ Assumes API version 2.6 or later.
+ '''
+
+ OPTIONS = (
+ ('foreground', '-f'),
+ ('debug', '-d'),
+ ('nothreads', '-s'),
+ )
+
+ def __init__(self, operations, mountpoint, raw_fi=False, encoding='utf-8',
+ **kwargs):
+
+ '''
+ Setting raw_fi to True will cause FUSE to pass the fuse_file_info
+ class as is to Operations, instead of just the fh field.
+
+ This gives you access to direct_io, keep_cache, etc.
+ '''
+
+ self.operations = operations
+ self.raw_fi = raw_fi
+ self.encoding = encoding
+
+ args = ['fuse']
+
+ args.extend(flag for arg, flag in self.OPTIONS
+ if kwargs.pop(arg, False))
+
+ kwargs.setdefault('fsname', operations.__class__.__name__)
+ args.append('-o')
+ args.append(','.join(self._normalize_fuse_options(**kwargs)))
+ args.append(mountpoint)
+
+ args = [arg.encode(encoding) for arg in args]
+ argv = (c_char_p * len(args))(*args)
+
+ fuse_ops = fuse_operations()
+ for ent in fuse_operations._fields_:
+ name, prototype = ent[:2]
+
+ val = getattr(operations, name, None)
+ if val is None:
+ continue
+
+ # Function pointer members are tested for using the
+ # getattr(operations, name) above but are dynamically
+ # invoked using self.operations(name)
+ if hasattr(prototype, 'argtypes'):
+ val = prototype(partial(self._wrapper, getattr(self, name)))
+
+ setattr(fuse_ops, name, val)
+
+ try:
+ old_handler = signal(SIGINT, SIG_DFL)
+ except ValueError:
+ old_handler = SIG_DFL
+
+ err = _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
+ sizeof(fuse_ops), None)
+
+ try:
+ signal(SIGINT, old_handler)
+ except ValueError:
+ pass
+
+ del self.operations # Invoke the destructor
+ if err:
+ raise RuntimeError(err)
+
+ @staticmethod
+ def _normalize_fuse_options(**kargs):
+ for key, value in kargs.items():
+ if isinstance(value, bool):
+ if value is True: yield key
+ else:
+ yield '{}={}'.format(key, value)
+
+ @staticmethod
+ def _wrapper(func, *args, **kwargs):
+ 'Decorator for the methods that follow'
+
+ try:
+ return func(*args, **kwargs) or 0
+ except OSError as e:
+ return -(e.errno or EFAULT)
+ except:
+ print_exc()
+ return -EFAULT
+
+ def _decode_optional_path(self, path):
+ # NB: this method is intended for fuse operations that
+ # allow the path argument to be NULL,
+ # *not* as a generic path decoding method
+ if path is None:
+ return None
+ return path.decode(self.encoding)
+
+ def getattr(self, path, buf):
+ return self.fgetattr(path, buf, None)
+
+ def readlink(self, path, buf, bufsize):
+ ret = self.operations('readlink', path.decode(self.encoding)) \
+ .encode(self.encoding)
+
+ # copies a string into the given buffer
+ # (null terminated and truncated if necessary)
+ data = create_string_buffer(ret[:bufsize - 1])
+ memmove(buf, data, len(data))
+ return 0
+
+ def mknod(self, path, mode, dev):
+ return self.operations('mknod', path.decode(self.encoding), mode, dev)
+
+ def mkdir(self, path, mode):
+ return self.operations('mkdir', path.decode(self.encoding), mode)
+
+ def unlink(self, path):
+ return self.operations('unlink', path.decode(self.encoding))
+
+ def rmdir(self, path):
+ return self.operations('rmdir', path.decode(self.encoding))
+
+ def symlink(self, source, target):
+ 'creates a symlink `target -> source` (e.g. ln -s source target)'
+
+ return self.operations('symlink', target.decode(self.encoding),
+ source.decode(self.encoding))
+
+ def rename(self, old, new):
+ return self.operations('rename', old.decode(self.encoding),
+ new.decode(self.encoding))
+
+ def link(self, source, target):
+ 'creates a hard link `target -> source` (e.g. ln source target)'
+
+ return self.operations('link', target.decode(self.encoding),
+ source.decode(self.encoding))
+
+ def chmod(self, path, mode):
+ return self.operations('chmod', path.decode(self.encoding), mode)
+
+ def chown(self, path, uid, gid):
+ # Check if any of the arguments is a -1 that has overflowed
+ if c_uid_t(uid + 1).value == 0:
+ uid = -1
+ if c_gid_t(gid + 1).value == 0:
+ gid = -1
+
+ return self.operations('chown', path.decode(self.encoding), uid, gid)
+
+ def truncate(self, path, length):
+ return self.operations('truncate', path.decode(self.encoding), length)
+
+ def open(self, path, fip):
+ fi = fip.contents
+ if self.raw_fi:
+ return self.operations('open', path.decode(self.encoding), fi)
+ else:
+ fi.fh = self.operations('open', path.decode(self.encoding),
+ fi.flags)
+
+ return 0
+
+ def read(self, path, buf, size, offset, fip):
+ if self.raw_fi:
+ fh = fip.contents
+ else:
+ fh = fip.contents.fh
+
+ ret = self.operations('read', self._decode_optional_path(path), size,
+ offset, fh)
+
+ if not ret: return 0
+
+ retsize = len(ret)
+ assert retsize <= size, \
+ 'actual amount read {:d} greater than expected {:d}'.format(retsize, size)
+
+ data = create_string_buffer(ret, retsize)
+ memmove(buf, data, retsize)
+ return retsize
+
+ def write(self, path, buf, size, offset, fip):
+ data = string_at(buf, size)
+
+ if self.raw_fi:
+ fh = fip.contents
+ else:
+ fh = fip.contents.fh
+
+ return self.operations('write', self._decode_optional_path(path), data,
+ offset, fh)
+
+ def statfs(self, path, buf):
+ stv = buf.contents
+ attrs = self.operations('statfs', path.decode(self.encoding))
+ for key, val in attrs.items():
+ if hasattr(stv, key):
+ setattr(stv, key, val)
+
+ return 0
+
+ def flush(self, path, fip):
+ if self.raw_fi:
+ fh = fip.contents
+ else:
+ fh = fip.contents.fh
+
+ return self.operations('flush', self._decode_optional_path(path), fh)
+
+ def release(self, path, fip):
+ if self.raw_fi:
+ fh = fip.contents
+ else:
+ fh = fip.contents.fh
+
+ return self.operations('release', self._decode_optional_path(path), fh)
+
+ def fsync(self, path, datasync, fip):
+ if self.raw_fi:
+ fh = fip.contents
+ else:
+ fh = fip.contents.fh
+
+ return self.operations('fsync', self._decode_optional_path(path), datasync,
+ fh)
+
+ def setxattr(self, path, name, value, size, options, *args):
+ return self.operations('setxattr', path.decode(self.encoding),
+ name.decode(self.encoding),
+ string_at(value, size), options, *args)
+
+ def getxattr(self, path, name, value, size, *args):
+ ret = self.operations('getxattr', path.decode(self.encoding),
+ name.decode(self.encoding), *args)
+
+ retsize = len(ret)
+ # allow size queries
+ if not value: return retsize
+
+ # do not truncate
+ if retsize > size: return -ERANGE
+
+ buf = create_string_buffer(ret, retsize) # Does not add trailing 0
+ memmove(value, buf, retsize)
+
+ return retsize
+
+ def listxattr(self, path, namebuf, size):
+ attrs = self.operations('listxattr', path.decode(self.encoding)) or ''
+ ret = '\x00'.join(attrs).encode(self.encoding)
+ if len(ret) > 0:
+ ret += '\x00'.encode(self.encoding)
+
+ retsize = len(ret)
+ # allow size queries
+ if not namebuf: return retsize
+
+ # do not truncate
+ if retsize > size: return -ERANGE
+
+ buf = create_string_buffer(ret, retsize)
+ memmove(namebuf, buf, retsize)
+
+ return retsize
+
+ def removexattr(self, path, name):
+ return self.operations('removexattr', path.decode(self.encoding),
+ name.decode(self.encoding))
+
+ def opendir(self, path, fip):
+ # Ignore raw_fi
+ fip.contents.fh = self.operations('opendir',
+ path.decode(self.encoding))
+
+ return 0
+
+ def readdir(self, path, buf, filler, offset, fip):
+ # Ignore raw_fi
+ for item in self.operations('readdir', self._decode_optional_path(path),
+ fip.contents.fh):
+
+ if isinstance(item, basestring):
+ name, st, offset = item, None, 0
+ else:
+ name, attrs, offset = item
+ if attrs:
+ st = c_stat()
+ set_st_attrs(st, attrs)
+ else:
+ st = None
+
+ if filler(buf, name.encode(self.encoding), st, offset) != 0:
+ break
+
+ return 0
+
+ def releasedir(self, path, fip):
+ # Ignore raw_fi
+ return self.operations('releasedir', self._decode_optional_path(path),
+ fip.contents.fh)
+
+ def fsyncdir(self, path, datasync, fip):
+ # Ignore raw_fi
+ return self.operations('fsyncdir', self._decode_optional_path(path),
+ datasync, fip.contents.fh)
+
+ def init(self, conn):
+ return self.operations('init', '/')
+
+ def destroy(self, private_data):
+ return self.operations('destroy', '/')
+
+ def access(self, path, amode):
+ return self.operations('access', path.decode(self.encoding), amode)
+
+ def create(self, path, mode, fip):
+ fi = fip.contents
+ path = path.decode(self.encoding)
+
+ if self.raw_fi:
+ return self.operations('create', path, mode, fi)
+ else:
+ # This line is different from upstream to fix issues
+ # reading file opened with O_CREAT|O_RDWR.
+ # See issue #143.
+ fi.fh = self.operations('create', path, mode, fi.flags)
+ # END OF MODIFICATION
+ return 0
+
+ def ftruncate(self, path, length, fip):
+ if self.raw_fi:
+ fh = fip.contents
+ else:
+ fh = fip.contents.fh
+
+ return self.operations('truncate', self._decode_optional_path(path),
+ length, fh)
+
+ def fgetattr(self, path, buf, fip):
+ memset(buf, 0, sizeof(c_stat))
+
+ st = buf.contents
+ if not fip:
+ fh = fip
+ elif self.raw_fi:
+ fh = fip.contents
+ else:
+ fh = fip.contents.fh
+
+ attrs = self.operations('getattr', self._decode_optional_path(path), fh)
+ set_st_attrs(st, attrs)
+ return 0
+
+ def lock(self, path, fip, cmd, lock):
+ if self.raw_fi:
+ fh = fip.contents
+ else:
+ fh = fip.contents.fh
+
+ return self.operations('lock', self._decode_optional_path(path), fh, cmd,
+ lock)
+
+ def utimens(self, path, buf):
+ if buf:
+ atime = time_of_timespec(buf.contents.actime)
+ mtime = time_of_timespec(buf.contents.modtime)
+ times = (atime, mtime)
+ else:
+ times = None
+
+ return self.operations('utimens', path.decode(self.encoding), times)
+
+ def bmap(self, path, blocksize, idx):
+ return self.operations('bmap', path.decode(self.encoding), blocksize,
+ idx)
+
+
+class Operations(object):
+ '''
+ This class should be subclassed and passed as an argument to FUSE on
+ initialization. All operations should raise a FuseOSError exception on
+ error.
+
+ When in doubt of what an operation should do, check the FUSE header file
+ or the corresponding system call man page.
+ '''
+
+ def __call__(self, op, *args):
+ if not hasattr(self, op):
+ raise FuseOSError(EFAULT)
+ return getattr(self, op)(*args)
+
+ def access(self, path, amode):
+ return 0
+
+ bmap = None
+
+ def chmod(self, path, mode):
+ raise FuseOSError(EROFS)
+
+ def chown(self, path, uid, gid):
+ raise FuseOSError(EROFS)
+
+ def create(self, path, mode, fi=None):
+ '''
+ When raw_fi is False (default case), fi is None and create should
+ return a numerical file handle.
+
+ When raw_fi is True the file handle should be set directly by create
+ and return 0.
+ '''
+
+ raise FuseOSError(EROFS)
+
+ def destroy(self, path):
+ 'Called on filesystem destruction. Path is always /'
+
+ pass
+
+ def flush(self, path, fh):
+ return 0
+
+ def fsync(self, path, datasync, fh):
+ return 0
+
+ def fsyncdir(self, path, datasync, fh):
+ return 0
+
+ def getattr(self, path, fh=None):
+ '''
+ Returns a dictionary with keys identical to the stat C structure of
+ stat(2).
+
+ st_atime, st_mtime and st_ctime should be floats.
+
+        NOTE: There is an incompatibility between Linux and Mac OS X
+ concerning st_nlink of directories. Mac OS X counts all files inside
+ the directory, while Linux counts only the subdirectories.
+ '''
+
+ if path != '/':
+ raise FuseOSError(ENOENT)
+ return dict(st_mode=(S_IFDIR | 0o755), st_nlink=2)
+
+ def getxattr(self, path, name, position=0):
+ raise FuseOSError(ENOTSUP)
+
+ def init(self, path):
+ '''
+ Called on filesystem initialization. (Path is always /)
+
+ Use it instead of __init__ if you start threads on initialization.
+ '''
+
+ pass
+
+ def link(self, target, source):
+ 'creates a hard link `target -> source` (e.g. ln source target)'
+
+ raise FuseOSError(EROFS)
+
+ def listxattr(self, path):
+ return []
+
+ lock = None
+
+ def mkdir(self, path, mode):
+ raise FuseOSError(EROFS)
+
+ def mknod(self, path, mode, dev):
+ raise FuseOSError(EROFS)
+
+ def open(self, path, flags):
+ '''
+ When raw_fi is False (default case), open should return a numerical
+ file handle.
+
+ When raw_fi is True the signature of open becomes:
+ open(self, path, fi)
+
+ and the file handle should be set directly.
+ '''
+
+ return 0
+
+ def opendir(self, path):
+ 'Returns a numerical file handle.'
+
+ return 0
+
+ def read(self, path, size, offset, fh):
+ 'Returns a string containing the data requested.'
+
+ raise FuseOSError(EIO)
+
+ def readdir(self, path, fh):
+ '''
+ Can return either a list of names, or a list of (name, attrs, offset)
+ tuples. attrs is a dict as in getattr.
+ '''
+
+ return ['.', '..']
+
+ def readlink(self, path):
+ raise FuseOSError(ENOENT)
+
+ def release(self, path, fh):
+ return 0
+
+ def releasedir(self, path, fh):
+ return 0
+
+ def removexattr(self, path, name):
+ raise FuseOSError(ENOTSUP)
+
+ def rename(self, old, new):
+ raise FuseOSError(EROFS)
+
+ def rmdir(self, path):
+ raise FuseOSError(EROFS)
+
+ def setxattr(self, path, name, value, options, position=0):
+ raise FuseOSError(ENOTSUP)
+
+ def statfs(self, path):
+ '''
+ Returns a dictionary with keys identical to the statvfs C structure of
+ statvfs(3).
+
+ On Mac OS X f_bsize and f_frsize must be a power of 2
+ (minimum 512).
+ '''
+
+ return {}
+
+ def symlink(self, target, source):
+ 'creates a symlink `target -> source` (e.g. ln -s source target)'
+
+ raise FuseOSError(EROFS)
+
+ def truncate(self, path, length, fh=None):
+ raise FuseOSError(EROFS)
+
+ def unlink(self, path):
+ raise FuseOSError(EROFS)
+
+ def utimens(self, path, times=None):
+ 'Times is a (atime, mtime) tuple. If None use current time.'
+
+ return 0
+
+ def write(self, path, data, offset, fh):
+ raise FuseOSError(EROFS)
+
+
+class LoggingMixIn:
+ log = logging.getLogger('fuse.log-mixin')
+
+ def __call__(self, op, path, *args):
+ self.log.debug('-> %s %s %s', op, path, repr(args))
+ ret = '[Unhandled Exception]'
+ try:
+ ret = getattr(self, op)(path, *args)
+ return ret
+ except OSError as e:
+ ret = str(e)
+ raise
+ finally:
+ self.log.debug('<- %s %s', op, repr(ret))
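The embedded `fuse.py` above exposes the usual fusepy pattern: subclass `Operations`, override the calls the filesystem needs, and hand an instance plus a mountpoint to `FUSE()`, which mounts and blocks until the filesystem is unmounted. The following is a minimal read-only sketch of that pattern, not part of the patch; the mountpoint path is illustrative, the directory must already exist, and the import path assumes the package is installed as `buildstream`. BuildStream's real consumer of this API is the `SafeHardlinks` mount defined in `_fuse/hardlinks.py` below.

    import errno
    import stat

    from buildstream._fuse.fuse import FUSE, FuseOSError, Operations

    class HelloFS(Operations):
        # A single read-only file '/hello' containing b'hello\n'

        def getattr(self, path, fh=None):
            if path == '/':
                return dict(st_mode=(stat.S_IFDIR | 0o755), st_nlink=2)
            if path == '/hello':
                return dict(st_mode=(stat.S_IFREG | 0o444), st_nlink=1, st_size=6)
            raise FuseOSError(errno.ENOENT)

        def readdir(self, path, fh):
            return ['.', '..', 'hello']

        def read(self, path, size, offset, fh):
            return b'hello\n'[offset:offset + size]

    if __name__ == '__main__':
        # foreground=True maps to '-f', nothreads=True to '-s' (see OPTIONS above)
        FUSE(HelloFS(), '/tmp/hellofs', foreground=True, nothreads=True)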
diff --git a/src/buildstream/_fuse/hardlinks.py b/src/buildstream/_fuse/hardlinks.py
new file mode 100644
index 000000000..ff2e81eea
--- /dev/null
+++ b/src/buildstream/_fuse/hardlinks.py
@@ -0,0 +1,218 @@
+#
+# Copyright (C) 2016 Stavros Korokithakis
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+#
+# The filesystem operations implementation here is based
+# on some example code written by Stavros Korokithakis.
+
+import errno
+import os
+import shutil
+import stat
+import tempfile
+
+from .fuse import FuseOSError, Operations
+
+from .mount import Mount
+
+
+# SafeHardlinks()
+#
+# A FUSE mount which implements a copy on write hardlink experience.
+#
+# Args:
+#    directory (str): The underlying filesystem path to mirror
+#    tempdir (str): A directory on the same filesystem for creating temp files
+#    fuse_mount_options (dict): Additional options for the FUSE mount
+#
+class SafeHardlinks(Mount):
+
+ def __init__(self, directory, tempdir, fuse_mount_options=None):
+ self.directory = directory
+ self.tempdir = tempdir
+ if fuse_mount_options is None:
+ fuse_mount_options = {}
+ super().__init__(fuse_mount_options=fuse_mount_options)
+
+ def create_operations(self):
+ return SafeHardlinkOps(self.directory, self.tempdir)
+
+
+# SafeHardlinkOps()
+#
+# The actual FUSE Operations implementation below.
+#
+class SafeHardlinkOps(Operations):
+
+ def __init__(self, root, tmp):
+ self.root = root
+ self.tmp = tmp
+
+ def _full_path(self, partial):
+ if partial.startswith("/"):
+ partial = partial[1:]
+ path = os.path.join(self.root, partial)
+ return path
+
+ def _ensure_copy(self, full_path):
+ try:
+ # Follow symbolic links manually here
+ real_path = os.path.realpath(full_path)
+ file_stat = os.stat(real_path)
+
+            # Don't bother with files that cannot be hardlinked; oddly,
+            # directories usually have st_nlink > 1 anyway, so just avoid
+            # them.
+ #
+            # We already won't get symlinks here, and os.stat() will raise
+            # the FileNotFoundError below if a followed symlink does not exist.
+ #
+ if not stat.S_ISDIR(file_stat.st_mode) and file_stat.st_nlink > 1:
+ with tempfile.TemporaryDirectory(dir=self.tmp) as tempdir:
+ basename = os.path.basename(real_path)
+ temp_path = os.path.join(tempdir, basename)
+
+ # First copy, then unlink origin and rename
+ shutil.copy2(real_path, temp_path)
+ os.unlink(real_path)
+ os.rename(temp_path, real_path)
+
+ except FileNotFoundError:
+            # This doesn't exist yet; assume we're about to create it,
+            # so it's not a problem.
+ pass
+
+ ###########################################################
+ # Fuse Methods #
+ ###########################################################
+ def access(self, path, mode):
+ full_path = self._full_path(path)
+ if not os.access(full_path, mode):
+ raise FuseOSError(errno.EACCES)
+
+ def chmod(self, path, mode):
+ full_path = self._full_path(path)
+
+ # Ensure copies on chmod
+ self._ensure_copy(full_path)
+ return os.chmod(full_path, mode)
+
+ def chown(self, path, uid, gid):
+ full_path = self._full_path(path)
+
+ # Ensure copies on chown
+ self._ensure_copy(full_path)
+ return os.chown(full_path, uid, gid)
+
+ def getattr(self, path, fh=None):
+ full_path = self._full_path(path)
+ st = os.lstat(full_path)
+ return dict((key, getattr(st, key)) for key in (
+ 'st_atime', 'st_ctime', 'st_gid', 'st_mode',
+ 'st_mtime', 'st_nlink', 'st_size', 'st_uid', 'st_rdev'))
+
+ def readdir(self, path, fh):
+ full_path = self._full_path(path)
+
+ dirents = ['.', '..']
+ if os.path.isdir(full_path):
+ dirents.extend(os.listdir(full_path))
+ for r in dirents:
+ yield r
+
+ def readlink(self, path):
+ pathname = os.readlink(self._full_path(path))
+ if pathname.startswith("/"):
+ # Path name is absolute, sanitize it.
+ return os.path.relpath(pathname, self.root)
+ else:
+ return pathname
+
+ def mknod(self, path, mode, dev):
+ return os.mknod(self._full_path(path), mode, dev)
+
+ def rmdir(self, path):
+ full_path = self._full_path(path)
+ return os.rmdir(full_path)
+
+ def mkdir(self, path, mode):
+ return os.mkdir(self._full_path(path), mode)
+
+ def statfs(self, path):
+ full_path = self._full_path(path)
+ stv = os.statvfs(full_path)
+ return dict((key, getattr(stv, key)) for key in (
+ 'f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail',
+ 'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax'))
+
+ def unlink(self, path):
+ return os.unlink(self._full_path(path))
+
+ def symlink(self, name, target):
+ return os.symlink(target, self._full_path(name))
+
+ def rename(self, old, new):
+ return os.rename(self._full_path(old), self._full_path(new))
+
+ def link(self, target, name):
+
+ # When creating a hard link here, should we ensure the original
+ # file is not a hardlink itself first ?
+ #
+ return os.link(self._full_path(name), self._full_path(target))
+
+ def utimens(self, path, times=None):
+ return os.utime(self._full_path(path), times)
+
+ def open(self, path, flags):
+ full_path = self._full_path(path)
+
+ # If we're opening for writing, ensure it's a copy first
+ if flags & os.O_WRONLY or flags & os.O_RDWR:
+ self._ensure_copy(full_path)
+
+ return os.open(full_path, flags)
+
+ def create(self, path, mode, flags):
+ full_path = self._full_path(path)
+
+ # If it already exists, ensure it's a copy first
+ self._ensure_copy(full_path)
+ return os.open(full_path, flags, mode)
+
+ def read(self, path, length, offset, fh):
+ os.lseek(fh, offset, os.SEEK_SET)
+ return os.read(fh, length)
+
+ def write(self, path, buf, offset, fh):
+ os.lseek(fh, offset, os.SEEK_SET)
+ return os.write(fh, buf)
+
+ def truncate(self, path, length, fh=None):
+ full_path = self._full_path(path)
+ with open(full_path, 'r+') as f:
+ f.truncate(length)
+
+ def flush(self, path, fh):
+ return os.fsync(fh)
+
+ def release(self, path, fh):
+ return os.close(fh)
+
+ def fsync(self, path, fdatasync, fh):
+ return self.flush(path, fh)
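SafeHardlinkOps gives copy-on-write semantics over a hardlinked tree: _ensure_copy() breaks the hardlink (copy to a temp file, unlink, rename back) before operations such as chmod, chown, create and open-for-write, so modifications made through the mount never reach the other links to the same inode. A hypothetical usage sketch, with placeholder paths and file names, relying on the mounted() context manager defined in mount.py below:

    import os

    from buildstream._fuse.hardlinks import SafeHardlinks  # assumed import path

    checkout = '/path/to/hardlinked/checkout'   # placeholder paths
    tempdir = '/path/to/tempdir'                # must be on the same filesystem
    mountpoint = '/path/to/mountpoint'

    mount = SafeHardlinks(checkout, tempdir)
    with mount.mounted(mountpoint):
        # Appending through the mount triggers _ensure_copy() first, so the
        # original hardlinked file under 'checkout' keeps its other links intact.
        with open(os.path.join(mountpoint, 'some-file'), 'a') as f:   # placeholder file
            f.write('modified inside the sandbox\n')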
diff --git a/src/buildstream/_fuse/mount.py b/src/buildstream/_fuse/mount.py
new file mode 100644
index 000000000..e31684100
--- /dev/null
+++ b/src/buildstream/_fuse/mount.py
@@ -0,0 +1,196 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+import os
+import signal
+import time
+import sys
+
+from contextlib import contextmanager
+from multiprocessing import Process
+from .fuse import FUSE
+
+from .._exceptions import ImplError
+from .. import _signals
+
+
+# Just a custom exception to raise here, for identifying possible
+# bugs with a fuse layer implementation
+#
+class FuseMountError(Exception):
+ pass
+
+
+# This is a convenience class which takes care of synchronizing the
+# startup of FUSE and shutting it down.
+#
+# The implementations / subclasses should:
+#
+# - Overload the instance initializer to add any parameters
+# needed for their fuse Operations implementation
+#
+# - Implement create_operations() to create the Operations
+# instance on behalf of the superclass, using any additional
+# parameters collected in the initializer.
+#
+# Mount objects can be treated as context managers; the volume
+# will be mounted for the duration of the context.
+#
+# UGLY CODE NOTE:
+#
+# This is a horrible little piece of code. The problem we face
+# here is that the highlevel libfuse API has fuse_main(), which
+# will either block in the foreground, or become a full daemon.
+#
+# With the daemon approach, we know that the fuse is mounted right
+# away when fuse_main() returns, then the daemon will go and handle
+# requests on its own, but then we have no way to shut down the
+# daemon.
+#
+# With the blocking approach, we still have it as a child process
+# so we can tell it to gracefully terminate; but it's impossible
+# to know when the mount is done, as there is no callback for that.
+#
+# The solution we use here, without digging too deep into the
+# low level fuse API, is to fork a child process which will
+# run the fuse loop in the foreground, and to block the parent
+# process in a polling loop until the volume is mounted.
+#
+class Mount():
+
+    # These are not really class data; they are
+    # just here so that these attributes default to None
+    # instead of being missing until they are assigned,
+    # since the real values are only set up at mount time.
+ #
+ __mountpoint = None
+ __operations = None
+ __process = None
+
+ ################################################
+ # User Facing API #
+ ################################################
+
+ def __init__(self, fuse_mount_options=None):
+ self._fuse_mount_options = {} if fuse_mount_options is None else fuse_mount_options
+
+ # mount():
+ #
+ # User facing API for mounting a fuse subclass implementation
+ #
+ # Args:
+ # (str): Location to mount this fuse fs
+ #
+ def mount(self, mountpoint):
+
+ assert self.__process is None
+
+ self.__mountpoint = mountpoint
+ self.__process = Process(target=self.__run_fuse)
+
+ # Ensure the child fork() does not inherit our signal handlers, if the
+ # child wants to handle a signal then it will first set its own
+ # handler, and then unblock it.
+ with _signals.blocked([signal.SIGTERM, signal.SIGTSTP, signal.SIGINT], ignore=False):
+ self.__process.start()
+
+ # This is horrible, we're going to wait until mountpoint is mounted and that's it.
+ while not os.path.ismount(mountpoint):
+ time.sleep(1 / 100)
+
+ # unmount():
+ #
+ # User facing API for unmounting a fuse subclass implementation
+ #
+ def unmount(self):
+
+ # Terminate child process and join
+ if self.__process is not None:
+ self.__process.terminate()
+ self.__process.join()
+
+            # Report an error if the underlying operations crashed for some reason.
+ if self.__process.exitcode != 0:
+ raise FuseMountError("{} reported exit code {} when unmounting"
+ .format(type(self).__name__, self.__process.exitcode))
+
+ self.__mountpoint = None
+ self.__process = None
+
+ # mounted():
+ #
+ # A context manager to run a code block with this fuse Mount
+ # mounted, this will take care of automatically unmounting
+ # in the case that the calling process is terminated.
+ #
+ # Args:
+ # (str): Location to mount this fuse fs
+ #
+ @contextmanager
+ def mounted(self, mountpoint):
+
+ self.mount(mountpoint)
+ try:
+ with _signals.terminator(self.unmount):
+ yield
+ finally:
+ self.unmount()
+
+ ################################################
+ # Abstract Methods #
+ ################################################
+
+ # create_operations():
+ #
+ # Create an Operations class (from fusepy) and return it
+ #
+ # Returns:
+ # (Operations): A FUSE Operations implementation
+ def create_operations(self):
+ raise ImplError("Mount subclass '{}' did not implement create_operations()"
+ .format(type(self).__name__))
+
+ ################################################
+ # Child Process #
+ ################################################
+ def __run_fuse(self):
+
+ # First become session leader while signals are still blocked
+ #
+ # Then reset the SIGTERM handler to the default and finally
+ # unblock SIGTERM.
+ #
+ os.setsid()
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM])
+
+ # Ask the subclass to give us an Operations object
+ #
+ self.__operations = self.create_operations() # pylint: disable=assignment-from-no-return
+
+ # Run fuse in foreground in this child process, internally libfuse
+ # will handle SIGTERM and gracefully exit its own little main loop.
+ #
+ FUSE(self.__operations, self.__mountpoint, nothreads=True, foreground=True, nonempty=True,
+ **self._fuse_mount_options)
+
+ # Explicit 0 exit code, if the operations crashed for some reason, the exit
+ # code will not be 0, and we want to know about it.
+ #
+ sys.exit(0)
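The synchronization problem described in the UGLY CODE NOTE is solved by forking a child that runs the blocking FUSE loop in the foreground and having the parent poll os.path.ismount() until the kernel reports the path as mounted. A generic sketch of that pattern, not BuildStream code, with an explicit timeout added purely for illustration:

    import os
    import time
    from multiprocessing import Process


    def start_and_wait_for_mount(run_fuse_foreground, mountpoint, timeout=10.0):
        # run_fuse_foreground is any callable that blocks while serving the
        # mount, e.g. a wrapper around FUSE(..., foreground=True).
        process = Process(target=run_fuse_foreground)
        process.start()

        deadline = time.monotonic() + timeout
        while not os.path.ismount(mountpoint):
            if not process.is_alive() or time.monotonic() > deadline:
                process.terminate()
                process.join()
                raise RuntimeError("Mounting {} failed or timed out".format(mountpoint))
            time.sleep(1 / 100)

        # The caller terminates and joins the returned process to unmount.
        return process

Mount.mount() above uses the same busy loop, but without a timeout; unmount() is what eventually terminates and joins the child.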
diff --git a/src/buildstream/_gitsourcebase.py b/src/buildstream/_gitsourcebase.py
new file mode 100644
index 000000000..7d07c56cb
--- /dev/null
+++ b/src/buildstream/_gitsourcebase.py
@@ -0,0 +1,683 @@
+#
+# Copyright (C) 2016 Codethink Limited
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Chandan Singh <csingh43@bloomberg.net>
+
+"""Abstract base class for source implementations that work with a Git repository"""
+
+import os
+import re
+import shutil
+from collections.abc import Mapping
+from io import StringIO
+from tempfile import TemporaryFile
+
+from configparser import RawConfigParser
+
+from .source import Source, SourceError, SourceFetcher
+from .types import Consistency, CoreWarnings
+from . import utils
+from .utils import move_atomic, DirectoryExistsError
+
+GIT_MODULES = '.gitmodules'
+
+# Warnings
+WARN_INCONSISTENT_SUBMODULE = "inconsistent-submodule"
+WARN_UNLISTED_SUBMODULE = "unlisted-submodule"
+WARN_INVALID_SUBMODULE = "invalid-submodule"
+
+
+# Because of submodule handling, we maintain a _GitMirror
+# for the primary git source and also for each submodule it
+# might have at a given time.
+#
+class _GitMirror(SourceFetcher):
+
+ def __init__(self, source, path, url, ref, *, primary=False, tags=[]):
+
+ super().__init__()
+ self.source = source
+ self.path = path
+ self.url = url
+ self.ref = ref
+ self.tags = tags
+ self.primary = primary
+ self.mirror = os.path.join(source.get_mirror_directory(), utils.url_directory_name(url))
+ self.mark_download_url(url)
+
+ # Ensures that the mirror exists
+ def ensure(self, alias_override=None):
+
+        # Unfortunately, git does not know how to clone only a specific ref,
+        # so we have to download gigabytes of history even if we only need
+        # a couple of bytes.
+ if not os.path.exists(self.mirror):
+
+            # Do the initial clone in a tmpdir because we want an atomic move
+            # after a long running clone which could fail partway through. For
+            # now, do this directly in our git directory, eliminating the chance
+            # that the system configured tmpdir is not on the same partition.
+ #
+ with self.source.tempdir() as tmpdir:
+ url = self.source.translate_url(self.url, alias_override=alias_override,
+ primary=self.primary)
+ self.source.call([self.source.host_git, 'clone', '--mirror', '-n', url, tmpdir],
+ fail="Failed to clone git repository {}".format(url),
+ fail_temporarily=True)
+
+ try:
+ move_atomic(tmpdir, self.mirror)
+ except DirectoryExistsError:
+ # Another process was quicker to download this repository.
+ # Let's discard our own
+ self.source.status("{}: Discarding duplicate clone of {}"
+ .format(self.source, url))
+ except OSError as e:
+ raise SourceError("{}: Failed to move cloned git repository {} from '{}' to '{}': {}"
+ .format(self.source, url, tmpdir, self.mirror, e)) from e
+
+ def _fetch(self, alias_override=None):
+ url = self.source.translate_url(self.url,
+ alias_override=alias_override,
+ primary=self.primary)
+
+ if alias_override:
+ remote_name = utils.url_directory_name(alias_override)
+ _, remotes = self.source.check_output(
+ [self.source.host_git, 'remote'],
+ fail="Failed to retrieve list of remotes in {}".format(self.mirror),
+ cwd=self.mirror
+ )
+ if remote_name not in remotes:
+ self.source.call(
+ [self.source.host_git, 'remote', 'add', remote_name, url],
+ fail="Failed to add remote {} with url {}".format(remote_name, url),
+ cwd=self.mirror
+ )
+ else:
+ remote_name = "origin"
+
+ self.source.call([self.source.host_git, 'fetch', remote_name, '--prune',
+ '+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'],
+ fail="Failed to fetch from remote git repository: {}".format(url),
+ fail_temporarily=True,
+ cwd=self.mirror)
+
+ def fetch(self, alias_override=None):
+ # Resolve the URL for the message
+ resolved_url = self.source.translate_url(self.url,
+ alias_override=alias_override,
+ primary=self.primary)
+
+ with self.source.timed_activity("Fetching from {}"
+ .format(resolved_url),
+ silent_nested=True):
+ self.ensure(alias_override)
+ if not self.has_ref():
+ self._fetch(alias_override)
+ self.assert_ref()
+
+ def has_ref(self):
+ if not self.ref:
+ return False
+
+        # If the mirror doesn't exist, we also don't have the ref
+ if not os.path.exists(self.mirror):
+ return False
+
+ # Check if the ref is really there
+ rc = self.source.call([self.source.host_git, 'cat-file', '-t', self.ref], cwd=self.mirror)
+ return rc == 0
+
+ def assert_ref(self):
+ if not self.has_ref():
+ raise SourceError("{}: expected ref '{}' was not found in git repository: '{}'"
+ .format(self.source, self.ref, self.url))
+
+ def latest_commit_with_tags(self, tracking, track_tags=False):
+ _, output = self.source.check_output(
+ [self.source.host_git, 'rev-parse', tracking],
+ fail="Unable to find commit for specified branch name '{}'".format(tracking),
+ cwd=self.mirror)
+ ref = output.rstrip('\n')
+
+ if self.source.ref_format == 'git-describe':
+ # Prefix the ref with the closest tag, if available,
+ # to make the ref human readable
+ exit_code, output = self.source.check_output(
+ [self.source.host_git, 'describe', '--tags', '--abbrev=40', '--long', ref],
+ cwd=self.mirror)
+ if exit_code == 0:
+ ref = output.rstrip('\n')
+
+ if not track_tags:
+ return ref, []
+
+ tags = set()
+ for options in [[], ['--first-parent'], ['--tags'], ['--tags', '--first-parent']]:
+ exit_code, output = self.source.check_output(
+ [self.source.host_git, 'describe', '--abbrev=0', ref, *options],
+ cwd=self.mirror)
+ if exit_code == 0:
+ tag = output.strip()
+ _, commit_ref = self.source.check_output(
+ [self.source.host_git, 'rev-parse', tag + '^{commit}'],
+ fail="Unable to resolve tag '{}'".format(tag),
+ cwd=self.mirror)
+ exit_code = self.source.call(
+ [self.source.host_git, 'cat-file', 'tag', tag],
+ cwd=self.mirror)
+ annotated = (exit_code == 0)
+
+ tags.add((tag, commit_ref.strip(), annotated))
+
+ return ref, list(tags)
+
+ def stage(self, directory):
+ fullpath = os.path.join(directory, self.path)
+
+        # Using --shared here avoids copying the objects into the checkout; in
+        # any case we're just checking out a specific commit and then removing
+        # the .git/ directory.
+ self.source.call([self.source.host_git, 'clone', '--no-checkout', '--shared', self.mirror, fullpath],
+ fail="Failed to create git mirror {} in directory: {}".format(self.mirror, fullpath),
+ fail_temporarily=True)
+
+ self.source.call([self.source.host_git, 'checkout', '--force', self.ref],
+ fail="Failed to checkout git ref {}".format(self.ref),
+ cwd=fullpath)
+
+ # Remove .git dir
+ shutil.rmtree(os.path.join(fullpath, ".git"))
+
+ self._rebuild_git(fullpath)
+
+ def init_workspace(self, directory):
+ fullpath = os.path.join(directory, self.path)
+ url = self.source.translate_url(self.url)
+
+ self.source.call([self.source.host_git, 'clone', '--no-checkout', self.mirror, fullpath],
+ fail="Failed to clone git mirror {} in directory: {}".format(self.mirror, fullpath),
+ fail_temporarily=True)
+
+ self.source.call([self.source.host_git, 'remote', 'set-url', 'origin', url],
+ fail='Failed to add remote origin "{}"'.format(url),
+ cwd=fullpath)
+
+ self.source.call([self.source.host_git, 'checkout', '--force', self.ref],
+ fail="Failed to checkout git ref {}".format(self.ref),
+ cwd=fullpath)
+
+ # List the submodules (path/url tuples) present at the given ref of this repo
+ def submodule_list(self):
+ modules = "{}:{}".format(self.ref, GIT_MODULES)
+ exit_code, output = self.source.check_output(
+ [self.source.host_git, 'show', modules], cwd=self.mirror)
+
+ # If git show reports error code 128 here, we take it to mean there is
+ # no .gitmodules file to display for the given revision.
+ if exit_code == 128:
+ return
+ elif exit_code != 0:
+ raise SourceError(
+ "{plugin}: Failed to show gitmodules at ref {ref}".format(
+ plugin=self, ref=self.ref))
+
+ content = '\n'.join([l.strip() for l in output.splitlines()])
+
+ io = StringIO(content)
+ parser = RawConfigParser()
+ parser.read_file(io)
+
+ for section in parser.sections():
+ # validate section name against the 'submodule "foo"' pattern
+ if re.match(r'submodule "(.*)"', section):
+ path = parser.get(section, 'path')
+ url = parser.get(section, 'url')
+
+ yield (path, url)
+
+ # Fetch the ref which this mirror requires its submodule to have,
+ # at the given ref of this mirror.
+ def submodule_ref(self, submodule, ref=None):
+ if not ref:
+ ref = self.ref
+
+ # list objects in the parent repo tree to find the commit
+ # object that corresponds to the submodule
+ _, output = self.source.check_output([self.source.host_git, 'ls-tree', ref, submodule],
+ fail="ls-tree failed for commit {} and submodule: {}".format(
+ ref, submodule),
+ cwd=self.mirror)
+
+ # read the commit hash from the output
+ fields = output.split()
+ if len(fields) >= 2 and fields[1] == 'commit':
+ submodule_commit = output.split()[2]
+
+ # fail if the commit hash is invalid
+ if len(submodule_commit) != 40:
+ raise SourceError("{}: Error reading commit information for submodule '{}'"
+ .format(self.source, submodule))
+
+ return submodule_commit
+
+ else:
+ detail = "The submodule '{}' is defined either in the BuildStream source\n".format(submodule) + \
+ "definition, or in a .gitmodules file. But the submodule was never added to the\n" + \
+ "underlying git repository with `git submodule add`."
+
+ self.source.warn("{}: Ignoring inconsistent submodule '{}'"
+ .format(self.source, submodule), detail=detail,
+ warning_token=WARN_INCONSISTENT_SUBMODULE)
+
+ return None
+
+ def _rebuild_git(self, fullpath):
+ if not self.tags:
+ return
+
+ with self.source.tempdir() as tmpdir:
+ included = set()
+ shallow = set()
+ for _, commit_ref, _ in self.tags:
+
+ if commit_ref == self.ref:
+ # rev-list does not work in case of same rev
+ shallow.add(self.ref)
+ else:
+ _, out = self.source.check_output([self.source.host_git, 'rev-list',
+ '--ancestry-path', '--boundary',
+ '{}..{}'.format(commit_ref, self.ref)],
+ fail="Failed to get git history {}..{} in directory: {}"
+ .format(commit_ref, self.ref, fullpath),
+ fail_temporarily=True,
+ cwd=self.mirror)
+ self.source.warn("refs {}..{}: {}".format(commit_ref, self.ref, out.splitlines()))
+ for line in out.splitlines():
+ rev = line.lstrip('-')
+ if line[0] == '-':
+ shallow.add(rev)
+ else:
+ included.add(rev)
+
+ shallow -= included
+ included |= shallow
+
+ self.source.call([self.source.host_git, 'init'],
+ fail="Cannot initialize git repository: {}".format(fullpath),
+ cwd=fullpath)
+
+ for rev in included:
+ with TemporaryFile(dir=tmpdir) as commit_file:
+ self.source.call([self.source.host_git, 'cat-file', 'commit', rev],
+ stdout=commit_file,
+ fail="Failed to get commit {}".format(rev),
+ cwd=self.mirror)
+ commit_file.seek(0, 0)
+ self.source.call([self.source.host_git, 'hash-object', '-w', '-t', 'commit', '--stdin'],
+ stdin=commit_file,
+ fail="Failed to add commit object {}".format(rev),
+ cwd=fullpath)
+
+ with open(os.path.join(fullpath, '.git', 'shallow'), 'w') as shallow_file:
+ for rev in shallow:
+ shallow_file.write('{}\n'.format(rev))
+
+ for tag, commit_ref, annotated in self.tags:
+ if annotated:
+ with TemporaryFile(dir=tmpdir) as tag_file:
+ tag_data = 'object {}\ntype commit\ntag {}\n'.format(commit_ref, tag)
+ tag_file.write(tag_data.encode('ascii'))
+ tag_file.seek(0, 0)
+ _, tag_ref = self.source.check_output(
+ [self.source.host_git, 'hash-object', '-w', '-t',
+ 'tag', '--stdin'],
+ stdin=tag_file,
+ fail="Failed to add tag object {}".format(tag),
+ cwd=fullpath)
+
+ self.source.call([self.source.host_git, 'tag', tag, tag_ref.strip()],
+ fail="Failed to tag: {}".format(tag),
+ cwd=fullpath)
+ else:
+ self.source.call([self.source.host_git, 'tag', tag, commit_ref],
+ fail="Failed to tag: {}".format(tag),
+ cwd=fullpath)
+
+ with open(os.path.join(fullpath, '.git', 'HEAD'), 'w') as head:
+ self.source.call([self.source.host_git, 'rev-parse', self.ref],
+ stdout=head,
+ fail="Failed to parse commit {}".format(self.ref),
+ cwd=self.mirror)
+
+
+class _GitSourceBase(Source):
+ # pylint: disable=attribute-defined-outside-init
+
+ # The GitMirror class which this plugin uses. This may be
+ # overridden in derived plugins as long as the replacement class
+ # follows the same interface used by the _GitMirror class
+ BST_MIRROR_CLASS = _GitMirror
+
+ def configure(self, node):
+ ref = self.node_get_member(node, str, 'ref', None)
+
+ config_keys = ['url', 'track', 'ref', 'submodules',
+ 'checkout-submodules', 'ref-format',
+ 'track-tags', 'tags']
+ self.node_validate(node, config_keys + Source.COMMON_CONFIG_KEYS)
+
+ tags_node = self.node_get_member(node, list, 'tags', [])
+ for tag_node in tags_node:
+ self.node_validate(tag_node, ['tag', 'commit', 'annotated'])
+
+ tags = self._load_tags(node)
+ self.track_tags = self.node_get_member(node, bool, 'track-tags', False)
+
+ self.original_url = self.node_get_member(node, str, 'url')
+ self.mirror = self.BST_MIRROR_CLASS(self, '', self.original_url, ref, tags=tags, primary=True)
+ self.tracking = self.node_get_member(node, str, 'track', None)
+
+ self.ref_format = self.node_get_member(node, str, 'ref-format', 'sha1')
+ if self.ref_format not in ['sha1', 'git-describe']:
+ provenance = self.node_provenance(node, member_name='ref-format')
+ raise SourceError("{}: Unexpected value for ref-format: {}".format(provenance, self.ref_format))
+
+ # At this point we now know if the source has a ref and/or a track.
+ # If it is missing both then we will be unable to track or build.
+ if self.mirror.ref is None and self.tracking is None:
+ raise SourceError("{}: Git sources require a ref and/or track".format(self),
+ reason="missing-track-and-ref")
+
+ self.checkout_submodules = self.node_get_member(node, bool, 'checkout-submodules', True)
+ self.submodules = []
+
+ # Parse a dict of submodule overrides, stored in the submodule_overrides
+ # and submodule_checkout_overrides dictionaries.
+ self.submodule_overrides = {}
+ self.submodule_checkout_overrides = {}
+ modules = self.node_get_member(node, Mapping, 'submodules', {})
+ for path, _ in self.node_items(modules):
+ submodule = self.node_get_member(modules, Mapping, path)
+ url = self.node_get_member(submodule, str, 'url', None)
+
+ # Make sure to mark all URLs that are specified in the configuration
+ if url:
+ self.mark_download_url(url, primary=False)
+
+ self.submodule_overrides[path] = url
+ if 'checkout' in submodule:
+ checkout = self.node_get_member(submodule, bool, 'checkout')
+ self.submodule_checkout_overrides[path] = checkout
+
+ self.mark_download_url(self.original_url)
+
+ def preflight(self):
+ # Check if git is installed, get the binary at the same time
+ self.host_git = utils.get_host_tool('git')
+
+ def get_unique_key(self):
+        # Here we want to encode the local name of the repository and
+        # the ref; if the user changes the alias to fetch the same sources
+        # from another location, it should not affect the cache key.
+ key = [self.original_url, self.mirror.ref]
+ if self.mirror.tags:
+ tags = {tag: (commit, annotated) for tag, commit, annotated in self.mirror.tags}
+ key.append({'tags': tags})
+
+ # Only modify the cache key with checkout_submodules if it's something
+ # other than the default behaviour.
+ if self.checkout_submodules is False:
+ key.append({"checkout_submodules": self.checkout_submodules})
+
+ # We want the cache key to change if the source was
+ # configured differently, and submodules count.
+ if self.submodule_overrides:
+ key.append(self.submodule_overrides)
+
+ if self.submodule_checkout_overrides:
+ key.append({"submodule_checkout_overrides": self.submodule_checkout_overrides})
+
+ return key
+
+ def get_consistency(self):
+ if self._have_all_refs():
+ return Consistency.CACHED
+ elif self.mirror.ref is not None:
+ return Consistency.RESOLVED
+ return Consistency.INCONSISTENT
+
+ def load_ref(self, node):
+ self.mirror.ref = self.node_get_member(node, str, 'ref', None)
+ self.mirror.tags = self._load_tags(node)
+
+ def get_ref(self):
+ return self.mirror.ref, self.mirror.tags
+
+ def set_ref(self, ref_data, node):
+ if not ref_data:
+ self.mirror.ref = None
+ if 'ref' in node:
+ del node['ref']
+ self.mirror.tags = []
+ if 'tags' in node:
+ del node['tags']
+ else:
+ ref, tags = ref_data
+ node['ref'] = self.mirror.ref = ref
+ self.mirror.tags = tags
+ if tags:
+ node['tags'] = []
+ for tag, commit_ref, annotated in tags:
+ data = {'tag': tag,
+ 'commit': commit_ref,
+ 'annotated': annotated}
+ node['tags'].append(data)
+ else:
+ if 'tags' in node:
+ del node['tags']
+
+ def track(self):
+
+        # If self.tracking is not specified, it's not an error; just silently return
+ if not self.tracking:
+            # Is there a better way to check if a ref is given?
+ if self.mirror.ref is None:
+                detail = 'Without a tracking branch, the ref cannot be updated. Please ' + \
+                    'provide a ref or a track.'
+ raise SourceError("{}: No track or ref".format(self),
+ detail=detail, reason="track-attempt-no-track")
+ return None
+
+ # Resolve the URL for the message
+ resolved_url = self.translate_url(self.mirror.url)
+ with self.timed_activity("Tracking {} from {}"
+ .format(self.tracking, resolved_url),
+ silent_nested=True):
+ self.mirror.ensure()
+ self.mirror._fetch()
+
+ # Update self.mirror.ref and node.ref from the self.tracking branch
+ ret = self.mirror.latest_commit_with_tags(self.tracking, self.track_tags)
+
+ return ret
+
+ def init_workspace(self, directory):
+ # XXX: may wish to refactor this as some code dupe with stage()
+ self._refresh_submodules()
+
+ with self.timed_activity('Setting up workspace "{}"'.format(directory), silent_nested=True):
+ self.mirror.init_workspace(directory)
+ for mirror in self.submodules:
+ mirror.init_workspace(directory)
+
+ def stage(self, directory):
+
+        # We need to refresh the submodule list here again, because
+        # it's possible that we did not load with submodules present
+        # in the main process (the source needed fetching), so we may
+        # not know about the submodules yet come time to build.
+ #
+ self._refresh_submodules()
+
+ # Stage the main repo in the specified directory
+ #
+ with self.timed_activity("Staging {}".format(self.mirror.url), silent_nested=True):
+ self.mirror.stage(directory)
+ for mirror in self.submodules:
+ mirror.stage(directory)
+
+ def get_source_fetchers(self):
+ yield self.mirror
+ self._refresh_submodules()
+ for submodule in self.submodules:
+ yield submodule
+
+ def validate_cache(self):
+ discovered_submodules = {}
+ unlisted_submodules = []
+ invalid_submodules = []
+
+ for path, url in self.mirror.submodule_list():
+ discovered_submodules[path] = url
+ if self._ignore_submodule(path):
+ continue
+
+ override_url = self.submodule_overrides.get(path)
+ if not override_url:
+ unlisted_submodules.append((path, url))
+
+ # Warn about submodules which are explicitly configured but do not exist
+ for path, url in self.submodule_overrides.items():
+ if path not in discovered_submodules:
+ invalid_submodules.append((path, url))
+
+ if invalid_submodules:
+ detail = []
+ for path, url in invalid_submodules:
+ detail.append(" Submodule URL '{}' at path '{}'".format(url, path))
+
+ self.warn("{}: Invalid submodules specified".format(self),
+ warning_token=WARN_INVALID_SUBMODULE,
+ detail="The following submodules are specified in the source "
+ "description but do not exist according to the repository\n\n" +
+ "\n".join(detail))
+
+ # Warn about submodules which exist but have not been explicitly configured
+ if unlisted_submodules:
+ detail = []
+ for path, url in unlisted_submodules:
+ detail.append(" Submodule URL '{}' at path '{}'".format(url, path))
+
+ self.warn("{}: Unlisted submodules exist".format(self),
+ warning_token=WARN_UNLISTED_SUBMODULE,
+ detail="The following submodules exist but are not specified " +
+ "in the source description\n\n" +
+ "\n".join(detail))
+
+ # Assert that the ref exists in the track tag/branch, if track has been specified.
+ ref_in_track = False
+ if self.tracking:
+ _, branch = self.check_output([self.host_git, 'branch', '--list', self.tracking,
+ '--contains', self.mirror.ref],
+ cwd=self.mirror.mirror)
+ if branch:
+ ref_in_track = True
+ else:
+ _, tag = self.check_output([self.host_git, 'tag', '--list', self.tracking,
+ '--contains', self.mirror.ref],
+ cwd=self.mirror.mirror)
+ if tag:
+ ref_in_track = True
+
+ if not ref_in_track:
+ detail = "The ref provided for the element does not exist locally " + \
+ "in the provided track branch / tag '{}'.\n".format(self.tracking) + \
+ "You may wish to track the element to update the ref from '{}' ".format(self.tracking) + \
+ "with `bst source track`,\n" + \
+ "or examine the upstream at '{}' for the specific ref.".format(self.mirror.url)
+
+ self.warn("{}: expected ref '{}' was not found in given track '{}' for staged repository: '{}'\n"
+ .format(self, self.mirror.ref, self.tracking, self.mirror.url),
+ detail=detail, warning_token=CoreWarnings.REF_NOT_IN_TRACK)
+
+ ###########################################################
+ # Local Functions #
+ ###########################################################
+
+ def _have_all_refs(self):
+ if not self.mirror.has_ref():
+ return False
+
+ self._refresh_submodules()
+ for mirror in self.submodules:
+ if not os.path.exists(mirror.mirror):
+ return False
+ if not mirror.has_ref():
+ return False
+
+ return True
+
+ # Refreshes the BST_MIRROR_CLASS objects for submodules
+ #
+ # Assumes that we have our mirror and we have the ref which we point to
+ #
+ def _refresh_submodules(self):
+ self.mirror.ensure()
+ submodules = []
+
+ for path, url in self.mirror.submodule_list():
+
+ # Completely ignore submodules which are disabled for checkout
+ if self._ignore_submodule(path):
+ continue
+
+ # Allow configuration to override the upstream
+ # location of the submodules.
+ override_url = self.submodule_overrides.get(path)
+ if override_url:
+ url = override_url
+
+ ref = self.mirror.submodule_ref(path)
+ if ref is not None:
+ mirror = self.BST_MIRROR_CLASS(self, path, url, ref)
+ submodules.append(mirror)
+
+ self.submodules = submodules
+
+ def _load_tags(self, node):
+ tags = []
+ tags_node = self.node_get_member(node, list, 'tags', [])
+ for tag_node in tags_node:
+ tag = self.node_get_member(tag_node, str, 'tag')
+ commit_ref = self.node_get_member(tag_node, str, 'commit')
+ annotated = self.node_get_member(tag_node, bool, 'annotated')
+ tags.append((tag, commit_ref, annotated))
+ return tags
+
+ # Checks whether the plugin configuration has explicitly
+ # configured this submodule to be ignored
+ def _ignore_submodule(self, path):
+ try:
+ checkout = self.submodule_checkout_overrides[path]
+ except KeyError:
+ checkout = self.checkout_submodules
+
+ return not checkout
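submodule_list() above parses the .gitmodules blob with RawConfigParser, first stripping indentation so that the tab-indented values are not mis-parsed as continuation lines. The same parsing, shown standalone on a made-up .gitmodules snippet:

    import re
    from configparser import RawConfigParser
    from io import StringIO

    # Hypothetical .gitmodules content, for illustration only.
    GITMODULES_TEXT = '[submodule "plugins/foo"]\n\tpath = plugins/foo\n\turl = https://example.com/foo.git\n'

    # Strip indentation, then read path/url pairs from each 'submodule "..."' section.
    content = '\n'.join(line.strip() for line in GITMODULES_TEXT.splitlines())
    parser = RawConfigParser()
    parser.read_file(StringIO(content))

    for section in parser.sections():
        if re.match(r'submodule "(.*)"', section):
            print(parser.get(section, 'path'), parser.get(section, 'url'))
    # prints: plugins/foo https://example.com/foo.git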
diff --git a/src/buildstream/_includes.py b/src/buildstream/_includes.py
new file mode 100644
index 000000000..f792b7716
--- /dev/null
+++ b/src/buildstream/_includes.py
@@ -0,0 +1,145 @@
+import os
+from . import _yaml
+from ._exceptions import LoadError, LoadErrorReason
+
+
+# Includes()
+#
+# This takes care of processing include directives "(@)".
+#
+# Args:
+# loader (Loader): The Loader object
+#    copy_tree (bool): Whether to make a copy of the tree in
+#                      provenance. Should be true if it is intended
+#                      to be serialized.
+class Includes:
+
+ def __init__(self, loader, *, copy_tree=False):
+ self._loader = loader
+ self._loaded = {}
+ self._copy_tree = copy_tree
+
+ # process()
+ #
+ # Process recursively include directives in a YAML node.
+ #
+ # Args:
+ # node (dict): A YAML node
+ # included (set): Fail for recursion if trying to load any files in this set
+ # current_loader (Loader): Use alternative loader (for junction files)
+ # only_local (bool): Whether to ignore junction files
+ def process(self, node, *,
+ included=set(),
+ current_loader=None,
+ only_local=False):
+ if current_loader is None:
+ current_loader = self._loader
+
+ includes = _yaml.node_get(node, None, '(@)', default_value=None)
+ if isinstance(includes, str):
+ includes = [includes]
+
+ if not isinstance(includes, list) and includes is not None:
+ provenance = _yaml.node_get_provenance(node, key='(@)')
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: {} must either be list or str".format(provenance, includes))
+
+ include_provenance = None
+ if includes:
+ include_provenance = _yaml.node_get_provenance(node, key='(@)')
+ _yaml.node_del(node, '(@)')
+
+ for include in reversed(includes):
+ if only_local and ':' in include:
+ continue
+ try:
+ include_node, file_path, sub_loader = self._include_file(include,
+ current_loader)
+ except LoadError as e:
+ if e.reason == LoadErrorReason.MISSING_FILE:
+ message = "{}: Include block references a file that could not be found: '{}'.".format(
+ include_provenance, include)
+ raise LoadError(LoadErrorReason.MISSING_FILE, message) from e
+ elif e.reason == LoadErrorReason.LOADING_DIRECTORY:
+ message = "{}: Include block references a directory instead of a file: '{}'.".format(
+ include_provenance, include)
+ raise LoadError(LoadErrorReason.LOADING_DIRECTORY, message) from e
+ else:
+ raise
+
+ if file_path in included:
+ raise LoadError(LoadErrorReason.RECURSIVE_INCLUDE,
+ "{}: trying to recursively include {}". format(include_provenance,
+ file_path))
+ # Because the included node will be modified, we need
+ # to copy it so that we do not modify the toplevel
+ # node of the provenance.
+ include_node = _yaml.node_copy(include_node)
+
+ try:
+ included.add(file_path)
+ self.process(include_node, included=included,
+ current_loader=sub_loader,
+ only_local=only_local)
+ finally:
+ included.remove(file_path)
+
+ _yaml.composite_and_move(node, include_node)
+
+ for _, value in _yaml.node_items(node):
+ self._process_value(value,
+ included=included,
+ current_loader=current_loader,
+ only_local=only_local)
+
+ # _include_file()
+ #
+    # Load an include YAML file with a loader.
+ #
+ # Args:
+    #    include (str): file path relative to loader's project directory.
+    #                   Can be prefixed with a junction name.
+ # loader (Loader): Loader for the current project.
+ def _include_file(self, include, loader):
+ shortname = include
+ if ':' in include:
+ junction, include = include.split(':', 1)
+ junction_loader = loader._get_loader(junction, fetch_subprojects=True)
+ current_loader = junction_loader
+ else:
+ current_loader = loader
+ project = current_loader.project
+ directory = project.directory
+ file_path = os.path.join(directory, include)
+ key = (current_loader, file_path)
+ if key not in self._loaded:
+ self._loaded[key] = _yaml.load(file_path,
+ shortname=shortname,
+ project=project,
+ copy_tree=self._copy_tree)
+ return self._loaded[key], file_path, current_loader
+
+ # _process_value()
+ #
+ # Select processing for value that could be a list or a dictionary.
+ #
+ # Args:
+ # value: Value to process. Can be a list or a dictionary.
+ # included (set): Fail for recursion if trying to load any files in this set
+ # current_loader (Loader): Use alternative loader (for junction files)
+ # only_local (bool): Whether to ignore junction files
+ def _process_value(self, value, *,
+ included=set(),
+ current_loader=None,
+ only_local=False):
+ if _yaml.is_node(value):
+ self.process(value,
+ included=included,
+ current_loader=current_loader,
+ only_local=only_local)
+ elif isinstance(value, list):
+ for v in value:
+ self._process_value(v,
+ included=included,
+ current_loader=current_loader,
+ only_local=only_local)
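The important detail in Includes.process() above is the recursion guard: the set of files on the current include chain is threaded through the recursion, a file is added before descending into its own includes and removed in a finally block, and hitting a file that is already in the set is reported as a recursive include. The same pattern reduced to a generic sketch, independent of the _yaml API (load_file and the error type are hypothetical):

    def process_includes(path, load_file, included=None):
        # load_file(path) -> (data, child_include_paths); hypothetical helper.
        if included is None:
            included = set()
        if path in included:
            raise RuntimeError("trying to recursively include {}".format(path))

        data, children = load_file(path)
        included.add(path)
        try:
            for child in children:
                process_includes(child, load_file, included)
        finally:
            included.remove(path)

        return data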
diff --git a/src/buildstream/_loader/__init__.py b/src/buildstream/_loader/__init__.py
new file mode 100644
index 000000000..a2c31796e
--- /dev/null
+++ b/src/buildstream/_loader/__init__.py
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .metasource import MetaSource
+from .metaelement import MetaElement
+from .loader import Loader
diff --git a/src/buildstream/_loader/loadelement.py b/src/buildstream/_loader/loadelement.py
new file mode 100644
index 000000000..684c32554
--- /dev/null
+++ b/src/buildstream/_loader/loadelement.py
@@ -0,0 +1,181 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+# System imports
+from itertools import count
+
+from pyroaring import BitMap, FrozenBitMap # pylint: disable=no-name-in-module
+
+# BuildStream toplevel imports
+from .. import _yaml
+
+# Local package imports
+from .types import Symbol, Dependency
+
+
+# LoadElement():
+#
+# A transient object breaking down what is loaded, allowing us to
+# do complex operations in multiple passes.
+#
+# Args:
+# node (dict): A YAML loaded dictionary
+# name (str): The element name
+# loader (Loader): The Loader object for this element
+#
+class LoadElement():
+ # Dependency():
+ #
+ # A link from a LoadElement to its dependencies.
+ #
+ # Keeps a link to one of the current Element's dependencies, together with
+ # its dependency type.
+ #
+ # Args:
+ # element (LoadElement): a LoadElement on which there is a dependency
+ # dep_type (str): the type of dependency this dependency link is
+ class Dependency:
+ def __init__(self, element, dep_type):
+ self.element = element
+ self.dep_type = dep_type
+
+ _counter = count()
+
+ def __init__(self, node, filename, loader):
+
+ #
+ # Public members
+ #
+ self.node = node # The YAML node
+ self.name = filename # The element name
+ self.full_name = None # The element full name (with associated junction)
+ self.deps = None # The list of Dependency objects
+ self.node_id = next(self._counter)
+
+ #
+ # Private members
+ #
+ self._loader = loader # The Loader object
+ self._dep_cache = None # The dependency cache, to speed up depends()
+
+ #
+ # Initialization
+ #
+ if loader.project.junction:
+ # dependency is in subproject, qualify name
+ self.full_name = '{}:{}'.format(loader.project.junction.name, self.name)
+ else:
+ # dependency is in top-level project
+ self.full_name = self.name
+
+ # Ensure the root node is valid
+ _yaml.node_validate(self.node, [
+ 'kind', 'depends', 'sources', 'sandbox',
+ 'variables', 'environment', 'environment-nocache',
+ 'config', 'public', 'description',
+ 'build-depends', 'runtime-depends',
+ ])
+
+ self.dependencies = []
+
+ @property
+ def junction(self):
+ return self._loader.project.junction
+
+ # depends():
+ #
+ # Checks if this element depends on another element, directly
+ # or indirectly.
+ #
+ # Args:
+ # other (LoadElement): Another LoadElement
+ #
+ # Returns:
+ # (bool): True if this LoadElement depends on 'other'
+ #
+ def depends(self, other):
+ self._ensure_depends_cache()
+ return other.node_id in self._dep_cache
+
+ ###########################################
+ # Private Methods #
+ ###########################################
+ def _ensure_depends_cache(self):
+
+ if self._dep_cache:
+ return
+
+ self._dep_cache = BitMap()
+
+ for dep in self.dependencies:
+ elt = dep.element
+
+ # Ensure the cache of the element we depend on
+ elt._ensure_depends_cache()
+
+ # We depend on this element
+ self._dep_cache.add(elt.node_id)
+
+ # And we depend on everything this element depends on
+ self._dep_cache.update(elt._dep_cache)
+
+ self._dep_cache = FrozenBitMap(self._dep_cache)
+
+
+# _extract_depends_from_node():
+#
+# Creates an array of Dependency objects from a given dict node 'node',
+# allows both strings and dicts for expressing the dependency and
+# throws a comprehensive LoadError in the case that the node is malformed.
+#
+# After extracting depends, the symbol is deleted from the node
+#
+# Args:
+# node (dict): A YAML loaded dictionary
+#
+# Returns:
+# (list): a list of Dependency objects
+#
+def _extract_depends_from_node(node, *, key=None):
+ if key is None:
+ build_depends = _extract_depends_from_node(node, key=Symbol.BUILD_DEPENDS)
+ runtime_depends = _extract_depends_from_node(node, key=Symbol.RUNTIME_DEPENDS)
+ depends = _extract_depends_from_node(node, key=Symbol.DEPENDS)
+ return build_depends + runtime_depends + depends
+ elif key == Symbol.BUILD_DEPENDS:
+ default_dep_type = Symbol.BUILD
+ elif key == Symbol.RUNTIME_DEPENDS:
+ default_dep_type = Symbol.RUNTIME
+ elif key == Symbol.DEPENDS:
+ default_dep_type = None
+ else:
+ assert False, "Unexpected value of key '{}'".format(key)
+
+ depends = _yaml.node_get(node, list, key, default_value=[])
+ output_deps = []
+
+ for index, dep in enumerate(depends):
+ dep_provenance = _yaml.node_get_provenance(node, key=key, indices=[index])
+ dependency = Dependency(dep, dep_provenance, default_dep_type=default_dep_type)
+ output_deps.append(dependency)
+
+    # Now delete the field; we don't want it anymore
+ _yaml.node_del(node, key, safe=True)
+
+ return output_deps
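LoadElement.depends() above answers reachability queries from a per-element cache: each element's cache is the union of its direct dependencies' ids and their own caches, built bottom-up and then frozen. A simplified sketch of the same idea using plain Python sets instead of pyroaring bitmaps (the Node class and the element names are invented for illustration):

    from itertools import count


    class Node:
        _counter = count()

        def __init__(self, name, dependencies=()):
            self.name = name
            self.node_id = next(self._counter)
            self.dependencies = list(dependencies)
            self._dep_cache = None

        def _ensure_depends_cache(self):
            if self._dep_cache is not None:
                return
            self._dep_cache = set()
            for dep in self.dependencies:
                dep._ensure_depends_cache()
                self._dep_cache.add(dep.node_id)        # direct dependency
                self._dep_cache.update(dep._dep_cache)  # and its transitive closure

        def depends(self, other):
            self._ensure_depends_cache()
            return other.node_id in self._dep_cache


    base = Node("base.bst")
    lib = Node("lib.bst", [base])
    app = Node("app.bst", [lib])
    assert app.depends(base)        # indirect dependency is found via the cache
    assert not base.depends(app)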
diff --git a/src/buildstream/_loader/loader.py b/src/buildstream/_loader/loader.py
new file mode 100644
index 000000000..261ec40e4
--- /dev/null
+++ b/src/buildstream/_loader/loader.py
@@ -0,0 +1,710 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+import os
+from functools import cmp_to_key
+from collections.abc import Mapping
+
+from .._exceptions import LoadError, LoadErrorReason
+from .. import Consistency
+from .. import _yaml
+from ..element import Element
+from .._profile import Topics, PROFILER
+from .._includes import Includes
+
+from .types import Symbol
+from .loadelement import LoadElement, _extract_depends_from_node
+from .metaelement import MetaElement
+from .metasource import MetaSource
+from ..types import CoreWarnings
+from .._message import Message, MessageType
+
+
+# Loader():
+#
+# The Loader class does the heavy lifting of parsing target
+# bst files and ultimately transforming them into a list of MetaElements
+# with their own MetaSources, ready for instantiation by the core.
+#
+# Args:
+# context (Context): The Context object
+# project (Project): The toplevel Project object
+# parent (Loader): A parent Loader object, in the case this is a junctioned Loader
+#
+class Loader():
+
+ def __init__(self, context, project, *, parent=None):
+
+ # Ensure we have an absolute path for the base directory
+ basedir = project.element_path
+ if not os.path.isabs(basedir):
+ basedir = os.path.abspath(basedir)
+
+ #
+ # Public members
+ #
+ self.project = project # The associated Project
+
+ #
+ # Private members
+ #
+ self._context = context
+ self._options = project.options # Project options (OptionPool)
+ self._basedir = basedir # Base project directory
+ self._first_pass_options = project.first_pass_config.options # Project options (OptionPool)
+ self._parent = parent # The parent loader
+
+ self._meta_elements = {} # Dict of resolved meta elements by name
+ self._elements = {} # Dict of elements
+ self._loaders = {} # Dict of junction loaders
+
+ self._includes = Includes(self, copy_tree=True)
+
+ # load():
+ #
+ # Loads the project based on the parameters given to the constructor
+ #
+ # Args:
+ # rewritable (bool): Whether the loaded files should be rewritable
+ # this is a bit more expensive due to deep copies
+ # ticker (callable): An optional function for tracking load progress
+ # targets (list of str): Target, element-path relative bst filenames in the project
+ # fetch_subprojects (bool): Whether to fetch subprojects while loading
+ #
+ # Raises: LoadError
+ #
+    # Returns: The list of toplevel MetaElement objects
+ def load(self, targets, rewritable=False, ticker=None, fetch_subprojects=False):
+
+ for filename in targets:
+ if os.path.isabs(filename):
+ # XXX Should this just be an assertion ?
+ # Expect that the caller gives us the right thing at least ?
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "Target '{}' was not specified as a relative "
+ "path to the base project directory: {}"
+ .format(filename, self._basedir))
+
+ self._warn_invalid_elements(targets)
+
+ # First pass, recursively load files and populate our table of LoadElements
+ #
+ target_elements = []
+
+ for target in targets:
+ with PROFILER.profile(Topics.LOAD_PROJECT, target):
+ _junction, name, loader = self._parse_name(target, rewritable, ticker,
+ fetch_subprojects=fetch_subprojects)
+ element = loader._load_file(name, rewritable, ticker, fetch_subprojects)
+ target_elements.append(element)
+
+ #
+        # Now that we've resolved the dependencies, scan them for circular dependencies
+ #
+
+ # Set up a dummy element that depends on all top-level targets
+ # to resolve potential circular dependencies between them
+ dummy_target = LoadElement(_yaml.new_empty_node(), "", self)
+ dummy_target.dependencies.extend(
+ LoadElement.Dependency(element, Symbol.RUNTIME)
+ for element in target_elements
+ )
+
+ with PROFILER.profile(Topics.CIRCULAR_CHECK, "_".join(targets)):
+ self._check_circular_deps(dummy_target)
+
+ ret = []
+ #
+ # Sort direct dependencies of elements by their dependency ordering
+ #
+ for element in target_elements:
+ loader = element._loader
+ with PROFILER.profile(Topics.SORT_DEPENDENCIES, element.name):
+ loader._sort_dependencies(element)
+
+ # Finally, wrap what we have into LoadElements and return the target
+ #
+ ret.append(loader._collect_element(element))
+
+ self._clean_caches()
+
+ return ret
+
+    # _clean_caches()
+ #
+ # Clean internal loader caches, recursively
+ #
+    # When loading the elements, the loaders use caches in order to not load the
+    # same element twice. These are kept after loading and prevent garbage
+    # collection. Cleaning them explicitly is required.
+ #
+ def _clean_caches(self):
+ for loader in self._loaders.values():
+ # value may be None with nested junctions without overrides
+ if loader is not None:
+ loader._clean_caches()
+
+ self._meta_elements = {}
+ self._elements = {}
+
+ ###########################################
+ # Private Methods #
+ ###########################################
+
+ # _load_file():
+ #
+ # Recursively load bst files
+ #
+ # Args:
+ # filename (str): The element-path relative bst file
+ # rewritable (bool): Whether we should load in round trippable mode
+ # ticker (callable): A callback to report loaded filenames to the frontend
+ # fetch_subprojects (bool): Whether to fetch subprojects while loading
+ # provenance (Provenance): The location from where the file was referred to, or None
+ #
+ # Returns:
+ # (LoadElement): A loaded LoadElement
+ #
+ def _load_file(self, filename, rewritable, ticker, fetch_subprojects, provenance=None):
+
+ # Silently ignore already loaded files
+ if filename in self._elements:
+ return self._elements[filename]
+
+ # Call the ticker
+ if ticker:
+ ticker(filename)
+
+ # Load the data and process any conditional statements therein
+ fullpath = os.path.join(self._basedir, filename)
+ try:
+ node = _yaml.load(fullpath, shortname=filename, copy_tree=rewritable,
+ project=self.project)
+ except LoadError as e:
+ if e.reason == LoadErrorReason.MISSING_FILE:
+
+ if self.project.junction:
+ message = "Could not find element '{}' in project referred to by junction element '{}'" \
+ .format(filename, self.project.junction.name)
+ else:
+ message = "Could not find element '{}' in elements directory '{}'".format(filename, self._basedir)
+
+ if provenance:
+ message = "{}: {}".format(provenance, message)
+
+ # If we can't find the file, try to suggest plausible
+ # alternatives by stripping the element-path from the given
+ # filename, and verifying that it exists.
+ detail = None
+ elements_dir = os.path.relpath(self._basedir, self.project.directory)
+ element_relpath = os.path.relpath(filename, elements_dir)
+ if filename.startswith(elements_dir) and os.path.exists(os.path.join(self._basedir, element_relpath)):
+ detail = "Did you mean '{}'?".format(element_relpath)
+
+ raise LoadError(LoadErrorReason.MISSING_FILE,
+ message, detail=detail) from e
+
+ elif e.reason == LoadErrorReason.LOADING_DIRECTORY:
+ # If a <directory>.bst file exists in the element path,
+ # let's suggest this as a plausible alternative.
+ message = str(e)
+ if provenance:
+ message = "{}: {}".format(provenance, message)
+ detail = None
+ if os.path.exists(os.path.join(self._basedir, filename + '.bst')):
+ element_name = filename + '.bst'
+ detail = "Did you mean '{}'?\n".format(element_name)
+ raise LoadError(LoadErrorReason.LOADING_DIRECTORY,
+ message, detail=detail) from e
+ else:
+ raise
+ kind = _yaml.node_get(node, str, Symbol.KIND)
+ if kind == "junction":
+ self._first_pass_options.process_node(node)
+ else:
+ self.project.ensure_fully_loaded()
+
+ self._includes.process(node)
+
+ self._options.process_node(node)
+
+ element = LoadElement(node, filename, self)
+
+ self._elements[filename] = element
+
+ dependencies = _extract_depends_from_node(node)
+
+ # Load all dependency files for the new LoadElement
+ for dep in dependencies:
+ if dep.junction:
+ self._load_file(dep.junction, rewritable, ticker, fetch_subprojects, dep.provenance)
+ loader = self._get_loader(dep.junction, rewritable=rewritable, ticker=ticker,
+ fetch_subprojects=fetch_subprojects, provenance=dep.provenance)
+ else:
+ loader = self
+
+ dep_element = loader._load_file(dep.name, rewritable, ticker,
+ fetch_subprojects, dep.provenance)
+
+ if _yaml.node_get(dep_element.node, str, Symbol.KIND) == 'junction':
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Cannot depend on junction"
+ .format(dep.provenance))
+
+ element.dependencies.append(LoadElement.Dependency(dep_element, dep.dep_type))
+
+ deps_names = [dep.name for dep in dependencies]
+ self._warn_invalid_elements(deps_names)
+
+ return element
+
+ # _check_circular_deps():
+ #
+ # Detect circular dependencies on LoadElements with
+ # dependencies already resolved.
+ #
+ # Args:
+ # element (str): The element to check
+ #
+ # Raises:
+ # (LoadError): In case there was a circular dependency error
+ #
+ def _check_circular_deps(self, element, check_elements=None, validated=None, sequence=None):
+
+ if check_elements is None:
+ check_elements = set()
+ if validated is None:
+ validated = set()
+ if sequence is None:
+ sequence = []
+
+ # Skip already validated branches
+ if element in validated:
+ return
+
+ if element in check_elements:
+ # Create `chain`, the loop of element dependencies from this
+ # element back to itself, by trimming everything before this
+ # element from the sequence under consideration.
+ chain = sequence[sequence.index(element.full_name):]
+ chain.append(element.full_name)
+ raise LoadError(LoadErrorReason.CIRCULAR_DEPENDENCY,
+ ("Circular dependency detected at element: {}\n" +
+ "Dependency chain: {}")
+ .format(element.full_name, " -> ".join(chain)))
+
+ # Push / Check each dependency / Pop
+ check_elements.add(element)
+ sequence.append(element.full_name)
+ for dep in element.dependencies:
+ dep.element._loader._check_circular_deps(dep.element, check_elements, validated, sequence)
+ check_elements.remove(element)
+ sequence.pop()
+
+ # Eliminate duplicate paths
+ validated.add(element)
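+
+    # A minimal sketch of the chain construction above, using assumed
+    # element names (not taken from any real project):
+    #
+    #   sequence = ['a.bst', 'b.bst', 'c.bst']
+    #   # encountering 'b.bst' a second time trims the sequence to the loop:
+    #   sequence[sequence.index('b.bst'):] + ['b.bst']
+    #   # == ['b.bst', 'c.bst', 'b.bst']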
+
+ # _sort_dependencies():
+ #
+ # Sort dependencies of each element by their dependencies,
+ # so that direct dependencies which depend on other direct
+ # dependencies (directly or indirectly) appear later in the
+ # list.
+ #
+ # This avoids the need for performing multiple topological
+ # sorts throughout the build process.
+ #
+ # Args:
+ # element (LoadElement): The element to sort
+ #
+ def _sort_dependencies(self, element, visited=None):
+ if visited is None:
+ visited = set()
+
+ if element in visited:
+ return
+
+ for dep in element.dependencies:
+ dep.element._loader._sort_dependencies(dep.element, visited=visited)
+
+ def dependency_cmp(dep_a, dep_b):
+ element_a = dep_a.element
+ element_b = dep_b.element
+
+ # Sort on inter element dependency first
+ if element_a.depends(element_b):
+ return 1
+ elif element_b.depends(element_a):
+ return -1
+
+ # If there are no inter element dependencies, place
+ # runtime only dependencies last
+ if dep_a.dep_type != dep_b.dep_type:
+ if dep_a.dep_type == Symbol.RUNTIME:
+ return 1
+ elif dep_b.dep_type == Symbol.RUNTIME:
+ return -1
+
+ # All things being equal, string comparison.
+ if element_a.name > element_b.name:
+ return 1
+ elif element_a.name < element_b.name:
+ return -1
+
+ # Sort local elements before junction elements
+ # and use string comparison between junction elements
+ if element_a.junction and element_b.junction:
+ if element_a.junction > element_b.junction:
+ return 1
+ elif element_a.junction < element_b.junction:
+ return -1
+ elif element_a.junction:
+ return -1
+ elif element_b.junction:
+ return 1
+
+            # This won't ever happen
+ return 0
+
+ # Now dependency sort, we ensure that if any direct dependency
+ # directly or indirectly depends on another direct dependency,
+ # it is found later in the list.
+ element.dependencies.sort(key=cmp_to_key(dependency_cmp))
+
+ visited.add(element)
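+
+    # The comparator above is adapted for list.sort() with
+    # functools.cmp_to_key(); a standalone sketch with assumed names:
+    #
+    #   from functools import cmp_to_key
+    #   deps = ['c.bst', 'a.bst', 'b.bst']
+    #   deps.sort(key=cmp_to_key(lambda a, b: (a > b) - (a < b)))
+    #   # deps == ['a.bst', 'b.bst', 'c.bst']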
+
+ # _collect_element()
+ #
+    # Collect a MetaElement for the given element, recursively collecting its dependencies
+ #
+ # Args:
+ # element (LoadElement): The element for which to load a MetaElement
+ #
+ # Returns:
+ # (MetaElement): A recursively loaded MetaElement
+ #
+ def _collect_element(self, element):
+ # Return the already built one, if we already built it
+ meta_element = self._meta_elements.get(element.name)
+ if meta_element:
+ return meta_element
+
+ node = element.node
+ elt_provenance = _yaml.node_get_provenance(node)
+ meta_sources = []
+
+ sources = _yaml.node_get(node, list, Symbol.SOURCES, default_value=[])
+ element_kind = _yaml.node_get(node, str, Symbol.KIND)
+
+ # Safe loop calling into _yaml.node_get() for each element ensures
+ # we have good error reporting
+ for i in range(len(sources)):
+ source = _yaml.node_get(node, Mapping, Symbol.SOURCES, indices=[i])
+ kind = _yaml.node_get(source, str, Symbol.KIND)
+ _yaml.node_del(source, Symbol.KIND)
+
+ # Directory is optional
+ directory = _yaml.node_get(source, str, Symbol.DIRECTORY, default_value=None)
+ if directory:
+ _yaml.node_del(source, Symbol.DIRECTORY)
+
+ index = sources.index(source)
+ meta_source = MetaSource(element.name, index, element_kind, kind, source, directory)
+ meta_sources.append(meta_source)
+
+ meta_element = MetaElement(self.project, element.name, element_kind,
+ elt_provenance, meta_sources,
+ _yaml.node_get(node, Mapping, Symbol.CONFIG, default_value={}),
+ _yaml.node_get(node, Mapping, Symbol.VARIABLES, default_value={}),
+ _yaml.node_get(node, Mapping, Symbol.ENVIRONMENT, default_value={}),
+ _yaml.node_get(node, list, Symbol.ENV_NOCACHE, default_value=[]),
+ _yaml.node_get(node, Mapping, Symbol.PUBLIC, default_value={}),
+ _yaml.node_get(node, Mapping, Symbol.SANDBOX, default_value={}),
+ element_kind == 'junction')
+
+ # Cache it now, make sure it's already there before recursing
+ self._meta_elements[element.name] = meta_element
+
+ # Descend
+ for dep in element.dependencies:
+ loader = dep.element._loader
+ meta_dep = loader._collect_element(dep.element)
+ if dep.dep_type != 'runtime':
+ meta_element.build_dependencies.append(meta_dep)
+ if dep.dep_type != 'build':
+ meta_element.dependencies.append(meta_dep)
+
+ return meta_element
+
+ # _get_loader():
+ #
+ # Return loader for specified junction
+ #
+ # Args:
+ # filename (str): Junction name
+ # fetch_subprojects (bool): Whether to fetch subprojects while loading
+ #
+ # Raises: LoadError
+ #
+    # Returns: A Loader, or None if the specified junction does not exist
+ def _get_loader(self, filename, *, rewritable=False, ticker=None, level=0,
+ fetch_subprojects=False, provenance=None):
+
+ provenance_str = ""
+ if provenance is not None:
+ provenance_str = "{}: ".format(provenance)
+
+ # return previously determined result
+ if filename in self._loaders:
+ loader = self._loaders[filename]
+
+ if loader is None:
+ # do not allow junctions with the same name in different
+ # subprojects
+ raise LoadError(LoadErrorReason.CONFLICTING_JUNCTION,
+ "{}Conflicting junction {} in subprojects, define junction in {}"
+ .format(provenance_str, filename, self.project.name))
+
+ return loader
+
+ if self._parent:
+ # junctions in the parent take precedence over junctions defined
+ # in subprojects
+ loader = self._parent._get_loader(filename, rewritable=rewritable, ticker=ticker,
+ level=level + 1, fetch_subprojects=fetch_subprojects,
+ provenance=provenance)
+ if loader:
+ self._loaders[filename] = loader
+ return loader
+
+ try:
+ self._load_file(filename, rewritable, ticker, fetch_subprojects)
+ except LoadError as e:
+ if e.reason != LoadErrorReason.MISSING_FILE:
+ # other load error
+ raise
+
+ if level == 0:
+ # junction element not found in this or ancestor projects
+ raise
+ else:
+ # mark junction as not available to allow detection of
+ # conflicting junctions in subprojects
+ self._loaders[filename] = None
+ return None
+
+ # meta junction element
+ meta_element = self._collect_element(self._elements[filename])
+ if meta_element.kind != 'junction':
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}{}: Expected junction but element kind is {}".format(
+ provenance_str, filename, meta_element.kind))
+
+ element = Element._new_from_meta(meta_element)
+ element._preflight()
+
+ # If this junction element points to a sub-sub-project, we need to
+ # find loader for that project.
+ if element.target:
+ subproject_loader = self._get_loader(element.target_junction, rewritable=rewritable, ticker=ticker,
+ level=level, fetch_subprojects=fetch_subprojects,
+ provenance=provenance)
+ loader = subproject_loader._get_loader(element.target_element, rewritable=rewritable, ticker=ticker,
+ level=level, fetch_subprojects=fetch_subprojects,
+ provenance=provenance)
+ self._loaders[filename] = loader
+ return loader
+
+ sources = list(element.sources())
+ if not element._source_cached():
+ for idx, source in enumerate(sources):
+ # Handle the case where a subproject needs to be fetched
+ #
+ if source.get_consistency() == Consistency.RESOLVED:
+ if fetch_subprojects:
+ if ticker:
+ ticker(filename, 'Fetching subproject from {} source'.format(source.get_kind()))
+ source._fetch(sources[0:idx])
+ else:
+ detail = "Try fetching the project with `bst source fetch {}`".format(filename)
+ raise LoadError(LoadErrorReason.SUBPROJECT_FETCH_NEEDED,
+ "{}Subproject fetch needed for junction: {}".format(provenance_str, filename),
+ detail=detail)
+
+ # Handle the case where a subproject has no ref
+ #
+ elif source.get_consistency() == Consistency.INCONSISTENT:
+ detail = "Try tracking the junction element with `bst source track {}`".format(filename)
+ raise LoadError(LoadErrorReason.SUBPROJECT_INCONSISTENT,
+ "{}Subproject has no ref for junction: {}".format(provenance_str, filename),
+ detail=detail)
+
+ workspace = element._get_workspace()
+ if workspace:
+ # If a workspace is open, load it from there instead
+ basedir = workspace.get_absolute_path()
+ elif len(sources) == 1 and sources[0]._get_local_path():
+ # Optimization for junctions with a single local source
+ basedir = sources[0]._get_local_path()
+ else:
+ # Stage sources
+ element._set_required()
+ basedir = os.path.join(self.project.directory, ".bst", "staged-junctions",
+ filename, element._get_cache_key())
+ if not os.path.exists(basedir):
+ os.makedirs(basedir, exist_ok=True)
+ element._stage_sources_at(basedir, mount_workspaces=False)
+
+ # Load the project
+ project_dir = os.path.join(basedir, element.path)
+ try:
+ from .._project import Project # pylint: disable=cyclic-import
+ project = Project(project_dir, self._context, junction=element,
+ parent_loader=self, search_for_project=False)
+ except LoadError as e:
+ if e.reason == LoadErrorReason.MISSING_PROJECT_CONF:
+ message = (
+ provenance_str + "Could not find the project.conf file in the project "
+ "referred to by junction element '{}'.".format(element.name)
+ )
+ if element.path:
+ message += " Was expecting it at path '{}' in the junction's source.".format(element.path)
+ raise LoadError(reason=LoadErrorReason.INVALID_JUNCTION,
+ message=message) from e
+ else:
+ raise
+
+ loader = project.loader
+ self._loaders[filename] = loader
+
+ return loader
+
+ # _parse_name():
+ #
+ # Get junction and base name of element along with loader for the sub-project
+ #
+ # Args:
+ # name (str): Name of target
+ # rewritable (bool): Whether the loaded files should be rewritable
+ # this is a bit more expensive due to deep copies
+ # ticker (callable): An optional function for tracking load progress
+ # fetch_subprojects (bool): Whether to fetch subprojects while loading
+ #
+ # Returns:
+ # (tuple): - (str): name of the junction element
+ # - (str): name of the element
+ # - (Loader): loader for sub-project
+ #
+ def _parse_name(self, name, rewritable, ticker, fetch_subprojects=False):
+        # We allow splitting only once, since deep junction names are forbidden.
+        # Users who want to refer to elements in sub-sub-projects are required
+        # to create junctions in the top level project.
+ junction_path = name.rsplit(':', 1)
+ if len(junction_path) == 1:
+ return None, junction_path[-1], self
+ else:
+ self._load_file(junction_path[-2], rewritable, ticker, fetch_subprojects)
+ loader = self._get_loader(junction_path[-2], rewritable=rewritable, ticker=ticker,
+ fetch_subprojects=fetch_subprojects)
+ return junction_path[-2], junction_path[-1], loader
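+
+    # Illustrative behaviour of the split above (element names assumed):
+    #
+    #   'app.bst'.rsplit(':', 1)                 # ['app.bst']
+    #   'subproject.bst:app.bst'.rsplit(':', 1)  # ['subproject.bst', 'app.bst']
+    #
+    # so a plain name resolves in this loader, while a junctioned name is
+    # resolved by the loader returned from _get_loader() for the junction.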
+
+    # Print a warning message, checking warning_token against the project configuration
+ #
+ # Args:
+ # brief (str): The brief message
+    #    warning_token (str): An optional configurable warning associated with this warning,
+    #                         which will cause LoadError to be raised if this warning is configured as fatal.
+ # (*Since 1.4*)
+ #
+ # Raises:
+ # (:class:`.LoadError`): When warning_token is considered fatal by the project configuration
+ #
+ def _warn(self, brief, *, warning_token=None):
+ if warning_token:
+ if self.project._warning_is_fatal(warning_token):
+ raise LoadError(warning_token, brief)
+
+ message = Message(None, MessageType.WARN, brief)
+ self._context.message(message)
+
+ # Print warning messages if any of the specified elements have invalid names.
+ #
+    # Valid filenames should end with the ".bst" extension.
+ #
+ # Args:
+ # elements (list): List of element names
+ #
+ # Raises:
+ # (:class:`.LoadError`): When warning_token is considered fatal by the project configuration
+ #
+ def _warn_invalid_elements(self, elements):
+
+ # invalid_elements
+ #
+ # A dict that maps warning types to the matching elements.
+ invalid_elements = {
+ CoreWarnings.BAD_ELEMENT_SUFFIX: [],
+ CoreWarnings.BAD_CHARACTERS_IN_NAME: [],
+ }
+
+ for filename in elements:
+ if not filename.endswith(".bst"):
+ invalid_elements[CoreWarnings.BAD_ELEMENT_SUFFIX].append(filename)
+ if not self._valid_chars_name(filename):
+ invalid_elements[CoreWarnings.BAD_CHARACTERS_IN_NAME].append(filename)
+
+ if invalid_elements[CoreWarnings.BAD_ELEMENT_SUFFIX]:
+            self._warn("Target elements '{}' do not have the expected file extension `.bst`. "
+                       "Improperly named elements will not be discoverable by commands"
+ .format(invalid_elements[CoreWarnings.BAD_ELEMENT_SUFFIX]),
+ warning_token=CoreWarnings.BAD_ELEMENT_SUFFIX)
+ if invalid_elements[CoreWarnings.BAD_CHARACTERS_IN_NAME]:
+            self._warn("Target elements '{}' have invalid characters in their name."
+ .format(invalid_elements[CoreWarnings.BAD_CHARACTERS_IN_NAME]),
+ warning_token=CoreWarnings.BAD_CHARACTERS_IN_NAME)
+
+    # Check if the given filename contains only valid characters.
+ #
+ # Args:
+ # name (str): Name of the file
+ #
+ # Returns:
+ # (bool): True if all characters are valid, False otherwise.
+ #
+ def _valid_chars_name(self, name):
+ for char in name:
+ char_val = ord(char)
+
+ # 0-31 are control chars, 127 is DEL, and >127 means non-ASCII
+ if char_val <= 31 or char_val >= 127:
+ return False
+
+ # Disallow characters that are invalid on Windows. The list can be
+ # found at https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file
+ #
+ # Note that although : (colon) is not allowed, we do not raise
+ # warnings because of that, since we use it as a separator for
+ # junctioned elements.
+ #
+ # We also do not raise warnings on slashes since they are used as
+ # path separators.
+ if char in r'<>"|?*':
+ return False
+
+ return True
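+
+    # Hedged examples of the policy above (filenames assumed):
+    #
+    #   self._valid_chars_name('core/base.bst')    # True: '/' and ':' are allowed
+    #   self._valid_chars_name('bad\x07name.bst')  # False: control character
+    #   self._valid_chars_name('what?.bst')        # False: '?' is reserved on Windows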
diff --git a/src/buildstream/_loader/metaelement.py b/src/buildstream/_loader/metaelement.py
new file mode 100644
index 000000000..45eb6f4d0
--- /dev/null
+++ b/src/buildstream/_loader/metaelement.py
@@ -0,0 +1,60 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .. import _yaml
+
+
+class MetaElement():
+
+ # MetaElement()
+ #
+ # An abstract object holding data suitable for constructing an Element
+ #
+ # Args:
+ # project: The project that contains the element
+ # name: The resolved element name
+ # kind: The element kind
+ # provenance: The provenance of the element
+ # sources: An array of MetaSource objects
+ # config: The configuration data for the element
+ # variables: The variables declared or overridden on this element
+ # environment: The environment variables declared or overridden on this element
+ # env_nocache: List of environment vars which should not be considered in cache keys
+ # public: Public domain data dictionary
+ # sandbox: Configuration specific to the sandbox environment
+ # first_pass: The element is to be loaded with first pass configuration (junction)
+ #
+ def __init__(self, project, name, kind=None, provenance=None, sources=None, config=None,
+ variables=None, environment=None, env_nocache=None, public=None,
+ sandbox=None, first_pass=False):
+ self.project = project
+ self.name = name
+ self.kind = kind
+ self.provenance = provenance
+ self.sources = sources
+ self.config = config or _yaml.new_empty_node()
+ self.variables = variables or _yaml.new_empty_node()
+ self.environment = environment or _yaml.new_empty_node()
+ self.env_nocache = env_nocache or []
+ self.public = public or _yaml.new_empty_node()
+ self.sandbox = sandbox or _yaml.new_empty_node()
+ self.build_dependencies = []
+ self.dependencies = []
+ self.first_pass = first_pass
+ self.is_junction = kind == "junction"
diff --git a/src/buildstream/_loader/metasource.py b/src/buildstream/_loader/metasource.py
new file mode 100644
index 000000000..da2c0e292
--- /dev/null
+++ b/src/buildstream/_loader/metasource.py
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+
+class MetaSource():
+
+ # MetaSource()
+ #
+ # An abstract object holding data suitable for constructing a Source
+ #
+ # Args:
+ # element_name: The name of the owning element
+ # element_index: The index of the source in the owning element's source list
+ # element_kind: The kind of the owning element
+ # kind: The kind of the source
+ # config: The configuration data for the source
+    #    directory: The optional directory configured for the source, or None
+ #
+ def __init__(self, element_name, element_index, element_kind, kind, config, directory):
+ self.element_name = element_name
+ self.element_index = element_index
+ self.element_kind = element_kind
+ self.kind = kind
+ self.config = config
+ self.directory = directory
+ self.first_pass = False
diff --git a/src/buildstream/_loader/types.py b/src/buildstream/_loader/types.py
new file mode 100644
index 000000000..f9dd38ca0
--- /dev/null
+++ b/src/buildstream/_loader/types.py
@@ -0,0 +1,112 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .._exceptions import LoadError, LoadErrorReason
+from .. import _yaml
+
+
+# Symbol():
+#
+# A simple object to denote the symbols we load with from YAML
+#
+class Symbol():
+ FILENAME = "filename"
+ KIND = "kind"
+ DEPENDS = "depends"
+ BUILD_DEPENDS = "build-depends"
+ RUNTIME_DEPENDS = "runtime-depends"
+ SOURCES = "sources"
+ CONFIG = "config"
+ VARIABLES = "variables"
+ ENVIRONMENT = "environment"
+ ENV_NOCACHE = "environment-nocache"
+ PUBLIC = "public"
+ TYPE = "type"
+ BUILD = "build"
+ RUNTIME = "runtime"
+ ALL = "all"
+ DIRECTORY = "directory"
+ JUNCTION = "junction"
+ SANDBOX = "sandbox"
+
+
+# Dependency()
+#
+# A simple object describing a dependency
+#
+# Args:
+#    dep (str or Mapping): The dependency declaration; either an element
+#                          name string, or a dictionary with 'filename',
+#                          'type' and 'junction' keys
+#    provenance (Provenance): The YAML node provenance of where this
+#                             dependency was declared
+#    default_dep_type (str): The dependency type to use when the
+#                            declaration does not specify one
+#
+class Dependency():
+ def __init__(self, dep, provenance, default_dep_type=None):
+ self.provenance = provenance
+
+ if isinstance(dep, str):
+ self.name = dep
+ self.dep_type = default_dep_type
+ self.junction = None
+
+ elif _yaml.is_node(dep):
+ if default_dep_type:
+ _yaml.node_validate(dep, ['filename', 'junction'])
+ dep_type = default_dep_type
+ else:
+ _yaml.node_validate(dep, ['filename', 'type', 'junction'])
+
+ # Make type optional, for this we set it to None
+ dep_type = _yaml.node_get(dep, str, Symbol.TYPE, default_value=None)
+ if dep_type is None or dep_type == Symbol.ALL:
+ dep_type = None
+ elif dep_type not in [Symbol.BUILD, Symbol.RUNTIME]:
+ provenance = _yaml.node_get_provenance(dep, key=Symbol.TYPE)
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Dependency type '{}' is not 'build', 'runtime' or 'all'"
+ .format(provenance, dep_type))
+
+ self.name = _yaml.node_get(dep, str, Symbol.FILENAME)
+ self.dep_type = dep_type
+ self.junction = _yaml.node_get(dep, str, Symbol.JUNCTION, default_value=None)
+
+ else:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Dependency is not specified as a string or a dictionary".format(provenance))
+
+ # `:` characters are not allowed in filename if a junction was
+ # explicitly specified
+ if self.junction and ':' in self.name:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Dependency {} contains `:` in its name. "
+ "`:` characters are not allowed in filename when "
+ "junction attribute is specified.".format(self.provenance, self.name))
+
+        # Name of the element should never contain more than one `:` character
+ if self.name.count(':') > 1:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Dependency {} contains multiple `:` in its name. "
+ "Recursive lookups for cross-junction elements is not "
+ "allowed.".format(self.provenance, self.name))
+
+ # Attempt to split name if no junction was specified explicitly
+ if not self.junction and self.name.count(':') == 1:
+ self.junction, self.name = self.name.split(':')
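+
+
+# A hedged sketch of the two accepted declaration forms handled above,
+# with assumed element names and an assumed `provenance` object:
+#
+#   Dependency('base.bst', provenance)
+#   #   name == 'base.bst', junction is None, dep_type is the default
+#   Dependency('subproject.bst:base.bst', provenance)
+#   #   junction == 'subproject.bst', name == 'base.bst'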
diff --git a/src/buildstream/_message.py b/src/buildstream/_message.py
new file mode 100644
index 000000000..c2cdb8277
--- /dev/null
+++ b/src/buildstream/_message.py
@@ -0,0 +1,80 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+import datetime
+import os
+
+
+# Types of status messages.
+#
+class MessageType():
+ DEBUG = "debug" # Debugging message
+ STATUS = "status" # Status message, verbose details
+ INFO = "info" # Informative messages
+ WARN = "warning" # Warning messages
+ ERROR = "error" # Error messages
+ BUG = "bug" # An unhandled exception was raised in a plugin
+ LOG = "log" # Messages for log files _only_, never in the frontend
+
+ # Timed Messages: SUCCESS and FAIL have duration timestamps
+ START = "start" # Status start message
+ SUCCESS = "success" # Successful status complete message
+ FAIL = "failure" # Failing status complete message
+ SKIPPED = "skipped"
+
+
+# Messages which should be reported regardless of whether
+# they are currently silenced or not
+unconditional_messages = [
+ MessageType.INFO,
+ MessageType.WARN,
+ MessageType.FAIL,
+ MessageType.ERROR,
+ MessageType.BUG
+]
+
+
+# Message object
+#
+class Message():
+
+ def __init__(self, unique_id, message_type, message,
+ task_id=None,
+ detail=None,
+ action_name=None,
+ elapsed=None,
+ depth=None,
+ logfile=None,
+ sandbox=None,
+ scheduler=False):
+ self.message_type = message_type # Message type
+ self.message = message # The message string
+ self.detail = detail # An additional detail string
+ self.action_name = action_name # Name of the task queue (fetch, refresh, build, etc)
+ self.elapsed = elapsed # The elapsed time, in timed messages
+ self.depth = depth # The depth of a timed message
+ self.logfile = logfile # The log file path where commands took place
+ self.sandbox = sandbox # The error that caused this message used a sandbox
+ self.pid = os.getpid() # The process pid
+        self.unique_id = unique_id       # The plugin object ID issuing the message
+ self.task_id = task_id # The plugin object ID of the task
+ self.scheduler = scheduler # Whether this is a scheduler level message
+ self.creation_time = datetime.datetime.now()
+ if message_type in (MessageType.SUCCESS, MessageType.FAIL):
+ assert elapsed is not None
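+
+
+# Minimal usage sketch (the `context` object and the message text are
+# assumed for illustration, not part of this module):
+#
+#   message = Message(None, MessageType.WARN, "Element name looks suspicious",
+#                     detail="Element filenames are expected to end in '.bst'")
+#   context.message(message)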
diff --git a/src/buildstream/_options/__init__.py b/src/buildstream/_options/__init__.py
new file mode 100644
index 000000000..70bbe35aa
--- /dev/null
+++ b/src/buildstream/_options/__init__.py
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .optionpool import OptionPool
diff --git a/src/buildstream/_options/option.py b/src/buildstream/_options/option.py
new file mode 100644
index 000000000..ffdb4d272
--- /dev/null
+++ b/src/buildstream/_options/option.py
@@ -0,0 +1,112 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .. import _yaml
+
+
+# Shared symbols for validation purposes
+#
+OPTION_SYMBOLS = [
+ 'type',
+ 'description',
+ 'variable'
+]
+
+
+# Option()
+#
+# An abstract class representing a project option.
+#
+# Concrete classes must be created to handle option types;
+# the loaded project options are a collection of typed Option
+# instances.
+#
+class Option():
+
+ # Subclasses use this to specify the type name used
+ # for the yaml format and error messages
+ OPTION_TYPE = None
+
+ def __init__(self, name, definition, pool):
+ self.name = name
+ self.description = None
+ self.variable = None
+ self.value = None
+ self.pool = pool
+ self.load(definition)
+
+ # load()
+ #
+ # Loads the option attributes from the descriptions
+ # in the project.conf
+ #
+ # Args:
+ # node (dict): The loaded YAML dictionary describing
+ # the option
+ def load(self, node):
+ self.description = _yaml.node_get(node, str, 'description')
+ self.variable = _yaml.node_get(node, str, 'variable', default_value=None)
+
+ # Assert valid symbol name for variable name
+ if self.variable is not None:
+ p = _yaml.node_get_provenance(node, 'variable')
+ _yaml.assert_symbol_name(p, self.variable, 'variable name')
+
+ # load_value()
+ #
+ # Loads the value of the option in string form.
+ #
+ # Args:
+ # node (Mapping): The YAML loaded key/value dictionary
+ # to load the value from
+    #    transform (callable): Transform function for variable substitution
+ #
+ def load_value(self, node, *, transform=None):
+ pass # pragma: nocover
+
+ # set_value()
+ #
+ # Sets the value of an option from a string passed
+ # to buildstream on the command line
+ #
+ # Args:
+ # value (str): The value in string form
+ #
+ def set_value(self, value):
+ pass # pragma: nocover
+
+ # get_value()
+ #
+ # Gets the value of an option in string form, this
+ # is for the purpose of exporting option values to
+ # variables which must be in string form.
+ #
+ # Returns:
+ # (str): The value in string form
+ #
+ def get_value(self):
+ pass # pragma: nocover
+
+ # resolve()
+ #
+ # Called on each option once, after all configuration
+ # and cli options have been passed.
+ #
+ def resolve(self):
+ pass # pragma: nocover
diff --git a/src/buildstream/_options/optionarch.py b/src/buildstream/_options/optionarch.py
new file mode 100644
index 000000000..0e2963c84
--- /dev/null
+++ b/src/buildstream/_options/optionarch.py
@@ -0,0 +1,84 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .. import _yaml
+from .._exceptions import LoadError, LoadErrorReason, PlatformError
+from .._platform import Platform
+from .optionenum import OptionEnum
+
+
+# OptionArch
+#
+# An enumeration project option which does not allow
+# definition of a default value, but instead tries to set
+# the default value to the machine architecture introspected
+# using `uname`
+#
+# Note that when using OptionArch in a project, it will automatically
+# bail out if the host machine's `uname` reports a machine architecture
+# not supported by the project, in the case that no value was
+# explicitly specified
+#
+class OptionArch(OptionEnum):
+
+ OPTION_TYPE = 'arch'
+
+ def load(self, node):
+ super(OptionArch, self).load(node, allow_default_definition=False)
+
+ def load_default_value(self, node):
+ arch = Platform.get_host_arch()
+
+ default_value = None
+
+ for index, value in enumerate(self.values):
+ try:
+ canonical_value = Platform.canonicalize_arch(value)
+ if default_value is None and canonical_value == arch:
+ default_value = value
+ # Do not terminate the loop early to ensure we validate
+ # all values in the list.
+ except PlatformError as e:
+ provenance = _yaml.node_get_provenance(node, key='values', indices=[index])
+ prefix = ""
+ if provenance:
+ prefix = "{}: ".format(provenance)
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}Invalid value for {} option '{}': {}"
+ .format(prefix, self.OPTION_TYPE, self.name, e))
+
+ if default_value is None:
+ # Host architecture is not supported by the project.
+ # Do not raise an error here as the user may override it.
+ # If the user does not override it, an error will be raised
+ # by resolve()/validate().
+ default_value = arch
+
+ return default_value
+
+ def resolve(self):
+
+ # Validate that the default machine arch reported by uname() is
+ # explicitly supported by the project, only if it was not
+ # overridden by user configuration or cli.
+ #
+ # If the value is specified on the cli or user configuration,
+ # then it will already be valid.
+ #
+ self.validate(self.value)
diff --git a/src/buildstream/_options/optionbool.py b/src/buildstream/_options/optionbool.py
new file mode 100644
index 000000000..867de22df
--- /dev/null
+++ b/src/buildstream/_options/optionbool.py
@@ -0,0 +1,58 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .. import _yaml
+from .._exceptions import LoadError, LoadErrorReason
+from .option import Option, OPTION_SYMBOLS
+
+
+# OptionBool
+#
+# A boolean project option
+#
+class OptionBool(Option):
+
+ OPTION_TYPE = 'bool'
+
+ def load(self, node):
+
+ super(OptionBool, self).load(node)
+ _yaml.node_validate(node, OPTION_SYMBOLS + ['default'])
+ self.value = _yaml.node_get(node, bool, 'default')
+
+ def load_value(self, node, *, transform=None):
+ if transform:
+ self.set_value(transform(_yaml.node_get(node, str, self.name)))
+ else:
+ self.value = _yaml.node_get(node, bool, self.name)
+
+ def set_value(self, value):
+ if value in ('True', 'true'):
+ self.value = True
+ elif value in ('False', 'false'):
+ self.value = False
+ else:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "Invalid value for boolean option {}: {}".format(self.name, value))
+
+ def get_value(self):
+ if self.value:
+ return "1"
+ else:
+ return "0"
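+
+
+# Behaviour sketch for the conversions above (the option instance is assumed):
+#
+#   option.set_value('true')   # option.value is True
+#   option.get_value()         # returns "1", suitable for variable export
+#   option.set_value('maybe')  # raises LoadError(LoadErrorReason.INVALID_DATA, ...)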
diff --git a/src/buildstream/_options/optioneltmask.py b/src/buildstream/_options/optioneltmask.py
new file mode 100644
index 000000000..09c2ce8c2
--- /dev/null
+++ b/src/buildstream/_options/optioneltmask.py
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .. import utils
+from .optionflags import OptionFlags
+
+
+# OptionEltMask
+#
+# A flags option which automatically only allows element
+# names as values.
+#
+class OptionEltMask(OptionFlags):
+
+ OPTION_TYPE = 'element-mask'
+
+ def load(self, node):
+ # Ask the parent constructor to disallow value definitions,
+ # we define those automatically only.
+ super(OptionEltMask, self).load(node, allow_value_definitions=False)
+
+ # Here we want all valid elements as possible values,
+ # but we'll settle for just the relative filenames
+ # of files ending with ".bst" in the project element directory
+ def load_valid_values(self, node):
+ values = []
+ for filename in utils.list_relative_paths(self.pool.element_path):
+ if filename.endswith('.bst'):
+ values.append(filename)
+ return values
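+
+
+# Sketch of the value discovery above, for an assumed element directory
+# containing base.bst, core/app.bst and a stray README (order aside):
+#
+#   load_valid_values() -> ['base.bst', 'core/app.bst']
+#
+# so only real element names may be used in the mask.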
diff --git a/src/buildstream/_options/optionenum.py b/src/buildstream/_options/optionenum.py
new file mode 100644
index 000000000..095b9c356
--- /dev/null
+++ b/src/buildstream/_options/optionenum.py
@@ -0,0 +1,77 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .. import _yaml
+from .._exceptions import LoadError, LoadErrorReason
+from .option import Option, OPTION_SYMBOLS
+
+
+# OptionEnum
+#
+# An enumeration project option
+#
+class OptionEnum(Option):
+
+ OPTION_TYPE = 'enum'
+
+ def load(self, node, allow_default_definition=True):
+ super(OptionEnum, self).load(node)
+
+ valid_symbols = OPTION_SYMBOLS + ['values']
+ if allow_default_definition:
+ valid_symbols += ['default']
+
+ _yaml.node_validate(node, valid_symbols)
+
+ self.values = _yaml.node_get(node, list, 'values', default_value=[])
+ if not self.values:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: No values specified for {} option '{}'"
+ .format(_yaml.node_get_provenance(node), self.OPTION_TYPE, self.name))
+
+ # Allow subclass to define the default value
+ self.value = self.load_default_value(node)
+
+ def load_value(self, node, *, transform=None):
+ self.value = _yaml.node_get(node, str, self.name)
+ if transform:
+ self.value = transform(self.value)
+ self.validate(self.value, _yaml.node_get_provenance(node, self.name))
+
+ def set_value(self, value):
+ self.validate(value)
+ self.value = value
+
+ def get_value(self):
+ return self.value
+
+ def validate(self, value, provenance=None):
+ if value not in self.values:
+ prefix = ""
+ if provenance:
+ prefix = "{}: ".format(provenance)
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}Invalid value for {} option '{}': {}\n"
+ .format(prefix, self.OPTION_TYPE, self.name, value) +
+ "Valid values: {}".format(", ".join(self.values)))
+
+ def load_default_value(self, node):
+ value = _yaml.node_get(node, str, 'default')
+ self.validate(value, _yaml.node_get_provenance(node, 'default'))
+ return value
diff --git a/src/buildstream/_options/optionflags.py b/src/buildstream/_options/optionflags.py
new file mode 100644
index 000000000..0271208d9
--- /dev/null
+++ b/src/buildstream/_options/optionflags.py
@@ -0,0 +1,86 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .. import _yaml
+from .._exceptions import LoadError, LoadErrorReason
+from .option import Option, OPTION_SYMBOLS
+
+
+# OptionFlags
+#
+# A flags project option
+#
+class OptionFlags(Option):
+
+ OPTION_TYPE = 'flags'
+
+ def load(self, node, allow_value_definitions=True):
+ super(OptionFlags, self).load(node)
+
+ valid_symbols = OPTION_SYMBOLS + ['default']
+ if allow_value_definitions:
+ valid_symbols += ['values']
+
+ _yaml.node_validate(node, valid_symbols)
+
+ # Allow subclass to define the valid values
+ self.values = self.load_valid_values(node)
+ if not self.values:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: No values specified for {} option '{}'"
+ .format(_yaml.node_get_provenance(node), self.OPTION_TYPE, self.name))
+
+ self.value = _yaml.node_get(node, list, 'default', default_value=[])
+ self.validate(self.value, _yaml.node_get_provenance(node, 'default'))
+
+ def load_value(self, node, *, transform=None):
+ self.value = _yaml.node_get(node, list, self.name)
+ if transform:
+ self.value = [transform(x) for x in self.value]
+ self.value = sorted(self.value)
+ self.validate(self.value, _yaml.node_get_provenance(node, self.name))
+
+ def set_value(self, value):
+ # Strip out all whitespace, allowing: "value1, value2 , value3"
+ stripped = "".join(value.split())
+
+ # Get the comma separated values
+ list_value = stripped.split(',')
+
+ self.validate(list_value)
+ self.value = sorted(list_value)
+
+ def get_value(self):
+ return ",".join(self.value)
+
+ def validate(self, value, provenance=None):
+ for flag in value:
+ if flag not in self.values:
+ prefix = ""
+ if provenance:
+ prefix = "{}: ".format(provenance)
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}Invalid value for flags option '{}': {}\n"
+ .format(prefix, self.name, value) +
+ "Valid values: {}".format(", ".join(self.values)))
+
+ def load_valid_values(self, node):
+ # Allow the more descriptive error to raise when no values
+ # exist rather than bailing out here (by specifying default_value)
+ return _yaml.node_get(node, list, 'values', default_value=[])
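+
+
+# Sketch of the command line handling above, assuming 'debug', 'docs' and
+# 'tests' are all declared values for the option:
+#
+#   option.set_value("debug, docs ,tests")
+#   # whitespace is stripped, the list is validated and sorted:
+#   # option.value == ['debug', 'docs', 'tests']
+#   option.get_value()  # "debug,docs,tests"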
diff --git a/src/buildstream/_options/optionos.py b/src/buildstream/_options/optionos.py
new file mode 100644
index 000000000..2d46b70ba
--- /dev/null
+++ b/src/buildstream/_options/optionos.py
@@ -0,0 +1,41 @@
+
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Raoul Hidalgo Charman <raoul.hidalgocharman@codethink.co.uk>
+
+import platform
+from .optionenum import OptionEnum
+
+
+# OptionOS
+#
+class OptionOS(OptionEnum):
+
+ OPTION_TYPE = 'os'
+
+ def load(self, node):
+ super(OptionOS, self).load(node, allow_default_definition=False)
+
+ def load_default_value(self, node):
+ return platform.uname().system
+
+ def resolve(self):
+
+ # Validate that the default OS reported by uname() is explicitly
+ # supported by the project, if not overridden by user config or cli.
+ self.validate(self.value)
diff --git a/src/buildstream/_options/optionpool.py b/src/buildstream/_options/optionpool.py
new file mode 100644
index 000000000..de3af3e15
--- /dev/null
+++ b/src/buildstream/_options/optionpool.py
@@ -0,0 +1,295 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+#
+
+import jinja2
+
+from .. import _yaml
+from .._exceptions import LoadError, LoadErrorReason
+from .optionbool import OptionBool
+from .optionenum import OptionEnum
+from .optionflags import OptionFlags
+from .optioneltmask import OptionEltMask
+from .optionarch import OptionArch
+from .optionos import OptionOS
+
+
+_OPTION_TYPES = {
+ OptionBool.OPTION_TYPE: OptionBool,
+ OptionEnum.OPTION_TYPE: OptionEnum,
+ OptionFlags.OPTION_TYPE: OptionFlags,
+ OptionEltMask.OPTION_TYPE: OptionEltMask,
+ OptionArch.OPTION_TYPE: OptionArch,
+ OptionOS.OPTION_TYPE: OptionOS,
+}
+
+
+class OptionPool():
+
+ def __init__(self, element_path):
+ # We hold on to the element path for the sake of OptionEltMask
+ self.element_path = element_path
+
+ #
+ # Private members
+ #
+ self._options = {} # The Options
+ self._variables = None # The Options resolved into typed variables
+
+ # jinja2 environment, with default globals cleared out of the way
+ self._environment = jinja2.Environment(undefined=jinja2.StrictUndefined)
+ self._environment.globals = []
+
+ # load()
+ #
+ # Loads the options described in the project.conf
+ #
+ # Args:
+    #    options (dict): The loaded YAML options node
+ #
+ def load(self, options):
+
+ for option_name, option_definition in _yaml.node_items(options):
+
+ # Assert that the option name is a valid symbol
+ p = _yaml.node_get_provenance(options, option_name)
+ _yaml.assert_symbol_name(p, option_name, "option name", allow_dashes=False)
+
+ opt_type_name = _yaml.node_get(option_definition, str, 'type')
+ try:
+ opt_type = _OPTION_TYPES[opt_type_name]
+ except KeyError:
+ p = _yaml.node_get_provenance(option_definition, 'type')
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Invalid option type '{}'".format(p, opt_type_name))
+
+ option = opt_type(option_name, option_definition, self)
+ self._options[option_name] = option
+
+ # load_yaml_values()
+ #
+ # Loads the option values specified in a key/value
+ # dictionary loaded from YAML
+ #
+ # Args:
+ # node (dict): The loaded YAML options
+ #
+ def load_yaml_values(self, node, *, transform=None):
+ for option_name in _yaml.node_keys(node):
+ try:
+ option = self._options[option_name]
+ except KeyError as e:
+ p = _yaml.node_get_provenance(node, option_name)
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Unknown option '{}' specified"
+ .format(p, option_name)) from e
+ option.load_value(node, transform=transform)
+
+ # load_cli_values()
+ #
+ # Loads the option values specified in a list of tuples
+ # collected from the command line
+ #
+ # Args:
+ # cli_options (list): A list of (str, str) tuples
+ # ignore_unknown (bool): Whether to silently ignore unknown options.
+ #
+ def load_cli_values(self, cli_options, *, ignore_unknown=False):
+ for option_name, option_value in cli_options:
+ try:
+ option = self._options[option_name]
+ except KeyError as e:
+ if not ignore_unknown:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "Unknown option '{}' specified on the command line"
+ .format(option_name)) from e
+ else:
+ option.set_value(option_value)
+
+ # resolve()
+ #
+    # Resolves the loaded options; this is a step which must be
+    # performed after loading all options and their values, and before
+    # ever trying to evaluate an expression
+ #
+ def resolve(self):
+ self._variables = {}
+ for option_name, option in self._options.items():
+ # Delegate one more method for options to
+ # do some last minute validation once any
+ # overrides have been performed.
+ #
+ option.resolve()
+
+ self._variables[option_name] = option.value
+
+ # export_variables()
+ #
+ # Exports the option values which are declared
+ # to be exported, to the passed dictionary.
+ #
+ # Variable values are exported in string form
+ #
+ # Args:
+ # variables (dict): A variables dictionary
+ #
+ def export_variables(self, variables):
+ for _, option in self._options.items():
+ if option.variable:
+ _yaml.node_set(variables, option.variable, option.get_value())
+
+ # printable_variables()
+ #
+ # Exports all option names and string values
+ # to the passed dictionary in alphabetical order.
+ #
+ # Args:
+ # variables (dict): A variables dictionary
+ #
+ def printable_variables(self, variables):
+ for key in sorted(self._options):
+ variables[key] = self._options[key].get_value()
+
+ # process_node()
+ #
+ # Args:
+ # node (node): A YAML Loaded dictionary
+ #
+ def process_node(self, node):
+
+ # A conditional will result in composition, which can
+ # in turn add new conditionals to the root.
+ #
+ # Keep processing conditionals on the root node until
+ # all directly nested conditionals are resolved.
+ #
+ while self._process_one_node(node):
+ pass
+
+ # Now recurse into nested dictionaries and lists
+ # and process any indirectly nested conditionals.
+ #
+ for _, value in _yaml.node_items(node):
+ if _yaml.is_node(value):
+ self.process_node(value)
+ elif isinstance(value, list):
+ self._process_list(value)
+
+ #######################################################
+ # Private Methods #
+ #######################################################
+
+ # _evaluate()
+ #
+ # Evaluates a jinja2 style expression with the loaded options in context.
+ #
+ # Args:
+ # expression (str): The jinja2 style expression
+ #
+ # Returns:
+ # (bool): Whether the expression resolved to a truthy value or a falsy one.
+ #
+ # Raises:
+ # LoadError: If the expression failed to resolve for any reason
+ #
+ def _evaluate(self, expression):
+
+ #
+ # Variables must be resolved at this point.
+ #
+ try:
+ template_string = "{{% if {} %}} True {{% else %}} False {{% endif %}}".format(expression)
+ template = self._environment.from_string(template_string)
+ context = template.new_context(self._variables, shared=True)
+ result = template.root_render_func(context)
+ evaluated = jinja2.utils.concat(result)
+ val = evaluated.strip()
+
+ if val == "True":
+ return True
+ elif val == "False":
+ return False
+ else: # pragma: nocover
+ raise LoadError(LoadErrorReason.EXPRESSION_FAILED,
+ "Failed to evaluate expression: {}".format(expression))
+ except jinja2.exceptions.TemplateError as e:
+ raise LoadError(LoadErrorReason.EXPRESSION_FAILED,
+ "Failed to evaluate expression ({}): {}".format(expression, e))
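+
+    # For a pool whose resolved variables are {'arch': 'x86_64', 'debug': True}
+    # (values assumed for illustration), the expression
+    # "arch == 'x86_64' and debug" becomes the template
+    #
+    #   {% if arch == 'x86_64' and debug %} True {% else %} False {% endif %}
+    #
+    # which renders to " True " and strips to "True".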
+
+    # Recursion assistant for lists, in case there
+    # are lists of lists.
+ #
+ def _process_list(self, values):
+ for value in values:
+ if _yaml.is_node(value):
+ self.process_node(value)
+ elif isinstance(value, list):
+ self._process_list(value)
+
+ # Process a single conditional, resulting in composition
+ # at the root level on the passed node
+ #
+ # Return true if a conditional was processed.
+ #
+ def _process_one_node(self, node):
+ conditions = _yaml.node_get(node, list, '(?)', default_value=None)
+ assertion = _yaml.node_get(node, str, '(!)', default_value=None)
+
+        # Process assertions first; we want to abort on the first encountered
+        # assertion in a given dictionary, and not lose an assertion due to
+        # it being overwritten by a later assertion which might also trigger.
+ if assertion is not None:
+ p = _yaml.node_get_provenance(node, '(!)')
+ raise LoadError(LoadErrorReason.USER_ASSERTION,
+ "{}: {}".format(p, assertion.strip()))
+
+ if conditions is not None:
+
+ # Collect provenance first, we need to delete the (?) key
+ # before any composition occurs.
+ provenance = [
+ _yaml.node_get_provenance(node, '(?)', indices=[i])
+ for i in range(len(conditions))
+ ]
+ _yaml.node_del(node, '(?)')
+
+ for condition, p in zip(conditions, provenance):
+ tuples = list(_yaml.node_items(condition))
+ if len(tuples) > 1:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Conditional statement has more than one key".format(p))
+
+ expression, value = tuples[0]
+ try:
+ apply_fragment = self._evaluate(expression)
+ except LoadError as e:
+ # Prepend the provenance of the error
+ raise LoadError(e.reason, "{}: {}".format(p, e)) from e
+
+ if not _yaml.is_node(value):
+ raise LoadError(LoadErrorReason.ILLEGAL_COMPOSITE,
+ "{}: Only values of type 'dict' can be composed.".format(p))
+
+ # Apply the yaml fragment if its condition evaluates to true
+ if apply_fragment:
+ _yaml.composite(node, value)
+
+ return True
+
+ return False
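+
+    # A hedged example of the conditional form consumed above, as it might
+    # appear in a project.conf fragment (option and variable names assumed):
+    #
+    #   variables:
+    #     (?):
+    #     - arch == "x86_64":
+    #         flags: "-O2"
+    #
+    # When the expression evaluates to true, the nested mapping is composited
+    # onto the surrounding node and the '(?)' key is removed.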
diff --git a/src/buildstream/_pipeline.py b/src/buildstream/_pipeline.py
new file mode 100644
index 000000000..c176b82f6
--- /dev/null
+++ b/src/buildstream/_pipeline.py
@@ -0,0 +1,516 @@
+#
+# Copyright (C) 2016-2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+import os
+import itertools
+from operator import itemgetter
+from collections import OrderedDict
+
+from pyroaring import BitMap # pylint: disable=no-name-in-module
+
+from ._exceptions import PipelineError
+from ._message import Message, MessageType
+from ._profile import Topics, PROFILER
+from . import Scope, Consistency
+from ._project import ProjectRefStorage
+
+
+# PipelineSelection()
+#
+# Defines the kind of pipeline selection to make when the pipeline
+# is provided a list of targets, for whichever purpose.
+#
+# These values correspond to the CLI `--deps` arguments for convenience.
+#
+class PipelineSelection():
+
+ # Select only the target elements in the associated targets
+ NONE = 'none'
+
+ # As NONE, but redirect elements that are capable of it
+ REDIRECT = 'redirect'
+
+ # Select elements which must be built for the associated targets to be built
+ PLAN = 'plan'
+
+ # All dependencies of all targets, including the targets
+ ALL = 'all'
+
+ # All direct build dependencies and their recursive runtime dependencies,
+ # excluding the targets
+ BUILD = 'build'
+
+ # All direct runtime dependencies and their recursive runtime dependencies,
+ # including the targets
+ RUN = 'run'
+
+
+# Pipeline()
+#
+# Args:
+# project (Project): The Project object
+# context (Context): The Context object
+#    artifacts (ArtifactCache): The ArtifactCache object
+#
+class Pipeline():
+
+ def __init__(self, context, project, artifacts):
+
+ self._context = context # The Context
+ self._project = project # The toplevel project
+
+ #
+ # Private members
+ #
+ self._artifacts = artifacts
+
+ # load()
+ #
+ # Loads elements from target names.
+ #
+ # This function is called with a list of lists, such that multiple
+ # target groups may be specified. Element names specified in `targets`
+ # are allowed to be redundant.
+ #
+ # Args:
+ # target_groups (list of lists): Groups of toplevel targets to load
+ # fetch_subprojects (bool): Whether we should fetch subprojects as a part of the
+ # loading process, if they are not yet locally cached
+ # rewritable (bool): Whether the loaded files should be rewritable
+ # this is a bit more expensive due to deep copies
+ #
+ # Returns:
+ # (tuple of lists): A tuple of grouped Element objects corresponding to target_groups
+ #
+ def load(self, target_groups, *,
+ fetch_subprojects=True,
+ rewritable=False):
+
+ # First concatenate all the lists for the loader's sake
+ targets = list(itertools.chain(*target_groups))
+
+ with PROFILER.profile(Topics.LOAD_PIPELINE, "_".join(t.replace(os.sep, "-") for t in targets)):
+ elements = self._project.load_elements(targets,
+ rewritable=rewritable,
+ fetch_subprojects=fetch_subprojects)
+
+ # Now create element groups to match the input target groups
+ elt_iter = iter(elements)
+ element_groups = [
+ [next(elt_iter) for i in range(len(group))]
+ for group in target_groups
+ ]
+
+ return tuple(element_groups)
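+
+    # Standalone sketch of the regrouping performed in load() above
+    # (group contents assumed):
+    #
+    #   groups = [['a.bst', 'b.bst'], ['c.bst']]
+    #   flat = ['A', 'B', 'C']          # loaded elements, in the same order
+    #   it = iter(flat)
+    #   [[next(it) for _ in g] for g in groups]   # [['A', 'B'], ['C']]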
+
+ # resolve_elements()
+ #
+ # Resolve element state and cache keys.
+ #
+ # Args:
+ # targets (list of Element): The list of toplevel element targets
+ #
+ def resolve_elements(self, targets):
+ with self._context.timed_activity("Resolving cached state", silent_nested=True):
+ for element in self.dependencies(targets, Scope.ALL):
+
+ # Preflight
+ element._preflight()
+
+ # Determine initial element state.
+ element._update_state()
+
+ # dependencies()
+ #
+ # Generator function to iterate over elements and optionally
+ # also iterate over sources.
+ #
+ # Args:
+ # targets (list of Element): The target Elements to loop over
+ # scope (Scope): The scope to iterate over
+ # recurse (bool): Whether to recurse into dependencies
+ #
+ def dependencies(self, targets, scope, *, recurse=True):
+ # Keep track of 'visited' in this scope, so that all targets
+ # share the same context.
+ visited = (BitMap(), BitMap())
+
+ for target in targets:
+ for element in target.dependencies(scope, recurse=recurse, visited=visited):
+ yield element
+
+ # plan()
+ #
+ # Generator function to iterate over only the elements
+ # which are required to build the pipeline target, omitting
+    # cached elements. The elements are yielded in a depth-sorted
+    # ordering for optimal build plans.
+ #
+ # Args:
+ # elements (list of Element): List of target elements to plan
+ #
+ # Returns:
+ # (list of Element): A depth sorted list of the build plan
+ #
+ def plan(self, elements):
+        # Keep locally cached elements in the plan if a remote artifact cache is used,
+        # to allow pulling artifacts with the strict cache key, if available.
+ plan_cached = not self._context.get_strict() and self._artifacts.has_fetch_remotes()
+
+ return _Planner().plan(elements, plan_cached)
+
+ # get_selection()
+ #
+ # Gets a full list of elements based on a toplevel
+ # list of element targets
+ #
+ # Args:
+ # targets (list of Element): The target Elements
+ # mode (PipelineSelection): The PipelineSelection mode
+ #
+ # Various commands define a --deps option to specify what elements to
+    # use in the result; this function reports a list that is appropriate for
+ # the selected option.
+ #
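+    # Example (illustrative): resolve the element list for `--deps build`:
+    #
+    #   selected = pipeline.get_selection(targets, PipelineSelection.BUILD)
+    #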
+ def get_selection(self, targets, mode, *, silent=True):
+
+ elements = None
+ if mode == PipelineSelection.NONE:
+ elements = targets
+ elif mode == PipelineSelection.REDIRECT:
+ # Redirect and log if permitted
+ elements = []
+ for t in targets:
+ new_elm = t._get_source_element()
+ if new_elm != t and not silent:
+ self._message(MessageType.INFO, "Element '{}' redirected to '{}'"
+ .format(t.name, new_elm.name))
+ if new_elm not in elements:
+ elements.append(new_elm)
+ elif mode == PipelineSelection.PLAN:
+ elements = self.plan(targets)
+ else:
+ if mode == PipelineSelection.ALL:
+ scope = Scope.ALL
+ elif mode == PipelineSelection.BUILD:
+ scope = Scope.BUILD
+ elif mode == PipelineSelection.RUN:
+ scope = Scope.RUN
+
+ elements = list(self.dependencies(targets, scope))
+
+ return elements
+
+ # except_elements():
+ #
+    # Return what remains after removing the intersection between the
+    # excepted and target elements, along with its unique dependencies.
+ #
+ # Args:
+    #    targets (list of Element): List of toplevel targeted elements
+ # elements (list of Element): The list to remove elements from
+ # except_targets (list of Element): List of toplevel except targets
+ #
+ # Returns:
+ # (list of Element): The elements list with the intersected
+ # exceptions removed
+ #
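+    # Example (illustrative sketch):
+    #
+    #   elements = pipeline.except_elements(targets, elements, except_targets)
+    #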
+ def except_elements(self, targets, elements, except_targets):
+ if not except_targets:
+ return elements
+
+ targeted = list(self.dependencies(targets, Scope.ALL))
+ visited = []
+
+ def find_intersection(element):
+ if element in visited:
+ return
+ visited.append(element)
+
+ # Intersection elements are those that are also in
+ # 'targeted', as long as we don't recurse into them.
+ if element in targeted:
+ yield element
+ else:
+ for dep in element.dependencies(Scope.ALL, recurse=False):
+ yield from find_intersection(dep)
+
+ # Build a list of 'intersection' elements, i.e. the set of
+ # elements that lie on the border closest to excepted elements
+ # between excepted and target elements.
+ intersection = list(itertools.chain.from_iterable(
+ find_intersection(element) for element in except_targets
+ ))
+
+ # Now use this set of elements to traverse the targeted
+ # elements, except 'intersection' elements and their unique
+ # dependencies.
+ queue = []
+ visited = []
+
+ queue.extend(targets)
+ while queue:
+ element = queue.pop()
+ if element in visited or element in intersection:
+ continue
+ visited.append(element)
+
+ queue.extend(element.dependencies(Scope.ALL, recurse=False))
+
+ # That looks like a lot, but overall we only traverse (part
+ # of) the graph twice. This could be reduced to once if we
+ # kept track of parent elements, but is probably not
+ # significant.
+
+ # Ensure that we return elements in the same order they were
+ # in before.
+ return [element for element in elements if element in visited]
+
+ # targets_include()
+ #
+    # Checks whether the given targets are, or depend on, the given elements
+ #
+ # Args:
+ # targets (list of Element): A list of targets
+ # elements (list of Element): List of elements to check
+ #
+ # Returns:
+    #    (bool): True if all of `elements` are among the `targets`, or are
+ # somehow depended on by `targets`.
+ #
+ def targets_include(self, targets, elements):
+ target_element_set = set(self.dependencies(targets, Scope.ALL))
+ element_set = set(elements)
+ return element_set.issubset(target_element_set)
+
+ # subtract_elements()
+ #
+ # Subtract a subset of elements
+ #
+ # Args:
+ # elements (list of Element): The element list
+ # subtract (list of Element): List of elements to subtract from elements
+ #
+ # Returns:
+ # (list): The original elements list, with elements in subtract removed
+ #
+ def subtract_elements(self, elements, subtract):
+ subtract_set = set(subtract)
+ return [
+ e for e in elements
+ if e not in subtract_set
+ ]
+
+ # track_cross_junction_filter()
+ #
+    # Filters out elements which are across junction boundaries;
+ # otherwise asserts that there are no such elements.
+ #
+ # This is currently assumed to be only relevant for element
+    # lists targeted at tracking.
+ #
+ # Args:
+ # project (Project): Project used for cross_junction filtering.
+ # All elements are expected to belong to that project.
+ # elements (list of Element): The list of elements to filter
+ # cross_junction_requested (bool): Whether the user requested
+ # cross junction tracking
+ #
+ # Returns:
+ # (list of Element): The filtered or asserted result
+ #
+ def track_cross_junction_filter(self, project, elements, cross_junction_requested):
+ # Filter out cross junctioned elements
+ if not cross_junction_requested:
+ elements = self._filter_cross_junctions(project, elements)
+ self._assert_junction_tracking(elements)
+
+ return elements
+
+ # assert_consistent()
+ #
+ # Asserts that the given list of elements are in a consistent state, that
+ # is to say that all sources are consistent and can at least be fetched.
+ #
+ # Consequently it also means that cache keys can be resolved.
+ #
+ def assert_consistent(self, elements):
+ inconsistent = []
+ inconsistent_workspaced = []
+ with self._context.timed_activity("Checking sources"):
+ for element in elements:
+ if element._get_consistency() == Consistency.INCONSISTENT:
+ if element._get_workspace():
+ inconsistent_workspaced.append(element)
+ else:
+ inconsistent.append(element)
+
+ if inconsistent:
+ detail = "Exact versions are missing for the following elements:\n\n"
+ for element in inconsistent:
+ detail += " Element: {} is inconsistent\n".format(element._get_full_name())
+ for source in element.sources():
+ if source._get_consistency() == Consistency.INCONSISTENT:
+ detail += " {} is missing ref\n".format(source)
+ detail += '\n'
+ detail += "Try tracking these elements first with `bst source track`\n"
+
+ raise PipelineError("Inconsistent pipeline", detail=detail, reason="inconsistent-pipeline")
+
+ if inconsistent_workspaced:
+ detail = "Some workspaces do not exist but are not closed\n" + \
+ "Try closing them with `bst workspace close`\n\n"
+ for element in inconsistent_workspaced:
+ detail += " " + element._get_full_name() + "\n"
+ raise PipelineError("Inconsistent pipeline", detail=detail, reason="inconsistent-pipeline-workspaced")
+
+ # assert_sources_cached()
+ #
+ # Asserts that sources for the given list of elements are cached.
+ #
+ # Args:
+ # elements (list): The list of elements
+ #
+ def assert_sources_cached(self, elements):
+ uncached = []
+ with self._context.timed_activity("Checking sources"):
+ for element in elements:
+ if element._get_consistency() < Consistency.CACHED and \
+ not element._source_cached():
+ uncached.append(element)
+
+ if uncached:
+ detail = "Sources are not cached for the following elements:\n\n"
+ for element in uncached:
+ detail += " Following sources for element: {} are not cached:\n".format(element._get_full_name())
+ for source in element.sources():
+ if source._get_consistency() < Consistency.CACHED:
+ detail += " {}\n".format(source)
+ detail += '\n'
+ detail += "Try fetching these elements first with `bst source fetch`,\n" + \
+ "or run this command with `--fetch` option\n"
+
+ raise PipelineError("Uncached sources", detail=detail, reason="uncached-sources")
+
+ #############################################################
+ # Private Methods #
+ #############################################################
+
+    # _filter_cross_junctions()
+ #
+ # Filters out cross junction elements from the elements
+ #
+ # Args:
+ # project (Project): The project on which elements are allowed
+ # elements (list of Element): The list of elements to be tracked
+ #
+ # Returns:
+ # (list): A filtered list of `elements` which does
+ # not contain any cross junction elements.
+ #
+ def _filter_cross_junctions(self, project, elements):
+ return [
+ element for element in elements
+ if element._get_project() is project
+ ]
+
+ # _assert_junction_tracking()
+ #
+ # Raises an error if tracking is attempted on junctioned elements and
+ # a project.refs file is not enabled for the toplevel project.
+ #
+ # Args:
+ # elements (list of Element): The list of elements to be tracked
+ #
+ def _assert_junction_tracking(self, elements):
+
+ # We can track anything if the toplevel project uses project.refs
+ #
+ if self._project.ref_storage == ProjectRefStorage.PROJECT_REFS:
+ return
+
+ # Ideally, we would want to report every cross junction element but not
+        # their dependencies, unless those cross junction elements' dependencies
+ # were also explicitly requested on the command line.
+ #
+        # But this is too hard, so let's shoot for a simple error.
+ for element in elements:
+ element_project = element._get_project()
+ if element_project is not self._project:
+ detail = "Requested to track sources across junction boundaries\n" + \
+ "in a project which does not use project.refs ref-storage."
+
+ raise PipelineError("Untrackable sources", detail=detail, reason="untrackable-sources")
+
+ # _message()
+ #
+ # Local message propagator
+ #
+ def _message(self, message_type, message, **kwargs):
+ args = dict(kwargs)
+ self._context.message(
+ Message(None, message_type, message, **args))
+
+
+# _Planner()
+#
+# An internal object used for constructing a build plan
+# from the given resolved toplevel elements, while considering which
+# parts need to be built depending on build-only dependencies
+# being cached, and depth sorting for more efficient processing.
+#
+class _Planner():
+ def __init__(self):
+ self.depth_map = OrderedDict()
+ self.visiting_elements = set()
+
+ # Here we want to traverse the same element more than once when
+    # it is reachable from multiple places, in order to find
+    # the deepest occurrence of every element.
+ def plan_element(self, element, depth):
+ if element in self.visiting_elements:
+ # circular dependency, already being processed
+ return
+
+ prev_depth = self.depth_map.get(element)
+ if prev_depth is not None and prev_depth >= depth:
+ # element and dependencies already processed at equal or greater depth
+ return
+
+ self.visiting_elements.add(element)
+ for dep in element.dependencies(Scope.RUN, recurse=False):
+ self.plan_element(dep, depth)
+
+        # Don't try to plan builds of elements that are cached already
+ if not element._cached_success():
+ for dep in element.dependencies(Scope.BUILD, recurse=False):
+ self.plan_element(dep, depth + 1)
+
+ self.depth_map[element] = depth
+ self.visiting_elements.remove(element)
+
+ def plan(self, roots, plan_cached):
+ for root in roots:
+ self.plan_element(root, 0)
+
+ depth_sorted = sorted(self.depth_map.items(), key=itemgetter(1), reverse=True)
+ return [item[0] for item in depth_sorted if plan_cached or not item[0]._cached_success()]
diff --git a/src/buildstream/_platform/__init__.py b/src/buildstream/_platform/__init__.py
new file mode 100644
index 000000000..29a29894b
--- /dev/null
+++ b/src/buildstream/_platform/__init__.py
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+from .platform import Platform
diff --git a/src/buildstream/_platform/darwin.py b/src/buildstream/_platform/darwin.py
new file mode 100644
index 000000000..8e08685ec
--- /dev/null
+++ b/src/buildstream/_platform/darwin.py
@@ -0,0 +1,48 @@
+#
+# Copyright (C) 2017 Codethink Limited
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from ..sandbox import SandboxDummy
+
+from .platform import Platform
+
+
+class Darwin(Platform):
+
+ # This value comes from OPEN_MAX in syslimits.h
+ OPEN_MAX = 10240
+
+ def create_sandbox(self, *args, **kwargs):
+ kwargs['dummy_reason'] = \
+ "OSXFUSE is not supported and there are no supported sandbox " + \
+ "technologies for MacOS at this time"
+ return SandboxDummy(*args, **kwargs)
+
+ def check_sandbox_config(self, config):
+ # Accept all sandbox configs as it's irrelevant with the dummy sandbox (no Sandbox.run).
+ return True
+
+ def get_cpu_count(self, cap=None):
+ cpu_count = os.cpu_count()
+ if cap is None:
+ return cpu_count
+ else:
+ return min(cpu_count, cap)
+
+ def set_resource_limits(self, soft_limit=OPEN_MAX, hard_limit=None):
+ super().set_resource_limits(soft_limit)
diff --git a/src/buildstream/_platform/linux.py b/src/buildstream/_platform/linux.py
new file mode 100644
index 000000000..e4ce02572
--- /dev/null
+++ b/src/buildstream/_platform/linux.py
@@ -0,0 +1,150 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+import os
+import subprocess
+
+from .. import _site
+from .. import utils
+from ..sandbox import SandboxDummy
+
+from .platform import Platform
+from .._exceptions import PlatformError
+
+
+class Linux(Platform):
+
+ def __init__(self):
+
+ super().__init__()
+
+ self._uid = os.geteuid()
+ self._gid = os.getegid()
+
+ self._have_fuse = os.path.exists("/dev/fuse")
+
+ bwrap_version = _site.get_bwrap_version()
+
+ if bwrap_version is None:
+ self._bwrap_exists = False
+ self._have_good_bwrap = False
+ self._die_with_parent_available = False
+ self._json_status_available = False
+ else:
+ self._bwrap_exists = True
+ self._have_good_bwrap = (0, 1, 2) <= bwrap_version
+ self._die_with_parent_available = (0, 1, 8) <= bwrap_version
+ self._json_status_available = (0, 3, 2) <= bwrap_version
+
+ self._local_sandbox_available = self._have_fuse and self._have_good_bwrap
+
+ if self._local_sandbox_available:
+ self._user_ns_available = self._check_user_ns_available()
+ else:
+ self._user_ns_available = False
+
+ # Set linux32 option
+ self._linux32 = False
+
+ def create_sandbox(self, *args, **kwargs):
+ if not self._local_sandbox_available:
+ return self._create_dummy_sandbox(*args, **kwargs)
+ else:
+ return self._create_bwrap_sandbox(*args, **kwargs)
+
+ def check_sandbox_config(self, config):
+ if not self._local_sandbox_available:
+ # Accept all sandbox configs as it's irrelevant with the dummy sandbox (no Sandbox.run).
+ return True
+
+ if self._user_ns_available:
+ # User namespace support allows arbitrary build UID/GID settings.
+ pass
+ elif (config.build_uid != self._uid or config.build_gid != self._gid):
+ # Without user namespace support, the UID/GID in the sandbox
+ # will match the host UID/GID.
+ return False
+
+        # We can't do builds for another host or architecture, except for
+        # 32-bit on a matching 64-bit host (x86-32 on x86-64, aarch32 on aarch64)
+ host_os = self.get_host_os()
+ host_arch = self.get_host_arch()
+ if config.build_os != host_os:
+ raise PlatformError("Configured and host OS don't match.")
+ elif config.build_arch != host_arch:
+ # We can use linux32 for building 32bit on 64bit machines
+ if (host_os == "Linux" and
+ ((config.build_arch == "x86-32" and host_arch == "x86-64") or
+ (config.build_arch == "aarch32" and host_arch == "aarch64"))):
+ # check linux32 is available
+ try:
+ utils.get_host_tool('linux32')
+ self._linux32 = True
+ except utils.ProgramNotFoundError:
+ pass
+ else:
+ raise PlatformError("Configured architecture and host architecture don't match.")
+
+ return True
+
+ ################################################
+ # Private Methods #
+ ################################################
+
+ def _create_dummy_sandbox(self, *args, **kwargs):
+ reasons = []
+ if not self._have_fuse:
+ reasons.append("FUSE is unavailable")
+ if not self._have_good_bwrap:
+ if self._bwrap_exists:
+ reasons.append("`bwrap` is too old (bst needs at least 0.1.2)")
+ else:
+ reasons.append("`bwrap` executable not found")
+
+ kwargs['dummy_reason'] = " and ".join(reasons)
+ return SandboxDummy(*args, **kwargs)
+
+ def _create_bwrap_sandbox(self, *args, **kwargs):
+ from ..sandbox._sandboxbwrap import SandboxBwrap
+ # Inform the bubblewrap sandbox as to whether it can use user namespaces or not
+ kwargs['user_ns_available'] = self._user_ns_available
+ kwargs['die_with_parent_available'] = self._die_with_parent_available
+ kwargs['json_status_available'] = self._json_status_available
+ kwargs['linux32'] = self._linux32
+ return SandboxBwrap(*args, **kwargs)
+
+ def _check_user_ns_available(self):
+        # Here, let's check if bwrap is able to create user namespaces,
+        # issue a warning if it's not available, and save the state
+        # locally so that we can inform the sandbox not to try it
+        # later on.
+ bwrap = utils.get_host_tool('bwrap')
+ whoami = utils.get_host_tool('whoami')
+ try:
+ output = subprocess.check_output([
+ bwrap,
+ '--ro-bind', '/', '/',
+ '--unshare-user',
+ '--uid', '0', '--gid', '0',
+ whoami,
+ ], universal_newlines=True).strip()
+ except subprocess.CalledProcessError:
+ output = ''
+
+ return output == 'root'
diff --git a/src/buildstream/_platform/platform.py b/src/buildstream/_platform/platform.py
new file mode 100644
index 000000000..dba60ddca
--- /dev/null
+++ b/src/buildstream/_platform/platform.py
@@ -0,0 +1,164 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+import os
+import platform
+import sys
+import resource
+
+from .._exceptions import PlatformError, ImplError
+
+
+class Platform():
+ _instance = None
+
+ # Platform()
+ #
+ # A class to manage platform-specific details. Currently holds the
+ # sandbox factory as well as platform helpers.
+ #
+ def __init__(self):
+ self.set_resource_limits()
+
+ @classmethod
+ def _create_instance(cls):
+ # Meant for testing purposes and therefore hidden in the
+ # deepest corners of the source code. Try not to abuse this,
+ # please?
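+        # E.g. (illustrative): BST_FORCE_BACKEND=unix selects the chroot
+        # based Unix backend regardless of the detected host platform.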
+ if os.getenv('BST_FORCE_BACKEND'):
+ backend = os.getenv('BST_FORCE_BACKEND')
+ elif sys.platform.startswith('linux'):
+ backend = 'linux'
+ elif sys.platform.startswith('darwin'):
+ backend = 'darwin'
+ else:
+ backend = 'unix'
+
+ if backend == 'linux':
+ from .linux import Linux as PlatformImpl # pylint: disable=cyclic-import
+ elif backend == 'darwin':
+ from .darwin import Darwin as PlatformImpl # pylint: disable=cyclic-import
+ elif backend == 'unix':
+ from .unix import Unix as PlatformImpl # pylint: disable=cyclic-import
+ else:
+ raise PlatformError("No such platform: '{}'".format(backend))
+
+ cls._instance = PlatformImpl()
+
+ @classmethod
+ def get_platform(cls):
+ if not cls._instance:
+ cls._create_instance()
+ return cls._instance
+
+ def get_cpu_count(self, cap=None):
+ cpu_count = len(os.sched_getaffinity(0))
+ if cap is None:
+ return cpu_count
+ else:
+ return min(cpu_count, cap)
+
+ @staticmethod
+ def get_host_os():
+ return platform.uname().system
+
+ # canonicalize_arch():
+ #
+ # This returns the canonical, OS-independent architecture name
+ # or raises a PlatformError if the architecture is unknown.
+ #
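+    # For example (illustrative): "x86_64", "X86_64" and "amd64" all
+    # canonicalize to "x86-64", while an unrecognized string raises
+    # a PlatformError.
+    #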
+ @staticmethod
+ def canonicalize_arch(arch):
+ # Note that these are all expected to be lowercase, as we want a
+ # case-insensitive lookup. Windows can report its arch in ALLCAPS.
+ aliases = {
+ "aarch32": "aarch32",
+ "aarch64": "aarch64",
+ "aarch64-be": "aarch64-be",
+ "amd64": "x86-64",
+ "arm": "aarch32",
+ "armv8l": "aarch64",
+ "armv8b": "aarch64-be",
+ "i386": "x86-32",
+ "i486": "x86-32",
+ "i586": "x86-32",
+ "i686": "x86-32",
+ "power-isa-be": "power-isa-be",
+ "power-isa-le": "power-isa-le",
+ "ppc64": "power-isa-be",
+ "ppc64le": "power-isa-le",
+ "sparc": "sparc-v9",
+ "sparc64": "sparc-v9",
+ "sparc-v9": "sparc-v9",
+ "x86-32": "x86-32",
+ "x86-64": "x86-64"
+ }
+
+ try:
+ return aliases[arch.replace('_', '-').lower()]
+ except KeyError:
+ raise PlatformError("Unknown architecture: {}".format(arch))
+
+ # get_host_arch():
+ #
+ # This returns the architecture of the host machine. The possible values
+    # map from uname -m in order to be an OS-independent list.
+ #
+ # Returns:
+ # (string): String representing the architecture
+ @staticmethod
+ def get_host_arch():
+ # get the hardware identifier from uname
+ uname_machine = platform.uname().machine
+ return Platform.canonicalize_arch(uname_machine)
+
+ ##################################################################
+ # Sandbox functions #
+ ##################################################################
+
+ # create_sandbox():
+ #
+ # Create a build sandbox suitable for the environment
+ #
+ # Args:
+    #    args (tuple): The positional arguments to pass to the sandbox constructor
+    #    kwargs (dict): The keyword arguments to pass to the sandbox constructor
+ #
+ # Returns:
+ # (Sandbox) A sandbox
+ #
+ def create_sandbox(self, *args, **kwargs):
+ raise ImplError("Platform {platform} does not implement create_sandbox()"
+ .format(platform=type(self).__name__))
+
+ def check_sandbox_config(self, config):
+ raise ImplError("Platform {platform} does not implement check_sandbox_config()"
+ .format(platform=type(self).__name__))
+
+ def set_resource_limits(self, soft_limit=None, hard_limit=None):
+        # Need to set resources for _frontend/app.py as this is dependent on the platform.
+        # SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox,
+        # so avoid hitting the limit too quickly.
+ limits = resource.getrlimit(resource.RLIMIT_NOFILE)
+ if limits[0] != limits[1]:
+ if soft_limit is None:
+ soft_limit = limits[1]
+ if hard_limit is None:
+ hard_limit = limits[1]
+ resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
diff --git a/src/buildstream/_platform/unix.py b/src/buildstream/_platform/unix.py
new file mode 100644
index 000000000..d04b0712c
--- /dev/null
+++ b/src/buildstream/_platform/unix.py
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+import os
+
+from .._exceptions import PlatformError
+
+from .platform import Platform
+
+
+class Unix(Platform):
+
+ def __init__(self):
+
+ super().__init__()
+
+ self._uid = os.geteuid()
+ self._gid = os.getegid()
+
+ # Not necessarily 100% reliable, but we want to fail early.
+ if self._uid != 0:
+ raise PlatformError("Root privileges are required to run without bubblewrap.")
+
+ def create_sandbox(self, *args, **kwargs):
+ from ..sandbox._sandboxchroot import SandboxChroot
+ return SandboxChroot(*args, **kwargs)
+
+ def check_sandbox_config(self, config):
+ # With the chroot sandbox, the UID/GID in the sandbox
+ # will match the host UID/GID (typically 0/0).
+ if config.build_uid != self._uid or config.build_gid != self._gid:
+ return False
+
+ # Check host os and architecture match
+ if config.build_os != self.get_host_os():
+ raise PlatformError("Configured and host OS don't match.")
+ elif config.build_arch != self.get_host_arch():
+ raise PlatformError("Configured and host architecture don't match.")
+
+ return True
diff --git a/src/buildstream/_plugincontext.py b/src/buildstream/_plugincontext.py
new file mode 100644
index 000000000..7a5407cf6
--- /dev/null
+++ b/src/buildstream/_plugincontext.py
@@ -0,0 +1,239 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+import os
+import inspect
+
+from ._exceptions import PluginError, LoadError, LoadErrorReason
+from . import utils
+from . import _yaml
+
+
+# A Context for loading plugin types
+#
+# Args:
+# plugin_base (PluginBase): The main PluginBase object to work with
+# base_type (type): A base object type for this context
+# site_plugin_path (str): Path to where buildstream keeps plugins
+# plugin_origins (list): Data used to search for plugins
+#
+# Since multiple pipelines can be processed recursively
+# within the same interpreter, it's important that we have
+# one context associated with the processing of a given pipeline;
+# this way, sources and element types which are particular to
+# a given BuildStream project are isolated to their respective
+# Pipelines.
+#
+class PluginContext():
+
+ def __init__(self, plugin_base, base_type, site_plugin_path, *,
+ plugin_origins=None, dependencies=None,
+ format_versions={}):
+
+ # The plugin kinds which were loaded
+ self.loaded_dependencies = []
+
+ #
+ # Private members
+ #
+ self._dependencies = dependencies
+ self._base_type = base_type # The base class plugins derive from
+ self._types = {} # Plugin type lookup table by kind
+ self._plugin_origins = plugin_origins or []
+
+ # The PluginSource object
+ self._plugin_base = plugin_base
+ self._site_source = plugin_base.make_plugin_source(searchpath=site_plugin_path)
+ self._alternate_sources = {}
+ self._format_versions = format_versions
+
+ # lookup():
+ #
+ # Fetches a type loaded from a plugin in this plugin context
+ #
+ # Args:
+ # kind (str): The kind of Plugin to create
+ #
+    # Returns: the type associated with the given kind, along with its defaults file
+ #
+ # Raises: PluginError
+ #
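+    # Example (illustrative; based on _load_plugin() below, the returned
+    # value is the plugin type together with its defaults file):
+    #
+    #   plugin_type, defaults = plugin_context.lookup('script')
+    #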
+ def lookup(self, kind):
+ return self._ensure_plugin(kind)
+
+ def _get_local_plugin_source(self, path):
+ if ('local', path) not in self._alternate_sources:
+ # key by a tuple to avoid collision
+ source = self._plugin_base.make_plugin_source(searchpath=[path])
+ # Ensure that sources never get garbage collected,
+ # as they'll take the plugins with them.
+ self._alternate_sources[('local', path)] = source
+ else:
+ source = self._alternate_sources[('local', path)]
+ return source
+
+ def _get_pip_plugin_source(self, package_name, kind):
+ defaults = None
+ if ('pip', package_name) not in self._alternate_sources:
+ import pkg_resources
+ # key by a tuple to avoid collision
+ try:
+ package = pkg_resources.get_entry_info(package_name,
+ 'buildstream.plugins',
+ kind)
+ except pkg_resources.DistributionNotFound as e:
+ raise PluginError("Failed to load {} plugin '{}': {}"
+ .format(self._base_type.__name__, kind, e)) from e
+
+ if package is None:
+ raise PluginError("Pip package {} does not contain a plugin named '{}'"
+ .format(package_name, kind))
+
+ location = package.dist.get_resource_filename(
+ pkg_resources._manager,
+ package.module_name.replace('.', os.sep) + '.py'
+ )
+
+ # Also load the defaults - required since setuptools
+ # may need to extract the file.
+ try:
+ defaults = package.dist.get_resource_filename(
+ pkg_resources._manager,
+ package.module_name.replace('.', os.sep) + '.yaml'
+ )
+ except KeyError:
+ # The plugin didn't have an accompanying YAML file
+ defaults = None
+
+ source = self._plugin_base.make_plugin_source(searchpath=[os.path.dirname(location)])
+ self._alternate_sources[('pip', package_name)] = source
+
+ else:
+ source = self._alternate_sources[('pip', package_name)]
+
+ return source, defaults
+
+ def _ensure_plugin(self, kind):
+
+ if kind not in self._types:
+ # Check whether the plugin is specified in plugins
+ source = None
+ defaults = None
+ loaded_dependency = False
+
+ for origin in self._plugin_origins:
+ if kind not in _yaml.node_get(origin, list, 'plugins'):
+ continue
+
+ if _yaml.node_get(origin, str, 'origin') == 'local':
+ local_path = _yaml.node_get(origin, str, 'path')
+ source = self._get_local_plugin_source(local_path)
+ elif _yaml.node_get(origin, str, 'origin') == 'pip':
+ package_name = _yaml.node_get(origin, str, 'package-name')
+ source, defaults = self._get_pip_plugin_source(package_name, kind)
+ else:
+ raise PluginError("Failed to load plugin '{}': "
+ "Unexpected plugin origin '{}'"
+ .format(kind, _yaml.node_get(origin, str, 'origin')))
+ loaded_dependency = True
+ break
+
+ # Fall back to getting the source from site
+ if not source:
+ if kind not in self._site_source.list_plugins():
+ raise PluginError("No {} type registered for kind '{}'"
+ .format(self._base_type.__name__, kind))
+
+ source = self._site_source
+
+ self._types[kind] = self._load_plugin(source, kind, defaults)
+ if loaded_dependency:
+ self.loaded_dependencies.append(kind)
+
+ return self._types[kind]
+
+ def _load_plugin(self, source, kind, defaults):
+
+ try:
+ plugin = source.load_plugin(kind)
+
+ if not defaults:
+ plugin_file = inspect.getfile(plugin)
+ plugin_dir = os.path.dirname(plugin_file)
+ plugin_conf_name = "{}.yaml".format(kind)
+ defaults = os.path.join(plugin_dir, plugin_conf_name)
+
+ except ImportError as e:
+ raise PluginError("Failed to load {} plugin '{}': {}"
+ .format(self._base_type.__name__, kind, e)) from e
+
+ try:
+ plugin_type = plugin.setup()
+ except AttributeError as e:
+ raise PluginError("{} plugin '{}' did not provide a setup() function"
+ .format(self._base_type.__name__, kind)) from e
+ except TypeError as e:
+ raise PluginError("setup symbol in {} plugin '{}' is not a function"
+ .format(self._base_type.__name__, kind)) from e
+
+ self._assert_plugin(kind, plugin_type)
+ self._assert_version(kind, plugin_type)
+ return (plugin_type, defaults)
+
+ def _assert_plugin(self, kind, plugin_type):
+ if kind in self._types:
+ raise PluginError("Tried to register {} plugin for existing kind '{}' "
+ "(already registered {})"
+ .format(self._base_type.__name__, kind, self._types[kind].__name__))
+ try:
+ if not issubclass(plugin_type, self._base_type):
+ raise PluginError("{} plugin '{}' returned type '{}', which is not a subclass of {}"
+ .format(self._base_type.__name__, kind,
+ plugin_type.__name__,
+ self._base_type.__name__))
+ except TypeError as e:
+ raise PluginError("{} plugin '{}' returned something that is not a type (expected subclass of {})"
+ .format(self._base_type.__name__, kind,
+ self._base_type.__name__)) from e
+
+ def _assert_version(self, kind, plugin_type):
+
+ # Now assert BuildStream version
+ bst_major, bst_minor = utils.get_bst_version()
+
+ if bst_major < plugin_type.BST_REQUIRED_VERSION_MAJOR or \
+ (bst_major == plugin_type.BST_REQUIRED_VERSION_MAJOR and
+ bst_minor < plugin_type.BST_REQUIRED_VERSION_MINOR):
+ raise PluginError("BuildStream {}.{} is too old for {} plugin '{}' (requires {}.{})"
+ .format(
+ bst_major, bst_minor,
+ self._base_type.__name__, kind,
+ plugin_type.BST_REQUIRED_VERSION_MAJOR,
+ plugin_type.BST_REQUIRED_VERSION_MINOR))
+
+ # _assert_plugin_format()
+ #
+    # Helper to raise a LoadError if the loaded plugin is of a lesser version than
+ # the required version for this plugin
+ #
+ def _assert_plugin_format(self, plugin, version):
+ if plugin.BST_FORMAT_VERSION < version:
+ raise LoadError(LoadErrorReason.UNSUPPORTED_PLUGIN,
+ "{}: Format version {} is too old for requested version {}"
+ .format(plugin, plugin.BST_FORMAT_VERSION, version))
diff --git a/src/buildstream/_profile.py b/src/buildstream/_profile.py
new file mode 100644
index 000000000..b17215d0e
--- /dev/null
+++ b/src/buildstream/_profile.py
@@ -0,0 +1,160 @@
+#
+# Copyright (C) 2017 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# James Ennis <james.ennis@codethink.co.uk>
+# Benjamin Schubert <bschubert15@bloomberg.net>
+
+
+import contextlib
+import cProfile
+import pstats
+import os
+import datetime
+import time
+
+
+# Use the topic values here to decide what to profile
+# by setting them in the BST_PROFILE environment variable.
+#
+# Multiple topics can be set with the ':' separator.
+#
+# E.g.:
+#
+# BST_PROFILE=circ-dep-check:sort-deps bst <command> <args>
+#
+# The special 'all' value will enable all profiles.
+class Topics():
+ CIRCULAR_CHECK = 'circ-dep-check'
+ SORT_DEPENDENCIES = 'sort-deps'
+ LOAD_CONTEXT = 'load-context'
+ LOAD_PROJECT = 'load-project'
+ LOAD_PIPELINE = 'load-pipeline'
+ LOAD_SELECTION = 'load-selection'
+ SCHEDULER = 'scheduler'
+ ALL = 'all'
+
+
+class _Profile:
+ def __init__(self, key, message):
+ self.profiler = cProfile.Profile()
+ self._additional_pstats_files = []
+
+ self.key = key
+ self.message = message
+
+ self.start_time = time.time()
+ filename_template = os.path.join(
+ os.getcwd(),
+ "profile-{}-{}".format(
+ datetime.datetime.fromtimestamp(self.start_time).strftime("%Y%m%dT%H%M%S"),
+ self.key.replace("/", "-").replace(".", "-")
+ )
+ )
+ self.log_filename = "{}.log".format(filename_template)
+ self.cprofile_filename = "{}.cprofile".format(filename_template)
+
+ def __enter__(self):
+ self.start()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.stop()
+ self.save()
+
+ def merge(self, profile):
+ self._additional_pstats_files.append(profile.cprofile_filename)
+
+ def start(self):
+ self.profiler.enable()
+
+ def stop(self):
+ self.profiler.disable()
+
+ def save(self):
+ heading = "\n".join([
+ "-" * 64,
+ "Profile for key: {}".format(self.key),
+ "Started at: {}".format(self.start_time),
+ "\n\t{}".format(self.message) if self.message else "",
+ "-" * 64,
+ "" # for a final new line
+ ])
+
+ with open(self.log_filename, "a") as fp:
+ stats = pstats.Stats(self.profiler, *self._additional_pstats_files, stream=fp)
+
+ # Create the log file
+ fp.write(heading)
+ stats.sort_stats("cumulative")
+ stats.print_stats()
+
+ # Dump the cprofile
+ stats.dump_stats(self.cprofile_filename)
+
+
+class _Profiler:
+ def __init__(self, settings):
+ self.active_topics = set()
+ self.enabled_topics = set()
+ self._active_profilers = []
+
+ if settings:
+ self.enabled_topics = {
+ topic
+ for topic in settings.split(":")
+ }
+
+ @contextlib.contextmanager
+ def profile(self, topic, key, message=None):
+ if not self._is_profile_enabled(topic):
+ yield
+ return
+
+ if self._active_profilers:
+ # we are in a nested profiler, stop the parent
+ self._active_profilers[-1].stop()
+
+ key = "{}-{}".format(topic, key)
+
+ assert key not in self.active_topics
+ self.active_topics.add(key)
+
+ profiler = _Profile(key, message)
+ self._active_profilers.append(profiler)
+
+ with profiler:
+ yield
+
+ self.active_topics.remove(key)
+
+ # Remove the last profiler from the list
+ self._active_profilers.pop()
+
+ if self._active_profilers:
+ # We were in a previous profiler, add the previous results to it
+ # and reenable it.
+ parent_profiler = self._active_profilers[-1]
+ parent_profiler.merge(profiler)
+ parent_profiler.start()
+
+ def _is_profile_enabled(self, topic):
+ return topic in self.enabled_topics or Topics.ALL in self.enabled_topics
+
+
+# Export a profiler to be used by BuildStream
+PROFILER = _Profiler(os.getenv("BST_PROFILE"))
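+
+
+# Example (illustrative): code that wants to be profiled under a topic wraps
+# the work with PROFILER.profile(), and the profile is only recorded when
+# BST_PROFILE enables that topic (or 'all'):
+#
+#   with PROFILER.profile(Topics.SCHEDULER, "some-key"):
+#       ...  # the work to be measured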
diff --git a/src/buildstream/_project.py b/src/buildstream/_project.py
new file mode 100644
index 000000000..c40321c66
--- /dev/null
+++ b/src/buildstream/_project.py
@@ -0,0 +1,975 @@
+#
+# Copyright (C) 2016-2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Tiago Gomes <tiago.gomes@codethink.co.uk>
+
+import os
+import sys
+from collections import OrderedDict
+from collections.abc import Mapping
+from pathlib import Path
+from pluginbase import PluginBase
+from . import utils
+from . import _cachekey
+from . import _site
+from . import _yaml
+from ._artifactelement import ArtifactElement
+from ._profile import Topics, PROFILER
+from ._exceptions import LoadError, LoadErrorReason
+from ._options import OptionPool
+from ._artifactcache import ArtifactCache
+from ._sourcecache import SourceCache
+from .sandbox import SandboxRemote
+from ._elementfactory import ElementFactory
+from ._sourcefactory import SourceFactory
+from .types import CoreWarnings
+from ._projectrefs import ProjectRefs, ProjectRefStorage
+from ._versions import BST_FORMAT_VERSION
+from ._loader import Loader
+from .element import Element
+from ._message import Message, MessageType
+from ._includes import Includes
+from ._platform import Platform
+from ._workspaces import WORKSPACE_PROJECT_FILE
+
+
+# Project Configuration file
+_PROJECT_CONF_FILE = 'project.conf'
+
+
+# HostMount()
+#
+# A simple object describing the behavior of
+# a host mount.
+#
+class HostMount():
+
+ def __init__(self, path, host_path=None, optional=False):
+
+ # Support environment variable expansion in host mounts
+ path = os.path.expandvars(path)
+ if host_path is not None:
+ host_path = os.path.expandvars(host_path)
+
+ self.path = path # Path inside the sandbox
+ self.host_path = host_path # Path on the host
+ self.optional = optional # Optional mounts do not incur warnings or errors
+
+ if self.host_path is None:
+ self.host_path = self.path
+
+
+# Represents project configuration that can have different values for junctions.
+class ProjectConfig:
+ def __init__(self):
+ self.element_factory = None
+ self.source_factory = None
+ self.options = None # OptionPool
+ self.base_variables = {} # The base set of variables
+ self.element_overrides = {} # Element specific configurations
+ self.source_overrides = {} # Source specific configurations
+ self.mirrors = OrderedDict() # contains dicts of alias-mappings to URIs.
+ self.default_mirror = None # The name of the preferred mirror.
+ self._aliases = {} # Aliases dictionary
+
+
+# Project()
+#
+# The Project Configuration
+#
+class Project():
+
+ def __init__(self, directory, context, *, junction=None, cli_options=None,
+ default_mirror=None, parent_loader=None,
+ search_for_project=True):
+
+ # The project name
+ self.name = None
+
+ self._context = context # The invocation Context, a private member
+
+ if search_for_project:
+ self.directory, self._invoked_from_workspace_element = self._find_project_dir(directory)
+ else:
+ self.directory = directory
+ self._invoked_from_workspace_element = None
+
+ self._absolute_directory_path = Path(self.directory).resolve()
+
+ # Absolute path to where elements are loaded from within the project
+ self.element_path = None
+
+ # Default target elements
+ self._default_targets = None
+
+ # ProjectRefs for the main refs and also for junctions
+ self.refs = ProjectRefs(self.directory, 'project.refs')
+ self.junction_refs = ProjectRefs(self.directory, 'junction.refs')
+
+ self.config = ProjectConfig()
+ self.first_pass_config = ProjectConfig()
+
+ self.junction = junction # The junction Element object, if this is a subproject
+
+ self.ref_storage = None # ProjectRefStorage setting
+ self.base_environment = {} # The base set of environment variables
+ self.base_env_nocache = None # The base nocache mask (list) for the environment
+
+ #
+ # Private Members
+ #
+
+ self._default_mirror = default_mirror # The name of the preferred mirror.
+
+ self._cli_options = cli_options
+ self._cache_key = None
+
+ self._fatal_warnings = [] # A list of warnings which should trigger an error
+
+ self._shell_command = [] # The default interactive shell command
+ self._shell_environment = {} # Statically set environment vars
+ self._shell_host_files = [] # A list of HostMount objects
+
+ self.artifact_cache_specs = None
+ self.source_cache_specs = None
+ self.remote_execution_specs = None
+ self._sandbox = None
+ self._splits = None
+
+ self._context.add_project(self)
+
+ self._partially_loaded = False
+ self._fully_loaded = False
+ self._project_includes = None
+
+ with PROFILER.profile(Topics.LOAD_PROJECT, self.directory.replace(os.sep, '-')):
+ self._load(parent_loader=parent_loader)
+
+ self._partially_loaded = True
+
+ @property
+ def options(self):
+ return self.config.options
+
+ @property
+ def base_variables(self):
+ return self.config.base_variables
+
+ @property
+ def element_overrides(self):
+ return self.config.element_overrides
+
+ @property
+ def source_overrides(self):
+ return self.config.source_overrides
+
+ # translate_url():
+ #
+ # Translates the given url which may be specified with an alias
+ # into a fully qualified url.
+ #
+ # Args:
+ # url (str): A url, which may be using an alias
+ # first_pass (bool): Whether to use first pass configuration (for junctions)
+ #
+ # Returns:
+ # str: The fully qualified url, with aliases resolved
+ #
+ # This method is provided for :class:`.Source` objects to resolve
+ # fully qualified urls based on the shorthand which is allowed
+ # to be specified in the YAML
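+    #
+    # Example (illustrative, assuming an alias 'upstream' declared under the
+    # project's 'aliases' configuration):
+    #
+    #   project.translate_url('upstream:foo.git')
+    #   # -> e.g. 'https://gitlab.example.com/foo.git'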
+ def translate_url(self, url, *, first_pass=False):
+ if first_pass:
+ config = self.first_pass_config
+ else:
+ config = self.config
+
+ if url and utils._ALIAS_SEPARATOR in url:
+ url_alias, url_body = url.split(utils._ALIAS_SEPARATOR, 1)
+ alias_url = _yaml.node_get(config._aliases, str, url_alias, default_value=None)
+ if alias_url:
+ url = alias_url + url_body
+
+ return url
+
+ # get_shell_config()
+ #
+ # Gets the project specified shell configuration
+ #
+ # Returns:
+ # (list): The shell command
+ # (dict): The shell environment
+ # (list): The list of HostMount objects
+ #
+ def get_shell_config(self):
+ return (self._shell_command, self._shell_environment, self._shell_host_files)
+
+ # get_cache_key():
+ #
+ # Returns the cache key, calculating it if necessary
+ #
+ # Returns:
+ # (str): A hex digest cache key for the Context
+ #
+ def get_cache_key(self):
+ if self._cache_key is None:
+
+ # Anything that alters the build goes into the unique key
+ # (currently nothing here)
+ self._cache_key = _cachekey.generate_key(_yaml.new_empty_node())
+
+ return self._cache_key
+
+ # get_path_from_node()
+ #
+ # Fetches the project path from a dictionary node and validates it
+ #
+ # Paths are asserted to never lead to a directory outside of the project
+    # directory. In addition, paths cannot point to symbolic links, fifos,
+    # sockets or block/character devices.
+ #
+ # The `check_is_file` and `check_is_dir` parameters can be used to
+ # perform additional validations on the path. Note that an exception
+ # will always be raised if both parameters are set to ``True``.
+ #
+ # Args:
+ # node (dict): A dictionary loaded from YAML
+ # key (str): The key whose value contains a path to validate
+ # check_is_file (bool): If ``True`` an error will also be raised
+ # if path does not point to a regular file.
+ # Defaults to ``False``
+ # check_is_dir (bool): If ``True`` an error will be also raised
+ # if path does not point to a directory.
+ # Defaults to ``False``
+ # Returns:
+ # (str): The project path
+ #
+ # Raises:
+ # (LoadError): In case that the project path is not valid or does not
+ # exist
+ #
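+    # Example (illustrative; mirrors the 'element-path' handling further down
+    # in this file):
+    #
+    #   element_path = project.get_path_from_node(node, 'element-path',
+    #                                              check_is_dir=True)
+    #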
+ def get_path_from_node(self, node, key, *,
+ check_is_file=False, check_is_dir=False):
+ path_str = _yaml.node_get(node, str, key)
+ path = Path(path_str)
+ full_path = self._absolute_directory_path / path
+
+ provenance = _yaml.node_get_provenance(node, key=key)
+
+ if full_path.is_symlink():
+ raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND,
+ "{}: Specified path '{}' must not point to "
+ "symbolic links "
+ .format(provenance, path_str))
+
+ if path.parts and path.parts[0] == '..':
+ raise LoadError(LoadErrorReason.PROJ_PATH_INVALID,
+ "{}: Specified path '{}' first component must "
+ "not be '..'"
+ .format(provenance, path_str))
+
+ try:
+ if sys.version_info[0] == 3 and sys.version_info[1] < 6:
+ full_resolved_path = full_path.resolve()
+ else:
+ full_resolved_path = full_path.resolve(strict=True) # pylint: disable=unexpected-keyword-arg
+ except FileNotFoundError:
+ raise LoadError(LoadErrorReason.MISSING_FILE,
+ "{}: Specified path '{}' does not exist"
+ .format(provenance, path_str))
+
+ is_inside = self._absolute_directory_path in full_resolved_path.parents or (
+ full_resolved_path == self._absolute_directory_path)
+
+ if not is_inside:
+ raise LoadError(LoadErrorReason.PROJ_PATH_INVALID,
+ "{}: Specified path '{}' must not lead outside of the "
+ "project directory"
+ .format(provenance, path_str))
+
+ if path.is_absolute():
+ raise LoadError(LoadErrorReason.PROJ_PATH_INVALID,
+ "{}: Absolute path: '{}' invalid.\n"
+ "Please specify a path relative to the project's root."
+ .format(provenance, path))
+
+ if full_resolved_path.is_socket() or (
+ full_resolved_path.is_fifo() or
+ full_resolved_path.is_block_device()):
+ raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND,
+ "{}: Specified path '{}' points to an unsupported "
+ "file kind"
+ .format(provenance, path_str))
+
+ if check_is_file and not full_resolved_path.is_file():
+ raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND,
+ "{}: Specified path '{}' is not a regular file"
+ .format(provenance, path_str))
+
+ if check_is_dir and not full_resolved_path.is_dir():
+ raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND,
+ "{}: Specified path '{}' is not a directory"
+ .format(provenance, path_str))
+
+ return path_str
+
+ def _validate_node(self, node):
+ _yaml.node_validate(node, [
+ 'format-version',
+ 'element-path', 'variables',
+ 'environment', 'environment-nocache',
+ 'split-rules', 'elements', 'plugins',
+ 'aliases', 'name', 'defaults',
+ 'artifacts', 'options',
+ 'fail-on-overlap', 'shell', 'fatal-warnings',
+ 'ref-storage', 'sandbox', 'mirrors', 'remote-execution',
+ 'sources', 'source-caches', '(@)'
+ ])
+
+ # create_element()
+ #
+ # Instantiate and return an element
+ #
+ # Args:
+ # meta (MetaElement): The loaded MetaElement
+ # first_pass (bool): Whether to use first pass configuration (for junctions)
+ #
+ # Returns:
+ # (Element): A newly created Element object of the appropriate kind
+ #
+ def create_element(self, meta, *, first_pass=False):
+ if first_pass:
+ return self.first_pass_config.element_factory.create(self._context, self, meta)
+ else:
+ return self.config.element_factory.create(self._context, self, meta)
+
+ # create_artifact_element()
+ #
+ # Instantiate and return an ArtifactElement
+ #
+ # Args:
+ # ref (str): A string of the artifact ref
+ #
+ # Returns:
+ # (ArtifactElement): A newly created ArtifactElement object of the appropriate kind
+ #
+ def create_artifact_element(self, ref):
+ return ArtifactElement(self._context, ref)
+
+ # create_source()
+ #
+ # Instantiate and return a Source
+ #
+ # Args:
+ # meta (MetaSource): The loaded MetaSource
+ # first_pass (bool): Whether to use first pass configuration (for junctions)
+ #
+ # Returns:
+ # (Source): A newly created Source object of the appropriate kind
+ #
+ def create_source(self, meta, *, first_pass=False):
+ if first_pass:
+ return self.first_pass_config.source_factory.create(self._context, self, meta)
+ else:
+ return self.config.source_factory.create(self._context, self, meta)
+
+ # get_alias_uri()
+ #
+ # Returns the URI for a given alias, if it exists
+ #
+ # Args:
+ # alias (str): The alias.
+ # first_pass (bool): Whether to use first pass configuration (for junctions)
+ #
+ # Returns:
+    #    str: The URI for the given alias, or None if there is no URI for
+ # that alias.
+ def get_alias_uri(self, alias, *, first_pass=False):
+ if first_pass:
+ config = self.first_pass_config
+ else:
+ config = self.config
+
+ return _yaml.node_get(config._aliases, str, alias, default_value=None)
+
+ # get_alias_uris()
+ #
+ # Args:
+ # alias (str): The alias.
+ # first_pass (bool): Whether to use first pass configuration (for junctions)
+ #
+ # Returns a list of every URI to replace an alias with
+ def get_alias_uris(self, alias, *, first_pass=False):
+ if first_pass:
+ config = self.first_pass_config
+ else:
+ config = self.config
+
+ if not alias or alias not in config._aliases:
+ return [None]
+
+ mirror_list = []
+ for key, alias_mapping in config.mirrors.items():
+ if alias in alias_mapping:
+ if key == config.default_mirror:
+ mirror_list = alias_mapping[alias] + mirror_list
+ else:
+ mirror_list += alias_mapping[alias]
+ mirror_list.append(_yaml.node_get(config._aliases, str, alias))
+ return mirror_list
+
+ # load_elements()
+ #
+ # Loads elements from target names.
+ #
+ # Args:
+ # targets (list): Target names
+    #    rewritable (bool): Whether the loaded files should be rewritable;
+ # this is a bit more expensive due to deep copies
+ # fetch_subprojects (bool): Whether we should fetch subprojects as a part of the
+ # loading process, if they are not yet locally cached
+ #
+ # Returns:
+ # (list): A list of loaded Element
+ #
+ def load_elements(self, targets, *,
+ rewritable=False, fetch_subprojects=False):
+ with self._context.timed_activity("Loading elements", silent_nested=True):
+ meta_elements = self.loader.load(targets, rewritable=rewritable,
+ ticker=None,
+ fetch_subprojects=fetch_subprojects)
+
+ with self._context.timed_activity("Resolving elements"):
+ elements = [
+ Element._new_from_meta(meta)
+ for meta in meta_elements
+ ]
+
+ Element._clear_meta_elements_cache()
+
+ # Now warn about any redundant source references which may have
+ # been discovered in the resolve() phase.
+ redundant_refs = Element._get_redundant_source_refs()
+ if redundant_refs:
+ detail = "The following inline specified source references will be ignored:\n\n"
+ lines = [
+ "{}:{}".format(source._get_provenance(), ref)
+ for source, ref in redundant_refs
+ ]
+ detail += "\n".join(lines)
+ self._context.message(
+ Message(None, MessageType.WARN, "Ignoring redundant source references", detail=detail))
+
+ return elements
+
+ # ensure_fully_loaded()
+ #
+    # Ensure the project has finished loading. At first initialization, a
+    # project can only load junction elements. Other elements require the
+    # project to be fully loaded.
+ #
+ def ensure_fully_loaded(self):
+ if self._fully_loaded:
+ return
+ assert self._partially_loaded
+ self._fully_loaded = True
+
+ if self.junction:
+ self.junction._get_project().ensure_fully_loaded()
+
+ self._load_second_pass()
+
+ # invoked_from_workspace_element()
+ #
+ # Returns the element whose workspace was used to invoke buildstream
+ # if buildstream was invoked from an external workspace
+ #
+ def invoked_from_workspace_element(self):
+ return self._invoked_from_workspace_element
+
+ # cleanup()
+ #
+ # Cleans up resources used loading elements
+ #
+ def cleanup(self):
+ # Reset the element loader state
+ Element._reset_load_state()
+
+ # get_default_target()
+ #
+ # Attempts to interpret which element the user intended to run a command on.
+ # This is for commands that only accept a single target element and thus,
+ # this only uses the workspace element (if invoked from workspace directory)
+ # and does not use the project default targets.
+ #
+ def get_default_target(self):
+ return self._invoked_from_workspace_element
+
+ # get_default_targets()
+ #
+ # Attempts to interpret which elements the user intended to run a command on.
+ # This is for commands that accept multiple target elements.
+ #
+ def get_default_targets(self):
+
+ # If _invoked_from_workspace_element has a value,
+        # a workspace element was found before a project config.
+        # Therefore the workspace does not contain a project.
+ if self._invoked_from_workspace_element:
+ return (self._invoked_from_workspace_element,)
+
+ # Default targets from project configuration
+ if self._default_targets:
+ return tuple(self._default_targets)
+
+ # If default targets are not configured, default to all project elements
+ default_targets = []
+ for root, dirs, files in os.walk(self.element_path):
+ # Do not recurse down the ".bst" directory which is where we stage
+ # junctions and other BuildStream internals.
+ if ".bst" in dirs:
+ dirs.remove(".bst")
+ for file in files:
+ if file.endswith(".bst"):
+ rel_dir = os.path.relpath(root, self.element_path)
+ rel_file = os.path.join(rel_dir, file).lstrip("./")
+ default_targets.append(rel_file)
+
+ return tuple(default_targets)
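+
+ # As an illustrative sketch only (hypothetical layout): an element path
+ # containing
+ #
+ #   base.bst
+ #   app/hello.bst
+ #   .bst/           <- internal staging directory, skipped above
+ #
+ # would yield the default targets ('base.bst', 'app/hello.bst'),
+ # in whatever order os.walk() visits them.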
+
+ # _load():
+ #
+ # Loads the project configuration file in the project
+ # directory and processes the first pass.
+ #
+ # Raises: LoadError if there was a problem with the project.conf
+ #
+ def _load(self, parent_loader=None):
+
+ # Load builtin default
+ projectfile = os.path.join(self.directory, _PROJECT_CONF_FILE)
+ self._default_config_node = _yaml.load(_site.default_project_config)
+
+ # Load project local config and override the builtin
+ try:
+ self._project_conf = _yaml.load(projectfile)
+ except LoadError as e:
+ # Raise a more specific error here
+ if e.reason == LoadErrorReason.MISSING_FILE:
+ raise LoadError(LoadErrorReason.MISSING_PROJECT_CONF, str(e)) from e
+ else:
+ raise
+
+ pre_config_node = _yaml.node_copy(self._default_config_node)
+ _yaml.composite(pre_config_node, self._project_conf)
+
+ # Assert project's format version early, before validating toplevel keys
+ format_version = _yaml.node_get(pre_config_node, int, 'format-version')
+ if BST_FORMAT_VERSION < format_version:
+ major, minor = utils.get_bst_version()
+ raise LoadError(
+ LoadErrorReason.UNSUPPORTED_PROJECT,
+ "Project requested format version {}, but BuildStream {}.{} only supports up until format version {}"
+ .format(format_version, major, minor, BST_FORMAT_VERSION))
+
+ self._validate_node(pre_config_node)
+
+ # The project name, element path and option declarations
+ # are constant and cannot be overridden by option conditional statements
+ self.name = _yaml.node_get(self._project_conf, str, 'name')
+
+ # Validate that project name is a valid symbol name
+ _yaml.assert_symbol_name(_yaml.node_get_provenance(pre_config_node, 'name'),
+ self.name, "project name")
+
+ self.element_path = os.path.join(
+ self.directory,
+ self.get_path_from_node(pre_config_node, 'element-path',
+ check_is_dir=True)
+ )
+
+ self.config.options = OptionPool(self.element_path)
+ self.first_pass_config.options = OptionPool(self.element_path)
+
+ defaults = _yaml.node_get(pre_config_node, Mapping, 'defaults')
+ _yaml.node_validate(defaults, ['targets'])
+ self._default_targets = _yaml.node_get(defaults, list, "targets")
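+
+ # Illustrative sketch only (hypothetical project.conf snippet): default
+ # targets are declared under the 'defaults' key validated above:
+ #
+ #   defaults:
+ #     targets:
+ #     - hello.bst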
+
+ # Fatal warnings
+ self._fatal_warnings = _yaml.node_get(pre_config_node, list, 'fatal-warnings', default_value=[])
+
+ self.loader = Loader(self._context, self,
+ parent=parent_loader)
+
+ self._project_includes = Includes(self.loader, copy_tree=False)
+
+ project_conf_first_pass = _yaml.node_copy(self._project_conf)
+ self._project_includes.process(project_conf_first_pass, only_local=True)
+ config_no_include = _yaml.node_copy(self._default_config_node)
+ _yaml.composite(config_no_include, project_conf_first_pass)
+
+ self._load_pass(config_no_include, self.first_pass_config,
+ ignore_unknown=True)
+
+ # Use separate file for storing source references
+ self.ref_storage = _yaml.node_get(pre_config_node, str, 'ref-storage')
+ if self.ref_storage not in [ProjectRefStorage.INLINE, ProjectRefStorage.PROJECT_REFS]:
+ p = _yaml.node_get_provenance(pre_config_node, 'ref-storage')
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Invalid value '{}' specified for ref-storage"
+ .format(p, self.ref_storage))
+
+ if self.ref_storage == ProjectRefStorage.PROJECT_REFS:
+ self.junction_refs.load(self.first_pass_config.options)
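+
+ # Illustrative sketch only (hypothetical project.conf snippet): selecting
+ # the separate refs file instead of inline refs would look like
+ #
+ #   name: myproject
+ #   ref-storage: project.refs
+ #
+ # Any other value for ref-storage is rejected above.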
+
+ # _load_second_pass()
+ #
+ # Process the second pass of loading the project configuration.
+ #
+ def _load_second_pass(self):
+ project_conf_second_pass = _yaml.node_copy(self._project_conf)
+ self._project_includes.process(project_conf_second_pass)
+ config = _yaml.node_copy(self._default_config_node)
+ _yaml.composite(config, project_conf_second_pass)
+
+ self._load_pass(config, self.config)
+
+ self._validate_node(config)
+
+ #
+ # Now all YAML composition is done; from here on we just load
+ # the values from our loaded configuration dictionary.
+ #
+
+ # Load artifacts pull/push configuration for this project
+ project_specs = ArtifactCache.specs_from_config_node(config, self.directory)
+ override_specs = ArtifactCache.specs_from_config_node(
+ self._context.get_overrides(self.name), self.directory)
+
+ self.artifact_cache_specs = override_specs + project_specs
+
+ if self.junction:
+ parent = self.junction._get_project()
+ self.artifact_cache_specs = parent.artifact_cache_specs + self.artifact_cache_specs
+
+ # Load source caches with pull/push config
+ self.source_cache_specs = SourceCache.specs_from_config_node(config, self.directory)
+
+ # Load remote-execution configuration for this project
+ project_specs = SandboxRemote.specs_from_config_node(config, self.directory)
+ override_specs = SandboxRemote.specs_from_config_node(
+ self._context.get_overrides(self.name), self.directory)
+
+ if override_specs is not None:
+ self.remote_execution_specs = override_specs
+ elif project_specs is not None:
+ self.remote_execution_specs = project_specs
+ else:
+ self.remote_execution_specs = self._context.remote_execution_specs
+
+ # Load sandbox environment variables
+ self.base_environment = _yaml.node_get(config, Mapping, 'environment')
+ self.base_env_nocache = _yaml.node_get(config, list, 'environment-nocache')
+
+ # Load sandbox configuration
+ self._sandbox = _yaml.node_get(config, Mapping, 'sandbox')
+
+ # Load project split rules
+ self._splits = _yaml.node_get(config, Mapping, 'split-rules')
+
+ # Support backwards compatibility for fail-on-overlap
+ fail_on_overlap = _yaml.node_get(config, bool, 'fail-on-overlap', default_value=None)
+
+ if (CoreWarnings.OVERLAPS not in self._fatal_warnings) and fail_on_overlap:
+ self._fatal_warnings.append(CoreWarnings.OVERLAPS)
+
+ # Deprecation check
+ if fail_on_overlap is not None:
+ self._context.message(
+ Message(
+ None,
+ MessageType.WARN,
+ "Use of fail-on-overlap within project.conf " +
+ "is deprecated. Consider using fatal-warnings instead."
+ )
+ )
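+
+ # Illustrative sketch only (hypothetical project.conf snippets): the
+ # deprecated form
+ #
+ #   fail-on-overlap: true
+ #
+ # can be expressed instead as
+ #
+ #   fatal-warnings:
+ #   - overlaps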
+
+ # Load project.refs if it exists; this step is skipped unless
+ # ref-storage is set to 'project.refs'.
+ if self.ref_storage == ProjectRefStorage.PROJECT_REFS:
+ self.refs.load(self.options)
+
+ # Parse shell options
+ shell_options = _yaml.node_get(config, Mapping, 'shell')
+ _yaml.node_validate(shell_options, ['command', 'environment', 'host-files'])
+ self._shell_command = _yaml.node_get(shell_options, list, 'command')
+
+ # Perform environment expansion right away
+ shell_environment = _yaml.node_get(shell_options, Mapping, 'environment', default_value={})
+ for key in _yaml.node_keys(shell_environment):
+ value = _yaml.node_get(shell_environment, str, key)
+ self._shell_environment[key] = os.path.expandvars(value)
+
+ # Host files are parsed as a list for convenience
+ host_files = _yaml.node_get(shell_options, list, 'host-files', default_value=[])
+ for host_file in host_files:
+ if isinstance(host_file, str):
+ mount = HostMount(host_file)
+ else:
+ # Some validation
+ index = host_files.index(host_file)
+ host_file_desc = _yaml.node_get(shell_options, Mapping, 'host-files', indices=[index])
+ _yaml.node_validate(host_file_desc, ['path', 'host_path', 'optional'])
+
+ # Parse the host mount
+ path = _yaml.node_get(host_file_desc, str, 'path')
+ host_path = _yaml.node_get(host_file_desc, str, 'host_path', default_value=None)
+ optional = _yaml.node_get(host_file_desc, bool, 'optional', default_value=False)
+ mount = HostMount(path, host_path, optional)
+
+ self._shell_host_files.append(mount)
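+
+ # Illustrative sketch only (hypothetical project.conf snippet): host-files
+ # entries may be plain strings or mappings with the keys validated above:
+ #
+ #   shell:
+ #     host-files:
+ #     - '/etc/resolv.conf'
+ #     - path: '/etc/passwd'
+ #       host_path: '/tmp/passwd'
+ #       optional: true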
+
+ # _load_pass():
+ #
+ # Loads parts of the project configuration that are different
+ # for first and second pass configurations.
+ #
+ # Args:
+ # config (dict) - YAML node of the configuration file.
+ # output (ProjectConfig) - ProjectConfig to load configuration onto.
+ # ignore_unknown (bool) - Whether the option loader should ignore unknown options.
+ #
+ def _load_pass(self, config, output, *,
+ ignore_unknown=False):
+
+ # Element and Source type configurations will be composited later onto
+ # element/source types, so we delete them from here and run our final
+ # assertion afterwards.
+ output.element_overrides = _yaml.node_get(config, Mapping, 'elements', default_value={})
+ output.source_overrides = _yaml.node_get(config, Mapping, 'sources', default_value={})
+ _yaml.node_del(config, 'elements', safe=True)
+ _yaml.node_del(config, 'sources', safe=True)
+ _yaml.node_final_assertions(config)
+
+ self._load_plugin_factories(config, output)
+
+ # Load project options
+ options_node = _yaml.node_get(config, Mapping, 'options', default_value={})
+ output.options.load(options_node)
+ if self.junction:
+ # load before user configuration
+ output.options.load_yaml_values(self.junction.options, transform=self.junction._subst_string)
+
+ # Collect option values specified in the user configuration
+ overrides = self._context.get_overrides(self.name)
+ override_options = _yaml.node_get(overrides, Mapping, 'options', default_value={})
+ output.options.load_yaml_values(override_options)
+ if self._cli_options:
+ output.options.load_cli_values(self._cli_options, ignore_unknown=ignore_unknown)
+
+ # We're done modifying options, now we can use them for substitutions
+ output.options.resolve()
+
+ #
+ # Now resolve any conditionals in the remaining configuration.
+ # Any conditionals specified for project option declarations, or which
+ # conditionally specify the project name, will be ignored.
+ #
+ # Don't forget to also resolve options in the element and source overrides.
+ output.options.process_node(config)
+ output.options.process_node(output.element_overrides)
+ output.options.process_node(output.source_overrides)
+
+ # Load base variables
+ output.base_variables = _yaml.node_get(config, Mapping, 'variables')
+
+ # Add the project name as a default variable
+ _yaml.node_set(output.base_variables, 'project-name', self.name)
+
+ # Extend variables with automatic variables and option exports.
+ # Initialize max-jobs as a string, as all variables are processed as strings.
+ # Based on some testing (mainly on AWS), the maximum effective max-jobs
+ # value seems to be around 8-10 if we have enough cores; users should set
+ # values based on workload and build infrastructure.
+ platform = Platform.get_platform()
+ _yaml.node_set(output.base_variables, 'max-jobs', str(platform.get_cpu_count(8)))
+
+ # Export options into variables, if that was requested
+ output.options.export_variables(output.base_variables)
+
+ # Override default_mirror if not set by command-line
+ output.default_mirror = self._default_mirror or _yaml.node_get(overrides, str,
+ 'default-mirror', default_value=None)
+
+ mirrors = _yaml.node_get(config, list, 'mirrors', default_value=[])
+ for mirror in mirrors:
+ allowed_mirror_fields = [
+ 'name', 'aliases'
+ ]
+ _yaml.node_validate(mirror, allowed_mirror_fields)
+ mirror_name = _yaml.node_get(mirror, str, 'name')
+ alias_mappings = {}
+ for alias_mapping, uris in _yaml.node_items(_yaml.node_get(mirror, Mapping, 'aliases')):
+ assert isinstance(uris, list)
+ alias_mappings[alias_mapping] = list(uris)
+ output.mirrors[mirror_name] = alias_mappings
+ if not output.default_mirror:
+ output.default_mirror = mirror_name
+
+ # Source URL aliases
+ output._aliases = _yaml.node_get(config, Mapping, 'aliases', default_value={})
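+
+ # Illustrative sketch only (hypothetical project.conf snippet): aliases
+ # map a short name to a URL, and mirrors provide alternative URIs per
+ # alias, matching the fields validated above:
+ #
+ #   aliases:
+ #     upstream: https://example.com/sources/
+ #
+ #   mirrors:
+ #   - name: my-mirror
+ #     aliases:
+ #       upstream:
+ #       - https://mirror.example.net/sources/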
+
+ # _find_project_dir()
+ #
+ # Returns the path of the project directory, if a configuration file is found
+ # in the given directory or any of its parent directories.
+ #
+ # Args:
+ # directory (str) - directory from where the command was invoked
+ #
+ # Raises:
+ # LoadError if project.conf is not found
+ #
+ # Returns:
+ # (str) - the directory that contains the project, and
+ # (str) - the name of the element required to find the project, or None
+ #
+ def _find_project_dir(self, directory):
+ workspace_element = None
+ config_filenames = [_PROJECT_CONF_FILE, WORKSPACE_PROJECT_FILE]
+ found_directory, filename = utils._search_upward_for_files(
+ directory, config_filenames
+ )
+ if filename == _PROJECT_CONF_FILE:
+ project_directory = found_directory
+ elif filename == WORKSPACE_PROJECT_FILE:
+ workspace_project_cache = self._context.get_workspace_project_cache()
+ workspace_project = workspace_project_cache.get(found_directory)
+ if workspace_project:
+ project_directory = workspace_project.get_default_project_path()
+ workspace_element = workspace_project.get_default_element()
+ else:
+ raise LoadError(
+ LoadErrorReason.MISSING_PROJECT_CONF,
+ "None of {names} found in '{path}' or any of its parent directories"
+ .format(names=config_filenames, path=directory))
+
+ return project_directory, workspace_element
+
+ def _load_plugin_factories(self, config, output):
+ plugin_source_origins = [] # Origins of custom sources
+ plugin_element_origins = [] # Origins of custom elements
+
+ # Plugin origins and versions
+ origins = _yaml.node_get(config, list, 'plugins', default_value=[])
+ source_format_versions = {}
+ element_format_versions = {}
+ for origin in origins:
+ allowed_origin_fields = [
+ 'origin', 'sources', 'elements',
+ 'package-name', 'path',
+ ]
+ allowed_origins = ['core', 'local', 'pip']
+ _yaml.node_validate(origin, allowed_origin_fields)
+
+ origin_value = _yaml.node_get(origin, str, 'origin')
+ if origin_value not in allowed_origins:
+ raise LoadError(
+ LoadErrorReason.INVALID_YAML,
+ "Origin '{}' is not one of the allowed types"
+ .format(origin_value))
+
+ # Store source versions for checking later
+ source_versions = _yaml.node_get(origin, Mapping, 'sources', default_value={})
+ for key in _yaml.node_keys(source_versions):
+ if key in source_format_versions:
+ raise LoadError(
+ LoadErrorReason.INVALID_YAML,
+ "Duplicate listing of source '{}'".format(key))
+ source_format_versions[key] = _yaml.node_get(source_versions, int, key)
+
+ # Store element versions for checking later
+ element_versions = _yaml.node_get(origin, Mapping, 'elements', default_value={})
+ for key in _yaml.node_keys(element_versions):
+ if key in element_format_versions:
+ raise LoadError(
+ LoadErrorReason.INVALID_YAML,
+ "Duplicate listing of element '{}'".format(key))
+ element_format_versions[key] = _yaml.node_get(element_versions, int, key)
+
+ # Store the origins if they're not 'core'.
+ # Core plugins are loaded by default, so storing is unnecessary.
+ if _yaml.node_get(origin, str, 'origin') != 'core':
+ self._store_origin(origin, 'sources', plugin_source_origins)
+ self._store_origin(origin, 'elements', plugin_element_origins)
+
+ pluginbase = PluginBase(package='buildstream.plugins')
+ output.element_factory = ElementFactory(pluginbase,
+ plugin_origins=plugin_element_origins,
+ format_versions=element_format_versions)
+ output.source_factory = SourceFactory(pluginbase,
+ plugin_origins=plugin_source_origins,
+ format_versions=source_format_versions)
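+
+ # Illustrative sketch only (hypothetical project.conf snippet): plugin
+ # origins using the fields validated above ('origin', 'sources',
+ # 'elements', 'package-name', 'path'):
+ #
+ #   plugins:
+ #   - origin: local
+ #     path: plugins/sources
+ #     sources:
+ #       mysource: 0
+ #   - origin: pip
+ #     package-name: example-plugins
+ #     elements:
+ #       myelement: 1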
+
+ # _store_origin()
+ #
+ # Helper function to store plugin origins
+ #
+ # Args:
+ # origin (node) - a node indicating the origin of a group of
+ # plugins.
+ # plugin_group (str) - The name of the type of plugin that is being
+ # loaded
+ # destination (list) - A list of nodes to store the origins in
+ #
+ # Raises:
+ # LoadError if 'plugin_group' is an unexpected value
+ def _store_origin(self, origin, plugin_group, destination):
+ expected_groups = ['sources', 'elements']
+ if plugin_group not in expected_groups:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "Unexpected plugin group: {}, expecting {}"
+ .format(plugin_group, expected_groups))
+ node_keys = [key for key in _yaml.node_keys(origin)]
+ if plugin_group in node_keys:
+ origin_node = _yaml.node_copy(origin)
+ plugins = _yaml.node_get(origin, Mapping, plugin_group, default_value={})
+ _yaml.node_set(origin_node, 'plugins', [k for k in _yaml.node_keys(plugins)])
+ for group in expected_groups:
+ if group in origin_node:
+ _yaml.node_del(origin_node, group)
+
+ if _yaml.node_get(origin_node, str, 'origin') == 'local':
+ path = self.get_path_from_node(origin, 'path',
+ check_is_dir=True)
+ # paths are passed in relative to the project, but must be absolute
+ _yaml.node_set(origin_node, 'path', os.path.join(self.directory, path))
+ destination.append(origin_node)
+
+ # _warning_is_fatal():
+ #
+ # Returns true if the warning in question should be considered fatal based on
+ # the project configuration.
+ #
+ # Args:
+ # warning_str (str): The warning configuration string to check against
+ #
+ # Returns:
+ # (bool): True if the warning should be considered fatal and cause an error.
+ #
+ def _warning_is_fatal(self, warning_str):
+ return warning_str in self._fatal_warnings
diff --git a/src/buildstream/_projectrefs.py b/src/buildstream/_projectrefs.py
new file mode 100644
index 000000000..09205a7c3
--- /dev/null
+++ b/src/buildstream/_projectrefs.py
@@ -0,0 +1,155 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+import os
+
+from . import _yaml
+from ._exceptions import LoadError, LoadErrorReason
+
+
+# ProjectRefStorage()
+#
+# Indicates the type of ref storage
+class ProjectRefStorage():
+
+ # Source references are stored inline
+ #
+ INLINE = 'inline'
+
+ # Source references are stored in a central project.refs file
+ #
+ PROJECT_REFS = 'project.refs'
+
+
+# ProjectRefs()
+#
+# The project.refs file management
+#
+# Args:
+# directory (str): The project directory
+# base_name (str): The project.refs basename
+#
+class ProjectRefs():
+
+ def __init__(self, directory, base_name):
+ directory = os.path.abspath(directory)
+ self._fullpath = os.path.join(directory, base_name)
+ self._base_name = base_name
+ self._toplevel_node = None
+ self._toplevel_save = None
+
+ # load()
+ #
+ # Load the project.refs file
+ #
+ # Args:
+ # options (OptionPool): To resolve conditional statements
+ #
+ def load(self, options):
+ try:
+ self._toplevel_node = _yaml.load(self._fullpath, shortname=self._base_name, copy_tree=True)
+ provenance = _yaml.node_get_provenance(self._toplevel_node)
+ self._toplevel_save = provenance.toplevel
+
+ # Process any project options immediately
+ options.process_node(self._toplevel_node)
+
+ # Run any final assertions on the project.refs, just in case there
+ # are list composition directives or anything left unprocessed.
+ _yaml.node_final_assertions(self._toplevel_node)
+
+ except LoadError as e:
+ if e.reason != LoadErrorReason.MISSING_FILE:
+ raise
+
+ # Ignore failure if the file doesn't exist; it'll be created and
+ # for now just assumed to be empty
+ self._toplevel_node = _yaml.new_synthetic_file(self._fullpath)
+ self._toplevel_save = self._toplevel_node
+
+ _yaml.node_validate(self._toplevel_node, ['projects'])
+
+ # Ensure we create our toplevel entry point on the fly here
+ for node in [self._toplevel_node, self._toplevel_save]:
+ if 'projects' not in node:
+ _yaml.node_set(node, 'projects', _yaml.new_empty_node(ref_node=node))
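+
+ # Illustrative sketch only (hypothetical project.refs layout): refs are
+ # looked up as projects -> project name -> element name -> source index,
+ # with each indexed entry typically holding the 'ref' stored by a source,
+ # for example
+ #
+ #   projects:
+ #     myproject:
+ #       hello.bst:
+ #       - ref: 6f53f0a3...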
+
+ # lookup_ref()
+ #
+ # Fetch the ref node for a given Source. If the ref node does not
+ # exist and `write` is specified, it will be automatically created.
+ #
+ # Args:
+ # project (str): The project to lookup
+ # element (str): The element name to lookup
+ # source_index (int): The index of the Source in the specified element
+ # write (bool): Whether we want to read the node or write to it
+ #
+ # Returns:
+ # (node): The YAML dictionary where the ref is stored
+ #
+ def lookup_ref(self, project, element, source_index, *, write=False):
+
+ node = self._lookup(self._toplevel_node, project, element, source_index)
+
+ if write:
+
+ # If we couldn't find the original, create a new one.
+ #
+ if node is None:
+ node = self._lookup(self._toplevel_save, project, element, source_index, ensure=True)
+
+ return node
+
+ # _lookup()
+ #
+ # Looks up a ref node in the project.refs file, creates one if ensure is True.
+ #
+ def _lookup(self, toplevel, project, element, source_index, *, ensure=False):
+ # Fetch the project
+ try:
+ projects = _yaml.node_get(toplevel, dict, 'projects')
+ project_node = _yaml.node_get(projects, dict, project)
+ except LoadError:
+ if not ensure:
+ return None
+ project_node = _yaml.new_empty_node(ref_node=projects)
+ _yaml.node_set(projects, project, project_node)
+
+ # Fetch the element
+ try:
+ element_list = _yaml.node_get(project_node, list, element)
+ except LoadError:
+ if not ensure:
+ return None
+ element_list = []
+ _yaml.node_set(project_node, element, element_list)
+
+ # Fetch the source index
+ try:
+ node = element_list[source_index]
+ except IndexError:
+ if not ensure:
+ return None
+
+ # Pad the list with empty newly created dictionaries
+ _yaml.node_extend_list(project_node, element, source_index + 1, {})
+
+ node = _yaml.node_get(project_node, dict, element, indices=[source_index])
+
+ return node
diff --git a/src/buildstream/_protos/__init__.py b/src/buildstream/_protos/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/__init__.py
diff --git a/src/buildstream/_protos/build/__init__.py b/src/buildstream/_protos/build/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/build/__init__.py
diff --git a/src/buildstream/_protos/build/bazel/__init__.py b/src/buildstream/_protos/build/bazel/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/__init__.py
diff --git a/src/buildstream/_protos/build/bazel/remote/__init__.py b/src/buildstream/_protos/build/bazel/remote/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/__init__.py
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/__init__.py b/src/buildstream/_protos/build/bazel/remote/execution/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/execution/__init__.py
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/__init__.py b/src/buildstream/_protos/build/bazel/remote/execution/v2/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/__init__.py
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
new file mode 100644
index 000000000..7edbce3bc
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
@@ -0,0 +1,1331 @@
+// Copyright 2018 The Bazel Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package build.bazel.remote.execution.v2;
+
+import "build/bazel/semver/semver.proto";
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Build.Bazel.Remote.Execution.V2";
+option go_package = "remoteexecution";
+option java_multiple_files = true;
+option java_outer_classname = "RemoteExecutionProto";
+option java_package = "build.bazel.remote.execution.v2";
+option objc_class_prefix = "REX";
+
+
+// The Remote Execution API is used to execute an
+// [Action][build.bazel.remote.execution.v2.Action] on the remote
+// workers.
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service Execution {
+ // Execute an action remotely.
+ //
+ // In order to execute an action, the client must first upload all of the
+ // inputs, the
+ // [Command][build.bazel.remote.execution.v2.Command] to run, and the
+ // [Action][build.bazel.remote.execution.v2.Action] into the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ // It then calls `Execute` with an `action_digest` referring to them. The
+ // server will run the action and eventually return the result.
+ //
+ // The input `Action`'s fields MUST meet the various canonicalization
+ // requirements specified in the documentation for their types so that it has
+ // the same digest as other logically equivalent `Action`s. The server MAY
+ // enforce the requirements and return errors if a non-canonical input is
+ // received. It MAY also proceed without verifying some or all of the
+ // requirements, such as for performance reasons. If the server does not
+ // verify the requirement, then it will treat the `Action` as distinct from
+ // another logically equivalent action if they hash differently.
+ //
+ // Returns a stream of
+ // [google.longrunning.Operation][google.longrunning.Operation] messages
+ // describing the resulting execution, with eventual `response`
+ // [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The
+ // `metadata` on the operation is of type
+ // [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata].
+ //
+ // If the client remains connected after the first response is returned from
+ // the server, then updates are streamed as if the client had called
+ // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]
+ // until the execution completes or the request reaches an error. The
+ // operation can also be queried using [Operations
+ // API][google.longrunning.Operations.GetOperation].
+ //
+ // The server NEED NOT implement other methods or functionality of the
+ // Operations API.
+ //
+ // Errors discovered during creation of the `Operation` will be reported
+ // as gRPC Status errors, while errors that occurred while running the
+ // action will be reported in the `status` field of the `ExecuteResponse`. The
+ // server MUST NOT set the `error` field of the `Operation` proto.
+ // The possible errors include:
+ // * `INVALID_ARGUMENT`: One or more arguments are invalid.
+ // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
+ // action requested, such as a missing input or command or no worker being
+ // available. The client may be able to fix the errors and retry.
+ // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
+ // the action.
+ // * `UNAVAILABLE`: Due to a transient condition, such as all workers being
+ // occupied (and the server does not support a queue), the action could not
+ // be started. The client should retry.
+ // * `INTERNAL`: An internal error occurred in the execution engine or the
+ // worker.
+ // * `DEADLINE_EXCEEDED`: The execution timed out.
+ //
+ // In the case of a missing input or command, the server SHOULD additionally
+ // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
+ // where, for each requested blob not present in the CAS, there is a
+ // `Violation` with a `type` of `MISSING` and a `subject` of
+ // `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+ rpc Execute(ExecuteRequest) returns (stream google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v2/{instance_name=**}/actions:execute" body: "*" };
+ }
+
+ // Wait for an execution operation to complete. When the client initially
+ // makes the request, the server immediately responds with the current status
+ // of the execution. The server will leave the request stream open until the
+ // operation completes, and then respond with the completed operation. The
+ // server MAY choose to stream additional updates as execution progresses,
+ // such as to provide an update as to the state of the execution.
+ rpc WaitExecution(WaitExecutionRequest) returns (stream google.longrunning.Operation) {
+ option (google.api.http) = { post: "/v2/{name=operations/**}:waitExecution" body: "*" };
+ }
+}
+
+// The action cache API is used to query whether a given action has already been
+// performed and, if so, retrieve its result. Unlike the
+// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
+// which addresses blobs by their own content, the action cache addresses the
+// [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
+// digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
+// which produced them.
+//
+// The lifetime of entries in the action cache is implementation-specific, but
+// the server SHOULD assume that more recently used entries are more likely to
+// be used again. Additionally, action cache implementations SHOULD ensure that
+// any blobs referenced in the
+// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+// are still valid when returning a result.
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service ActionCache {
+ // Retrieve a cached execution result.
+ //
+ // Errors:
+ // * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+ rpc GetActionResult(GetActionResultRequest) returns (ActionResult) {
+ option (google.api.http) = { get: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" };
+ }
+
+ // Upload a new execution result.
+ //
+ // This method is intended for servers which implement the distributed cache
+ // independently of the
+ // [Execution][build.bazel.remote.execution.v2.Execution] API. As a
+ // result, it is OPTIONAL for servers to implement.
+ //
+ // In order to allow the server to perform access control based on the type of
+ // action, and to assist with client debugging, the client MUST first upload
+ // the [Action][build.bazel.remote.execution.v2.Action] that produced the
+ // result, along with its
+ // [Command][build.bazel.remote.execution.v2.Command], into the
+ // `ContentAddressableStorage`.
+ //
+ // Errors:
+ // * `NOT_IMPLEMENTED`: This method is not supported by the server.
+ // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+ // entry to the cache.
+ rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) {
+ option (google.api.http) = { put: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" body: "action_result" };
+ }
+}
+
+// The CAS (content-addressable storage) is used to store the inputs to and
+// outputs from the execution service. Each piece of content is addressed by the
+// digest of its binary data.
+//
+// Most of the binary data stored in the CAS is opaque to the execution engine,
+// and is only used as a communication medium. In order to build an
+// [Action][build.bazel.remote.execution.v2.Action],
+// however, the client will need to also upload the
+// [Command][build.bazel.remote.execution.v2.Command] and input root
+// [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
+// The Command and Directory messages must be marshalled to wire format and then
+// uploaded under the hash as with any other piece of content. In practice, the
+// input root directory is likely to refer to other Directories in its
+// hierarchy, which must also each be uploaded on their own.
+//
+// For small file uploads the client should group them together and call
+// [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
+// on chunks of no more than 10 MiB. For large uploads, the client must use the
+// [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+// `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+// where `instance_name` is as described in the next paragraph, `uuid` is a
+// version 4 UUID generated by the client, and `hash` and `size` are the
+// [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
+// `uuid` is used only to avoid collisions when multiple clients try to upload
+// the same file (or the same client tries to upload the file multiple times at
+// once on different threads), so the client MAY reuse the `uuid` for uploading
+// different blobs. The `resource_name` may optionally have a trailing filename
+// (or other metadata) for a client to use if it is storing URLs, as in
+// `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+// after the `size` is ignored.
+//
+// A single server MAY support multiple instances of the execution system, each
+// with their own workers, storage, cache, etc. The exact relationship between
+// instances is up to the server. If the server does, then the `instance_name`
+// is an identifier, possibly containing multiple path segments, used to
+// distinguish between the various instances on the server, in a manner defined
+// by the server. For servers which do not support multiple instances, then the
+// `instance_name` is the empty path and the leading slash is omitted, so that
+// the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+//
+// When attempting an upload, if another client has already completed the upload
+// (which may occur in the middle of a single upload if another client uploads
+// the same blob concurrently), the request will terminate immediately with
+// a response whose `committed_size` is the full size of the uploaded file
+// (regardless of how much data was transmitted by the client). If the client
+// completes the upload but the
+// [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
+// `INVALID_ARGUMENT` error will be returned. In either case, the client should
+// not attempt to retry the upload.
+//
+// For downloading blobs, the client must use the
+// [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+// a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+// `instance_name` is the instance name (see above), and `hash` and `size` are
+// the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
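+ //
+ // As a purely illustrative (non-normative) example, with hashes shortened
+ // for readability: an upload under instance name "main" might use the
+ // resource_name
+ //
+ //   main/uploads/3fa1.../blobs/4a73bc9d03.../65534
+ //
+ // while the corresponding download would use
+ //
+ //   main/blobs/4a73bc9d03.../65534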
+//
+// The lifetime of entries in the CAS is implementation specific, but it SHOULD
+// be long enough to allow for newly-added and recently looked-up entries to be
+// used in subsequent calls (e.g. to
+// [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service ContentAddressableStorage {
+ // Determine if blobs are present in the CAS.
+ //
+ // Clients can use this API before uploading blobs to determine which ones are
+ // already present in the CAS and do not need to be uploaded again.
+ //
+ // There are no method-specific errors.
+ rpc FindMissingBlobs(FindMissingBlobsRequest) returns (FindMissingBlobsResponse) {
+ option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:findMissing" body: "*" };
+ }
+
+ // Upload many blobs at once.
+ //
+ // The server may enforce a limit of the combined total size of blobs
+ // to be uploaded using this API. This limit may be obtained using the
+ // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
+ // Requests exceeding the limit should either be split into smaller
+ // chunks or uploaded using the
+ // [ByteStream API][google.bytestream.ByteStream], as appropriate.
+ //
+ // This request is equivalent to calling a Bytestream `Write` request
+ // on each individual blob, in parallel. The requests may succeed or fail
+ // independently.
+ //
+ // Errors:
+ // * `INVALID_ARGUMENT`: The client attempted to upload more than the
+ // server supported limit.
+ //
+ // Individual requests may return the following errors, additionally:
+ // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
+ // * `INVALID_ARGUMENT`: The
+ // [Digest][build.bazel.remote.execution.v2.Digest] does not match the
+ // provided data.
+ rpc BatchUpdateBlobs(BatchUpdateBlobsRequest) returns (BatchUpdateBlobsResponse) {
+ option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchUpdate" body: "*" };
+ }
+
+ // Download many blobs at once.
+ //
+ // The server may enforce a limit of the combined total size of blobs
+ // to be downloaded using this API. This limit may be obtained using the
+ // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
+ // Requests exceeding the limit should either be split into smaller
+ // chunks or downloaded using the
+ // [ByteStream API][google.bytestream.ByteStream], as appropriate.
+ //
+ // This request is equivalent to calling a Bytestream `Read` request
+ // on each individual blob, in parallel. The requests may succeed or fail
+ // independently.
+ //
+ // Errors:
+ // * `INVALID_ARGUMENT`: The client attempted to read more than the
+ // server supported limit.
+ //
+ // Every error on individual read will be returned in the corresponding digest
+ // status.
+ rpc BatchReadBlobs(BatchReadBlobsRequest) returns (BatchReadBlobsResponse) {
+ option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchRead" body: "*" };
+ }
+
+ // Fetch the entire directory tree rooted at a node.
+ //
+ // This request must be targeted at a
+ // [Directory][build.bazel.remote.execution.v2.Directory] stored in the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+ // (CAS). The server will enumerate the `Directory` tree recursively and
+ // return every node descended from the root.
+ //
+ // The GetTreeRequest.page_token parameter can be used to skip ahead in
+ // the stream (e.g. when retrying a partially completed and aborted request),
+ // by setting it to a value taken from GetTreeResponse.next_page_token of the
+ // last successfully processed GetTreeResponse.
+ //
+ // The exact traversal order is unspecified and, unless retrieving subsequent
+ // pages from an earlier request, is not guaranteed to be stable across
+ // multiple invocations of `GetTree`.
+ //
+ // If part of the tree is missing from the CAS, the server will return the
+ // portion present and omit the rest.
+ //
+ // * `NOT_FOUND`: The requested tree root is not present in the CAS.
+ rpc GetTree(GetTreeRequest) returns (stream GetTreeResponse) {
+ option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" };
+ }
+}
+
+// The Capabilities service may be used by remote execution clients to query
+// various server properties, in order to self-configure or return meaningful
+// error messages.
+//
+// The query may include a particular `instance_name`, in which case the values
+// returned will pertain to that instance.
+service Capabilities {
+ // GetCapabilities returns the server capabilities configuration.
+ rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) {
+ option (google.api.http) = {
+ get: "/v2/{instance_name=**}/capabilities"
+ };
+ }
+}
+
+// An `Action` captures all the information about an execution which is required
+// to reproduce it.
+//
+// `Action`s are the core component of the [Execution] service. A single
+// `Action` represents a repeatable action that can be performed by the
+// execution service. `Action`s can be succinctly identified by the digest of
+// their wire format encoding and, once an `Action` has been executed, will be
+// cached in the action cache. Future requests can then use the cached result
+// rather than needing to run afresh.
+//
+// When a server completes execution of an
+// [Action][build.bazel.remote.execution.v2.Action], it MAY choose to
+// cache the [result][build.bazel.remote.execution.v2.ActionResult] in
+// the [ActionCache][build.bazel.remote.execution.v2.ActionCache] unless
+// `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
+// default, future calls to
+// [Execute][build.bazel.remote.execution.v2.Execution.Execute] the same
+// `Action` will also serve their results from the cache. Clients must take care
+// to understand the caching behaviour. Ideally, all `Action`s will be
+// reproducible so that serving a result from cache is always desirable and
+// correct.
+message Action {
+ // The digest of the [Command][build.bazel.remote.execution.v2.Command]
+ // to run, which MUST be present in the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ Digest command_digest = 1;
+
+ // The digest of the root
+ // [Directory][build.bazel.remote.execution.v2.Directory] for the input
+ // files. The files in the directory tree are available in the correct
+ // location on the build machine before the command is executed. The root
+ // directory, as well as every subdirectory and content blob referred to, MUST
+ // be in the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ Digest input_root_digest = 2;
+
+ reserved 3 to 5; // Used for fields moved to [Command][build.bazel.remote.execution.v2.Command].
+
+ // A timeout after which the execution should be killed. If the timeout is
+ // absent, then the client is specifying that the execution should continue
+ // as long as the server will let it. The server SHOULD impose a timeout if
+ // the client does not specify one, however, if the client does specify a
+ // timeout that is longer than the server's maximum timeout, the server MUST
+ // reject the request.
+ //
+ // The timeout is a part of the
+ // [Action][build.bazel.remote.execution.v2.Action] message, and
+ // therefore two `Actions` with different timeouts are different, even if they
+ // are otherwise identical. This is because, if they were not, running an
+ // `Action` with a lower timeout than is required might result in a cache hit
+ // from an execution run with a longer timeout, hiding the fact that the
+ // timeout is too short. By encoding it directly in the `Action`, a lower
+ // timeout will result in a cache miss and the execution timeout will fail
+ // immediately, rather than whenever the cache entry gets evicted.
+ google.protobuf.Duration timeout = 6;
+
+ // If true, then the `Action`'s result cannot be cached.
+ bool do_not_cache = 7;
+}
+
+// A `Command` is the actual command executed by a worker running an
+// [Action][build.bazel.remote.execution.v2.Action] and specifications of its
+// environment.
+//
+// Except as otherwise required, the environment (such as which system
+// libraries or binaries are available, and what filesystems are mounted where)
+// is defined by and specific to the implementation of the remote execution API.
+message Command {
+ // An `EnvironmentVariable` is one variable to set in the running program's
+ // environment.
+ message EnvironmentVariable {
+ // The variable name.
+ string name = 1;
+
+ // The variable value.
+ string value = 2;
+ }
+
+ // The arguments to the command. The first argument must be the path to the
+ // executable, which must be either a relative path, in which case it is
+ // evaluated with respect to the input root, or an absolute path.
+ repeated string arguments = 1;
+
+ // The environment variables to set when running the program. The worker may
+ // provide its own default environment variables; these defaults can be
+ // overridden using this field. Additional variables can also be specified.
+ //
+ // In order to ensure that equivalent `Command`s always hash to the same
+ // value, the environment variables MUST be lexicographically sorted by name.
+ // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
+ repeated EnvironmentVariable environment_variables = 2;
+
+ // A list of the output files that the client expects to retrieve from the
+ // action. Only the listed files, as well as directories listed in
+ // `output_directories`, will be returned to the client as output.
+ // Other files that may be created during command execution are discarded.
+ //
+ // The paths are relative to the working directory of the action execution.
+ // The paths are specified using a single forward slash (`/`) as a path
+ // separator, even if the execution platform natively uses a different
+ // separator. The path MUST NOT include a trailing slash, nor a leading slash,
+ // being a relative path.
+ //
+ // In order to ensure consistent hashing of the same Action, the output paths
+ // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
+ // bytes).
+ //
+ // An output file cannot be duplicated, be a parent of another output file, be
+ // a child of a listed output directory, or have the same path as any of the
+ // listed output directories.
+ repeated string output_files = 3;
+
+ // A list of the output directories that the client expects to retrieve from
+ // the action. Only the contents of the indicated directories (recursively
+ // including the contents of their subdirectories) will be
+ // returned, as well as files listed in `output_files`. Other files that may
+ // be created during command execution are discarded.
+ //
+ // The paths are relative to the working directory of the action execution.
+ // The paths are specified using a single forward slash (`/`) as a path
+ // separator, even if the execution platform natively uses a different
+ // separator. The path MUST NOT include a trailing slash, nor a leading slash,
+ // being a relative path. The special value of empty string is allowed,
+ // although not recommended, and can be used to capture the entire working
+ // directory tree, including inputs.
+ //
+ // In order to ensure consistent hashing of the same Action, the output paths
+ // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
+ // bytes).
+ //
+ // An output directory cannot be duplicated, be a parent of another output
+ // directory, be a parent of a listed output file, or have the same path as
+ // any of the listed output files.
+ repeated string output_directories = 4;
+
+ // The platform requirements for the execution environment. The server MAY
+ // choose to execute the action on any worker satisfying the requirements, so
+ // the client SHOULD ensure that running the action on any such worker will
+ // have the same result.
+ Platform platform = 5;
+
+ // The working directory, relative to the input root, for the command to run
+ // in. It must be a directory which exists in the input tree. If it is left
+ // empty, then the action is run in the input root.
+ string working_directory = 6;
+}
+
+// A `Platform` is a set of requirements, such as hardware, operating system, or
+// compiler toolchain, for an
+// [Action][build.bazel.remote.execution.v2.Action]'s execution
+// environment. A `Platform` is represented as a series of key-value pairs
+// representing the properties that are required of the platform.
+message Platform {
+ // A single property for the environment. The server is responsible for
+ // specifying the property `name`s that it accepts. If an unknown `name` is
+ // provided in the requirements for an
+ // [Action][build.bazel.remote.execution.v2.Action], the server SHOULD
+ // reject the execution request. If permitted by the server, the same `name`
+ // may occur multiple times.
+ //
+ // The server is also responsible for specifying the interpretation of
+ // property `value`s. For instance, a property describing how much RAM must be
+ // available may be interpreted as allowing a worker with 16GB to fulfill a
+ // request for 8GB, while a property describing the OS environment on which
+ // the action must be performed may require an exact match with the worker's
+ // OS.
+ //
+ // The server MAY use the `value` of one or more properties to determine how
+ // it sets up the execution environment, such as by making specific system
+ // files available to the worker.
+ message Property {
+ // The property name.
+ string name = 1;
+
+ // The property value.
+ string value = 2;
+ }
+
+ // The properties that make up this platform. In order to ensure that
+ // equivalent `Platform`s always hash to the same value, the properties MUST
+ // be lexicographically sorted by name, and then by value. Sorting of strings
+ // is done by code point, equivalently, by the UTF-8 bytes.
+ repeated Property properties = 1;
+}
+
+// A `Directory` represents a directory node in a file tree, containing zero or
+// more children [FileNodes][build.bazel.remote.execution.v2.FileNode],
+// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode] and
+// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode].
+// Each `Node` contains its name in the directory, either the digest of its
+// content (either a file blob or a `Directory` proto) or a symlink target, as
+// well as possibly some metadata about the file or directory.
+//
+// In order to ensure that two equivalent directory trees hash to the same
+// value, the following restrictions MUST be obeyed when constructing a
+ // `Directory`:
+// - Every child in the directory must have a path of exactly one segment.
+// Multiple levels of directory hierarchy may not be collapsed.
+// - Each child in the directory must have a unique path segment (file name).
+// - The files, directories and symlinks in the directory must each be sorted
+// in lexicographical order by path. The path strings must be sorted by code
+// point, equivalently, by UTF-8 bytes.
+//
+// A `Directory` that obeys the restrictions is said to be in canonical form.
+//
+// As an example, the following could be used for a file named `bar` and a
+// directory named `foo` with an executable file named `baz` (hashes shortened
+// for readability):
+//
+// ```json
+// // (Directory proto)
+// {
+// files: [
+// {
+// name: "bar",
+// digest: {
+// hash: "4a73bc9d03...",
+// size: 65534
+// }
+// }
+// ],
+// directories: [
+// {
+// name: "foo",
+// digest: {
+// hash: "4cf2eda940...",
+// size: 43
+// }
+// }
+// ]
+// }
+//
+// // (Directory proto with hash "4cf2eda940..." and size 43)
+// {
+// files: [
+// {
+// name: "baz",
+// digest: {
+// hash: "b2c941073e...",
+// size: 1294,
+// },
+// is_executable: true
+// }
+// ]
+// }
+// ```
+message Directory {
+ // The files in the directory.
+ repeated FileNode files = 1;
+
+ // The subdirectories in the directory.
+ repeated DirectoryNode directories = 2;
+
+ // The symlinks in the directory.
+ repeated SymlinkNode symlinks = 3;
+}
+
+// A `FileNode` represents a single file and associated metadata.
+message FileNode {
+ // The name of the file.
+ string name = 1;
+
+ // The digest of the file's content.
+ Digest digest = 2;
+
+ reserved 3; // Reserved to ensure wire-compatibility with `OutputFile`.
+
+ // True if file is executable, false otherwise.
+ bool is_executable = 4;
+}
+
+// A `DirectoryNode` represents a child of a
+// [Directory][build.bazel.remote.execution.v2.Directory] which is itself
+// a `Directory` and its associated metadata.
+message DirectoryNode {
+ // The name of the directory.
+ string name = 1;
+
+ // The digest of the
+ // [Directory][build.bazel.remote.execution.v2.Directory] object
+ // represented. See [Digest][build.bazel.remote.execution.v2.Digest]
+ // for information about how to take the digest of a proto message.
+ Digest digest = 2;
+}
+
+// A `SymlinkNode` represents a symbolic link.
+message SymlinkNode {
+ // The name of the symlink.
+ string name = 1;
+
+ // The target path of the symlink. The path separator is a forward slash `/`.
+ // The target path can be relative to the parent directory of the symlink or
+ // it can be an absolute path starting with `/`. Support for absolute paths
+ // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities]
+ // API. The canonical form forbids the substrings `/./` and `//` in the target
+ // path. `..` components are allowed anywhere in the target path.
+ string target = 2;
+}
+
+// A content digest. A digest for a given blob consists of the size of the blob
+// and its hash. The hash algorithm to use is defined by the server, but servers
+// SHOULD use SHA-256.
+//
+// The size is considered to be an integral part of the digest and cannot be
+// separated. That is, even if the `hash` field is correctly specified but
+// `size_bytes` is not, the server MUST reject the request.
+//
+// The reason for including the size in the digest is as follows: in a great
+// many cases, the server needs to know the size of the blob it is about to work
+// with prior to starting an operation with it, such as flattening Merkle tree
+// structures or streaming it to a worker. Technically, the server could
+// implement a separate metadata store, but this results in a significantly more
+// complicated implementation as opposed to having the client specify the size
+// up-front (or storing the size along with the digest in every message where
+// digests are embedded). This does mean that the API leaks some implementation
+// details of (what we consider to be) a reasonable server implementation, but
+// we consider this to be a worthwhile tradeoff.
+//
+// When a `Digest` is used to refer to a proto message, it always refers to the
+// message in binary encoded form. To ensure consistent hashing, clients and
+// servers MUST ensure that they serialize messages according to the following
+// rules, even if there are alternate valid encodings for the same message.
+// - Fields are serialized in tag order.
+// - There are no unknown fields.
+// - There are no duplicate fields.
+// - Fields are serialized according to the default semantics for their type.
+//
+// Most protocol buffer implementations will always follow these rules when
+// serializing, but care should be taken to avoid shortcuts. For instance,
+// concatenating two messages to merge them may produce duplicate fields.
+message Digest {
+ // The hash. In the case of SHA-256, it will always be a lowercase hex string
+ // exactly 64 characters long.
+ string hash = 1;
+
+ // The size of the blob, in bytes.
+ int64 size_bytes = 2;
+}
+
+// ExecutedActionMetadata contains details about a completed execution.
+message ExecutedActionMetadata {
+ // The name of the worker which ran the execution.
+ string worker = 1;
+
+ // When was the action added to the queue.
+ google.protobuf.Timestamp queued_timestamp = 2;
+
+ // When the worker received the action.
+ google.protobuf.Timestamp worker_start_timestamp = 3;
+
+ // When the worker completed the action, including all stages.
+ google.protobuf.Timestamp worker_completed_timestamp = 4;
+
+ // When the worker started fetching action inputs.
+ google.protobuf.Timestamp input_fetch_start_timestamp = 5;
+
+ // When the worker finished fetching action inputs.
+ google.protobuf.Timestamp input_fetch_completed_timestamp = 6;
+
+ // When the worker started executing the action command.
+ google.protobuf.Timestamp execution_start_timestamp = 7;
+
+ // When the worker completed executing the action command.
+ google.protobuf.Timestamp execution_completed_timestamp = 8;
+
+ // When the worker started uploading action outputs.
+ google.protobuf.Timestamp output_upload_start_timestamp = 9;
+
+ // When the worker finished uploading action outputs.
+ google.protobuf.Timestamp output_upload_completed_timestamp = 10;
+}
+
+// An ActionResult represents the result of an
+// [Action][build.bazel.remote.execution.v2.Action] being run.
+message ActionResult {
+ reserved 1; // Reserved for use as the resource name.
+
+ // The output files of the action. For each output file requested in the
+ // `output_files` field of the Action, if the corresponding file existed after
+ // the action completed, a single entry will be present in the output list.
+ //
+ // If the action does not produce the requested output, or produces a
+ // directory where a regular file is expected or vice versa, then that output
+ // will be omitted from the list. The server is free to arrange the output
+ // list as desired; clients MUST NOT assume that the output list is sorted.
+ repeated OutputFile output_files = 2;
+
+ // The output directories of the action. For each output directory requested
+ // in the `output_directories` field of the Action, if the corresponding
+ // directory existed after the action completed, a single entry will be
+ // present in the output list, which will contain the digest of a
+ // [Tree][build.bazel.remote.execution.v2.Tree] message containing the
+ // directory tree, and the path equal exactly to the corresponding Action
+ // output_directories member.
+ //
+ // As an example, suppose the Action had an output directory `a/b/dir` and the
+ // execution produced the following contents in `a/b/dir`: a file named `bar`
+ // and a directory named `foo` with an executable file named `baz`. Then,
+ // output_directory will contain (hashes shortened for readability):
+ //
+ // ```json
+ // // OutputDirectory proto:
+ // {
+ // path: "a/b/dir"
+ // tree_digest: {
+ // hash: "4a73bc9d03...",
+ // size: 55
+ // }
+ // }
+ // // Tree proto with hash "4a73bc9d03..." and size 55:
+ // {
+ // root: {
+ // files: [
+ // {
+ // name: "bar",
+ // digest: {
+ // hash: "4a73bc9d03...",
+ // size: 65534
+ // }
+ // }
+ // ],
+ // directories: [
+ // {
+ // name: "foo",
+ // digest: {
+ // hash: "4cf2eda940...",
+ // size: 43
+ // }
+ // }
+ // ]
+ // }
+ // children : {
+ // // (Directory proto with hash "4cf2eda940..." and size 43)
+ // files: [
+ // {
+ // name: "baz",
+ // digest: {
+ // hash: "b2c941073e...",
+ // size: 1294,
+ // },
+ // is_executable: true
+ // }
+ // ]
+ // }
+ // }
+ // ```
+ repeated OutputDirectory output_directories = 3;
+
+ // The exit code of the command.
+ int32 exit_code = 4;
+
+ // The standard output buffer of the action. The server will determine, based
+ // on the size of the buffer, whether to return it in raw form or to return
+ // a digest in `stdout_digest` that points to the buffer. If neither is set,
+ // then the buffer is empty. The client SHOULD NOT assume it will get one of
+ // the raw buffer or a digest on any given request and should be prepared to
+ // handle either.
+ bytes stdout_raw = 5;
+
+ // The digest for a blob containing the standard output of the action, which
+ // can be retrieved from the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ // See `stdout_raw` for when this will be set.
+ Digest stdout_digest = 6;
+
+ // The standard error buffer of the action. The server will determine, based
+ // on the size of the buffer, whether to return it in raw form or to return
+ // a digest in `stderr_digest` that points to the buffer. If neither is set,
+ // then the buffer is empty. The client SHOULD NOT assume it will get one of
+ // the raw buffer or a digest on any given request and should be prepared to
+ // handle either.
+ bytes stderr_raw = 7;
+
+ // The digest for a blob containing the standard error of the action, which
+ // can be retrieved from the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ // See `stderr_raw` for when this will be set.
+ Digest stderr_digest = 8;
+
+ // The details of the execution that originally produced this result.
+ ExecutedActionMetadata execution_metadata = 9;
+}
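
Because the server chooses between inlining stdout/stderr and returning a digest, a client has to be ready for either form. A sketch of that branching, where `fetch_blob` stands in for a hypothetical helper that reads a blob out of the CAS (e.g. via ByteStream or BatchReadBlobs):

```python
def get_stdout(result, fetch_blob) -> bytes:
    """Return an ActionResult's stdout regardless of which form the server chose.

    `fetch_blob` is a hypothetical callable taking a Digest and returning bytes.
    """
    if result.stdout_raw:
        return result.stdout_raw
    if result.HasField("stdout_digest"):
        return fetch_blob(result.stdout_digest)
    return b""  # Neither field set: the buffer was empty.
```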
+
+// An `OutputFile` is similar to a
+// [FileNode][build.bazel.remote.execution.v2.FileNode], but it is used as an
+// output in an `ActionResult`. It allows a full file path rather than
+// only a name.
+//
+// `OutputFile` is binary-compatible with `FileNode`.
+message OutputFile {
+ // The full path of the file relative to the input root, including the
+ // filename. The path separator is a forward slash `/`. Since this is a
+ // relative path, it MUST NOT begin with a leading forward slash.
+ string path = 1;
+
+ // The digest of the file's content.
+ Digest digest = 2;
+
+ reserved 3; // Used for a removed field in an earlier version of the API.
+
+ // True if file is executable, false otherwise.
+ bool is_executable = 4;
+}
+
+// A `Tree` contains all the
+// [Directory][build.bazel.remote.execution.v2.Directory] protos in a
+// single directory Merkle tree, compressed into one message.
+message Tree {
+ // The root directory in the tree.
+ Directory root = 1;
+
+ // All the child directories: the directories referred to by the root and,
+ // recursively, all its children. In order to reconstruct the directory tree,
+ // the client must take the digests of each of the child directories and then
+ // build up a tree starting from the `root`.
+ repeated Directory children = 2;
+}
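
Reconstructing paths from a `Tree` therefore means indexing `children` by the digest of their binary encoding and following `DirectoryNode.digest` references down from the root. A sketch, assuming the generated `remote_execution_pb2` module and canonical (tag-order) serialization as described for `Digest` above:

```python
import hashlib

from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2


def walk_tree(tree: remote_execution_pb2.Tree):
    """Yield (path, FileNode) pairs for every file reachable from tree.root."""
    # Children are looked up by the digest of their binary encoding; this
    # assumes the messages re-serialize to the same canonical bytes.
    children_by_hash = {
        hashlib.sha256(child.SerializeToString()).hexdigest(): child
        for child in tree.children
    }

    def _walk(directory, prefix):
        for file_node in directory.files:
            yield prefix + file_node.name, file_node
        for dir_node in directory.directories:
            child = children_by_hash[dir_node.digest.hash]
            yield from _walk(child, prefix + dir_node.name + "/")

    yield from _walk(tree.root, "")
```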
+
+// An `OutputDirectory` is the output in an `ActionResult` corresponding to a
+// directory's full contents rather than a single file.
+message OutputDirectory {
+ // The full path of the directory relative to the working directory. The path
+ // separator is a forward slash `/`. Since this is a relative path, it MUST
+ // NOT begin with a leading forward slash. The empty string value is allowed,
+ // and it denotes the entire working directory.
+ string path = 1;
+
+ reserved 2; // Used for a removed field in an earlier version of the API.
+
+ // The digest of the encoded
+ // [Tree][build.bazel.remote.execution.v2.Tree] proto containing the
+ // directory's contents.
+ Digest tree_digest = 3;
+}
+
+// An `ExecutionPolicy` can be used to control the scheduling of the action.
+message ExecutionPolicy {
+ // The priority (relative importance) of this action. Generally, a lower value
+ // means that the action should be run sooner than actions having a greater
+ // priority value, but the interpretation of a given value is server-
+ // dependent. A priority of 0 means the *default* priority. Priorities may be
+ // positive or negative, and such actions should run later or sooner than
+ // actions having the default priority, respectively. The particular semantics
+ // of this field is up to the server. In particular, every server will have
+ // their own supported range of priorities, and will decide how these map into
+ // scheduling policy.
+ int32 priority = 1;
+}
+
+// A `ResultsCachePolicy` is used for fine-grained control over how action
+// outputs are stored in the CAS and Action Cache.
+message ResultsCachePolicy {
+ // The priority (relative importance) of this content in the overall cache.
+ // Generally, a lower value means a longer retention time or other advantage,
+ // but the interpretation of a given value is server-dependent. A priority of
+ // 0 means a *default* value, decided by the server.
+ //
+ // The particular semantics of this field is up to the server. In particular,
+ // every server will have their own supported range of priorities, and will
+ // decide how these map into retention/eviction policy.
+ int32 priority = 1;
+}
+
+// A request message for
+// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute].
+message ExecuteRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // If true, the action will be executed anew even if its result was already
+ // present in the cache. If false, the result may be served from the
+ // [ActionCache][build.bazel.remote.execution.v2.ActionCache].
+ bool skip_cache_lookup = 3;
+
+ reserved 2, 4, 5; // Used for removed fields in an earlier version of the API.
+
+ // The digest of the [Action][build.bazel.remote.execution.v2.Action] to
+ // execute.
+ Digest action_digest = 6;
+
+ // An optional policy for execution of the action.
+ // The server will have a default policy if this is not provided.
+ ExecutionPolicy execution_policy = 7;
+
+ // An optional policy for the results of this execution in the remote cache.
+ // The server will have a default policy if this is not provided.
+ // This may be applied to both the ActionResult and the associated blobs.
+ ResultsCachePolicy results_cache_policy = 8;
+}
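
Putting the request together is then a matter of naming the instance and pointing at an `Action` already uploaded to the CAS; `Execute` streams `Operation` updates until one arrives with `done` set. A sketch, assuming the generated `remote_execution_pb2`/`remote_execution_pb2_grpc` modules and an existing gRPC channel:

```python
from buildstream._protos.build.bazel.remote.execution.v2 import (
    remote_execution_pb2,
    remote_execution_pb2_grpc,
)


def execute_action(channel, instance_name, action_digest):
    """Submit an action for execution and return the final Operation."""
    execution = remote_execution_pb2_grpc.ExecutionStub(channel)
    request = remote_execution_pb2.ExecuteRequest(
        instance_name=instance_name,
        action_digest=action_digest,
        skip_cache_lookup=False,  # Allow the ActionCache to serve a hit.
    )
    operation = None
    for operation in execution.Execute(request):  # server-streaming updates
        if operation.done:
            break
    return operation
```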
+
+// A `LogFile` is a log stored in the CAS.
+message LogFile {
+ // The digest of the log contents.
+ Digest digest = 1;
+
+ // This is a hint as to the purpose of the log, and is set to true if the log
+ // is human-readable text that can be usefully displayed to a user, and false
+ // otherwise. For instance, if a command-line client wishes to print the
+ // server logs to the terminal for a failed action, this allows it to avoid
+ // displaying a binary file.
+ bool human_readable = 2;
+}
+
+// The response message for
+// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute],
+// which will be contained in the [response
+// field][google.longrunning.Operation.response] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteResponse {
+ // The result of the action.
+ ActionResult result = 1;
+
+ // True if the result was served from cache, false if it was executed.
+ bool cached_result = 2;
+
+ // If the status has a code other than `OK`, it indicates that the action did
+ // not finish execution. For example, if the operation times out during
+ // execution, the status will have a `DEADLINE_EXCEEDED` code. Servers MUST
+ // use this field for errors in execution, rather than the error field on the
+ // `Operation` object.
+ //
+ // If the status code is other than `OK`, then the result MUST NOT be cached.
+ // For an error status, the `result` field is optional; the server may
+ // populate the output-, stdout-, and stderr-related fields if it has any
+ // information available, such as the stdout and stderr of a timed-out action.
+ google.rpc.Status status = 3;
+
+ // An optional list of additional log outputs the server wishes to provide. A
+ // server can use this to return execution-specific logs however it wishes.
+ // This is intended primarily to make it easier for users to debug issues that
+ // may be outside of the actual job execution, such as by identifying the
+ // worker executing the action or by providing logs from the worker's setup
+ // phase. The keys SHOULD be human readable so that a client can display them
+ // to a user.
+ map<string, LogFile> server_logs = 4;
+}
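
The practical consequence is that a client must look at `ExecuteResponse.status`, not the `Operation` error, to decide whether the action actually ran to completion. A sketch of unpacking a finished `Operation`, assuming the generated module (the helper name is illustrative):

```python
from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2


def unpack_execute_response(operation):
    """Extract the ActionResult from a completed Operation, or raise on failure."""
    response = remote_execution_pb2.ExecuteResponse()
    operation.response.Unpack(response)  # Operation.response is a google.protobuf.Any
    if response.status.code != 0:  # anything other than OK: execution did not finish
        raise RuntimeError("execution failed: {}".format(response.status.message))
    return response.result
```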
+
+// Metadata about an ongoing
+// [execution][build.bazel.remote.execution.v2.Execution.Execute], which
+// will be contained in the [metadata
+// field][google.longrunning.Operation.metadata] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteOperationMetadata {
+ // The current stage of execution.
+ enum Stage {
+ UNKNOWN = 0;
+
+ // Checking the result against the cache.
+ CACHE_CHECK = 1;
+
+ // Currently idle, awaiting a free machine to execute.
+ QUEUED = 2;
+
+ // Currently being executed by a worker.
+ EXECUTING = 3;
+
+ // Finished execution.
+ COMPLETED = 4;
+ }
+
+ Stage stage = 1;
+
+ // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+ // being executed.
+ Digest action_digest = 2;
+
+ // If set, the client can use this name with
+ // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
+ // standard output.
+ string stdout_stream_name = 3;
+
+ // If set, the client can use this name with
+ // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
+ // standard error.
+ string stderr_stream_name = 4;
+}
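
When the server advertises stream names, a client can show live output while the action is still in the EXECUTING stage. A sketch, assuming the companion `google.bytestream` stubs are generated alongside this proto under `buildstream._protos`:

```python
from buildstream._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc


def stream_stdout(channel, execute_metadata):
    """Print live stdout for an execution whose metadata names a stdout stream."""
    if not execute_metadata.stdout_stream_name:
        return  # The server did not offer a stream.
    bytestream = bytestream_pb2_grpc.ByteStreamStub(channel)
    request = bytestream_pb2.ReadRequest(resource_name=execute_metadata.stdout_stream_name)
    for response in bytestream.Read(request):  # server-streaming chunks
        print(response.data.decode("utf-8", errors="replace"), end="")
```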
+
+// A request message for
+// [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution].
+message WaitExecutionRequest {
+ // The name of the [Operation][google.longrunning.operations.v1.Operation]
+ // returned by [Execute][build.bazel.remote.execution.v2.Execution.Execute].
+ string name = 1;
+}
+
+// A request message for
+// [ActionCache.GetActionResult][build.bazel.remote.execution.v2.ActionCache.GetActionResult].
+message GetActionResultRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+ // whose result is requested.
+ Digest action_digest = 2;
+}
+
+// A request message for
+// [ActionCache.UpdateActionResult][build.bazel.remote.execution.v2.ActionCache.UpdateActionResult].
+message UpdateActionResultRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+ // whose result is being uploaded.
+ Digest action_digest = 2;
+
+ // The [ActionResult][build.bazel.remote.execution.v2.ActionResult]
+ // to store in the cache.
+ ActionResult action_result = 3;
+
+ // An optional policy for the results of this execution in the remote cache.
+ // The server will have a default policy if this is not provided.
+ // This may be applied to both the ActionResult and the associated blobs.
+ ResultsCachePolicy results_cache_policy = 4;
+}
+
+// A request message for
+// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
+message FindMissingBlobsRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // A list of the blobs to check.
+ repeated Digest blob_digests = 2;
+}
+
+// A response message for
+// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
+message FindMissingBlobsResponse {
+ // A list of the requested blobs that are *not* present in the storage.
+ repeated Digest missing_blob_digests = 2;
+}
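
The typical use is to query before uploading, so that blobs the server already holds are skipped. A sketch, assuming the generated `remote_execution_pb2`/`remote_execution_pb2_grpc` modules and an open gRPC channel:

```python
from buildstream._protos.build.bazel.remote.execution.v2 import (
    remote_execution_pb2,
    remote_execution_pb2_grpc,
)


def blobs_to_upload(channel, instance_name, digests):
    """Return the subset of `digests` the server reports as missing."""
    cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(channel)
    response = cas.FindMissingBlobs(
        remote_execution_pb2.FindMissingBlobsRequest(
            instance_name=instance_name,
            blob_digests=digests,
        )
    )
    missing = {(d.hash, d.size_bytes) for d in response.missing_blob_digests}
    return [d for d in digests if (d.hash, d.size_bytes) in missing]
```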
+
+// A request message for
+// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+message BatchUpdateBlobsRequest {
+ // A request corresponding to a single blob that the client wants to upload.
+ message Request {
+ // The digest of the blob. This MUST be the digest of `data`.
+ Digest digest = 1;
+
+ // The raw binary data.
+ bytes data = 2;
+ }
+
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The individual upload requests.
+ repeated Request requests = 2;
+}
+
+// A response message for
+// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+message BatchUpdateBlobsResponse {
+ // A response corresponding to a single blob that the client tried to upload.
+ message Response {
+ // The blob digest to which this response corresponds.
+ Digest digest = 1;
+
+ // The result of attempting to upload that blob.
+ google.rpc.Status status = 2;
+ }
+
+ // The responses to the requests.
+ repeated Response responses = 1;
+}
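
Since the batch call reports success or failure per blob, the per-entry `status` has to be checked even when the RPC itself succeeds. A sketch (a real client would also keep each batch under `CacheCapabilities.max_batch_total_size_bytes`):

```python
from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2


def batch_upload(cas_stub, instance_name, blobs):
    """Upload small (digest, data) pairs in one call and surface per-blob failures."""
    request = remote_execution_pb2.BatchUpdateBlobsRequest(instance_name=instance_name)
    for digest, data in blobs:
        request.requests.add(digest=digest, data=data)
    response = cas_stub.BatchUpdateBlobs(request)
    failed = [r.digest.hash for r in response.responses if r.status.code != 0]
    if failed:
        raise RuntimeError("failed to upload blobs: {}".format(failed))
```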
+
+// A request message for
+// [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
+message BatchReadBlobsRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The individual blob digests.
+ repeated Digest digests = 2;
+}
+
+// A response message for
+// [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
+message BatchReadBlobsResponse {
+ // A response corresponding to a single blob that the client tried to download.
+ message Response {
+ // The digest to which this response corresponds.
+ Digest digest = 1;
+
+ // The raw binary data.
+ bytes data = 2;
+
+ // The result of attempting to download that blob.
+ google.rpc.Status status = 3;
+ }
+
+ // The responses to the requests.
+ repeated Response responses = 1;
+}
+
+// A request message for
+// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
+message GetTreeRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The digest of the root, which must be an encoded
+ // [Directory][build.bazel.remote.execution.v2.Directory] message
+ // stored in the
+ // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ Digest root_digest = 2;
+
+ // A maximum page size to request. If present, the server will request no more
+ // than this many items. Regardless of whether a page size is specified, the
+ // server may place its own limit on the number of items to be returned and
+ // require the client to retrieve more items using a subsequent request.
+ int32 page_size = 3;
+
+ // A page token, which must be a value received in a previous
+ // [GetTreeResponse][build.bazel.remote.execution.v2.GetTreeResponse].
+ // If present, the server will use it to return the following page of results.
+ string page_token = 4;
+}
+
+// A response message for
+// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
+message GetTreeResponse {
+ // The directories descended from the requested root.
+ repeated Directory directories = 1;
+
+ // If present, signifies that there are more results which the client can
+ // retrieve by passing this as the page_token in a subsequent
+ // [request][build.bazel.remote.execution.v2.GetTreeRequest].
+ // If empty, signifies that this is the last page of results.
+ string next_page_token = 2;
+}
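
Because the stream may stop at a page boundary, a client keeps re-issuing the request with the last `next_page_token` until it comes back empty. A sketch, assuming a `ContentAddressableStorage` stub from the generated modules:

```python
from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2


def get_full_tree(cas_stub, instance_name, root_digest):
    """Collect every Directory under root_digest, following page tokens."""
    directories = []
    page_token = ""
    while True:
        request = remote_execution_pb2.GetTreeRequest(
            instance_name=instance_name,
            root_digest=root_digest,
            page_token=page_token,
        )
        for response in cas_stub.GetTree(request):  # server-streaming pages
            directories.extend(response.directories)
            page_token = response.next_page_token
        if not page_token:
            return directories
```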
+
+// A request message for
+// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
+message GetCapabilitiesRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+}
+
+// A response message for
+// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
+message ServerCapabilities {
+ // Capabilities of the remote cache system.
+ CacheCapabilities cache_capabilities = 1;
+
+ // Capabilities of the remote execution system.
+ ExecutionCapabilities execution_capabilities = 2;
+
+ // Earliest RE API version supported, including deprecated versions.
+ build.bazel.semver.SemVer deprecated_api_version = 3;
+
+ // Earliest non-deprecated RE API version supported.
+ build.bazel.semver.SemVer low_api_version = 4;
+
+ // Latest RE API version supported.
+ build.bazel.semver.SemVer high_api_version = 5;
+}
+
+// The digest function used for converting values into keys for CAS and Action
+// Cache.
+enum DigestFunction {
+ UNKNOWN = 0;
+ SHA256 = 1;
+ SHA1 = 2;
+ MD5 = 3;
+}
+
+// Describes the server/instance capabilities for updating the action cache.
+message ActionCacheUpdateCapabilities {
+ bool update_enabled = 1;
+}
+
+// Allowed values for priority in
+// [ResultsCachePolicy][build.bazel.remote.execution.v2.ResultsCachePolicy].
+// Used for querying the valid priority ranges for both the cache and execution.
+message PriorityCapabilities {
+ // Supported range of priorities, including boundaries.
+ message PriorityRange {
+ int32 min_priority = 1;
+ int32 max_priority = 2;
+ }
+ repeated PriorityRange priorities = 1;
+}
+
+// Capabilities of the remote cache system.
+message CacheCapabilities {
+ // Describes how the server treats absolute symlink targets.
+ enum SymlinkAbsolutePathStrategy {
+ UNKNOWN = 0;
+
+ // Server will return an INVALID_ARGUMENT on input symlinks with absolute targets.
+ // If an action tries to create an output symlink with an absolute target, a
+ // FAILED_PRECONDITION will be returned.
+ DISALLOWED = 1;
+
+ // Server will allow symlink targets to escape the input root tree, possibly
+ // resulting in non-hermetic builds.
+ ALLOWED = 2;
+ }
+
+ // All the digest functions supported by the remote cache.
+ // Remote cache may support multiple digest functions simultaneously.
+ repeated DigestFunction digest_function = 1;
+
+ // Capabilities for updating the action cache.
+ ActionCacheUpdateCapabilities action_cache_update_capabilities = 2;
+
+ // Supported cache priority range for both CAS and ActionCache.
+ PriorityCapabilities cache_priority_capabilities = 3;
+
+ // Maximum total size of blobs to be uploaded/downloaded using
+ // batch methods. A value of 0 means no limit is set, although
+ // in practice there will always be a message size limitation
+ // of the protocol in use, e.g. GRPC.
+ int64 max_batch_total_size_bytes = 4;
+
+ // Whether absolute symlink targets are supported.
+ SymlinkAbsolutePathStrategy symlink_absolute_path_strategy = 5;
+}
+
+// Capabilities of the remote execution system.
+message ExecutionCapabilities {
+ // Remote execution may only support a single digest function.
+ DigestFunction digest_function = 1;
+
+ // Whether remote execution is enabled for the particular server/instance.
+ bool exec_enabled = 2;
+
+ // Supported execution priority range.
+ PriorityCapabilities execution_priority_capabilities = 3;
+}
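
A client would normally call `GetCapabilities` once per remote and let the answer drive its behaviour, e.g. which digest function to use and how large a batch upload may be. A sketch, assuming the generated stubs:

```python
from buildstream._protos.build.bazel.remote.execution.v2 import (
    remote_execution_pb2,
    remote_execution_pb2_grpc,
)


def negotiate(channel, instance_name):
    """Check SHA-256 support and return the batch size limit (0 means no limit)."""
    capabilities = remote_execution_pb2_grpc.CapabilitiesStub(channel)
    server_caps = capabilities.GetCapabilities(
        remote_execution_pb2.GetCapabilitiesRequest(instance_name=instance_name)
    )
    cache = server_caps.cache_capabilities
    if remote_execution_pb2.SHA256 not in cache.digest_function:
        raise RuntimeError("server does not support SHA-256 digests")
    return cache.max_batch_total_size_bytes
```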
+
+// Details for the tool used to call the API.
+message ToolDetails {
+ // Name of the tool, e.g. bazel.
+ string tool_name = 1;
+
+ // Version of the tool used for the request, e.g. 5.0.3.
+ string tool_version = 2;
+}
+
+// An optional Metadata to attach to any RPC request to tell the server about an
+// external context of the request. The server may use this for logging or other
+// purposes. To use it, the client attaches the header to the call using the
+// canonical proto serialization:
+// name: build.bazel.remote.execution.v2.requestmetadata-bin
+// contents: the base64 encoded binary RequestMetadata message.
+message RequestMetadata {
+ // The details for the tool invoking the requests.
+ ToolDetails tool_details = 1;
+
+ // An identifier that ties multiple requests to the same action.
+ // For example, multiple requests to the CAS, Action Cache, and Execution
+ // API are used in order to compile foo.cc.
+ string action_id = 2;
+
+ // An identifier that ties multiple actions together to a final result.
+ // For example, multiple actions are required to build and run foo_test.
+ string tool_invocation_id = 3;
+
+ // An identifier to tie multiple tool invocations together. For example,
+ // runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
+ string correlated_invocations_id = 4;
+}
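
With the Python gRPC library, metadata keys ending in `-bin` carry binary values, so the client can pass the serialized message directly and let the transport handle base64 on the wire. A sketch of building that header (the tool name/version and the use of `uuid` are illustrative):

```python
import uuid

from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2


def request_metadata_header(action_id):
    """Build gRPC call metadata carrying a RequestMetadata message."""
    metadata = remote_execution_pb2.RequestMetadata(
        tool_details=remote_execution_pb2.ToolDetails(
            tool_name="buildstream", tool_version="dev"
        ),
        action_id=action_id,
        tool_invocation_id=str(uuid.uuid4()),
    )
    return [
        ("build.bazel.remote.execution.v2.requestmetadata-bin",
         metadata.SerializeToString()),
    ]
```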
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
new file mode 100644
index 000000000..46d59b184
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
@@ -0,0 +1,2660 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: build/bazel/remote/execution/v2/remote_execution.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.build.bazel.semver import semver_pb2 as build_dot_bazel_dot_semver_dot_semver__pb2
+from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
+from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='build/bazel/remote/execution/v2/remote_execution.proto',
+ package='build.bazel.remote.execution.v2',
+ syntax='proto3',
+ serialized_pb=_b('\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xd5\x01\n\x06\x41\x63tion\x12?\n\x0e\x63ommand_digest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x42\n\x11input_root_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12*\n\x07timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x14\n\x0c\x64o_not_cache\x18\x07 \x01(\x08J\x04\x08\x03\x10\x06\"\xb7\x02\n\x07\x43ommand\x12\x11\n\targuments\x18\x01 \x03(\t\x12[\n\x15\x65nvironment_variables\x18\x02 \x03(\x0b\x32<.build.bazel.remote.execution.v2.Command.EnvironmentVariable\x12\x14\n\x0coutput_files\x18\x03 \x03(\t\x12\x1a\n\x12output_directories\x18\x04 \x03(\t\x12;\n\x08platform\x18\x05 \x01(\x0b\x32).build.bazel.remote.execution.v2.Platform\x12\x19\n\x11working_directory\x18\x06 \x01(\t\x1a\x32\n\x13\x45nvironmentVariable\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"{\n\x08Platform\x12\x46\n\nproperties\x18\x01 \x03(\x0b\x32\x32.build.bazel.remote.execution.v2.Platform.Property\x1a\'\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xca\x01\n\tDirectory\x12\x38\n\x05\x66iles\x18\x01 \x03(\x0b\x32).build.bazel.remote.execution.v2.FileNode\x12\x43\n\x0b\x64irectories\x18\x02 \x03(\x0b\x32..build.bazel.remote.execution.v2.DirectoryNode\x12>\n\x08symlinks\x18\x03 \x03(\x0b\x32,.build.bazel.remote.execution.v2.SymlinkNode\"n\n\x08\x46ileNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"V\n\rDirectoryNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"+\n\x0bSymlinkNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"*\n\x06\x44igest\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x12\n\nsize_bytes\x18\x02 \x01(\x03\"\xec\x04\n\x16\x45xecutedActionMetadata\x12\x0e\n\x06worker\x18\x01 \x01(\t\x12\x34\n\x10queued_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16worker_start_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12>\n\x1aworker_completed_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1binput_fetch_start_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x43\n\x1finput_fetch_completed_timestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12=\n\x19\x65xecution_start_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1d\x65xecution_completed_timestamp\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1doutput_upload_start_timestamp\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x45\n!output_upload_completed_timestamp\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb5\x03\n\x0c\x41\x63tionResult\x12\x41\n\x0coutput_files\x18\x02 \x03(\x0b\x32+.build.bazel.remote.execution.v2.OutputFile\x12L\n\x12output_directories\x18\x03 \x03(\x0b\x32\x30.build.bazel.remote.execution.v2.OutputDirectory\x12\x11\n\texit_code\x18\x04 \x01(\x05\x12\x12\n\nstdout_raw\x18\x05 \x01(\x0c\x12>\n\rstdout_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x12\n\nstderr_raw\x18\x07 
\x01(\x0c\x12>\n\rstderr_digest\x18\x08 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12S\n\x12\x65xecution_metadata\x18\t \x01(\x0b\x32\x37.build.bazel.remote.execution.v2.ExecutedActionMetadataJ\x04\x08\x01\x10\x02\"p\n\nOutputFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"~\n\x04Tree\x12\x38\n\x04root\x18\x01 \x01(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12<\n\x08\x63hildren\x18\x02 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\"c\n\x0fOutputDirectory\x12\x0c\n\x04path\x18\x01 \x01(\t\x12<\n\x0btree_digest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.DigestJ\x04\x08\x02\x10\x03\"#\n\x0f\x45xecutionPolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"&\n\x12ResultsCachePolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"\xb3\x02\n\x0e\x45xecuteRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x19\n\x11skip_cache_lookup\x18\x03 \x01(\x08\x12>\n\raction_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12J\n\x10\x65xecution_policy\x18\x07 \x01(\x0b\x32\x30.build.bazel.remote.execution.v2.ExecutionPolicy\x12Q\n\x14results_cache_policy\x18\x08 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicyJ\x04\x08\x02\x10\x03J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"Z\n\x07LogFile\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x16\n\x0ehuman_readable\x18\x02 \x01(\x08\"\xbf\x02\n\x0f\x45xecuteResponse\x12=\n\x06result\x18\x01 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12\x15\n\rcached_result\x18\x02 \x01(\x08\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12U\n\x0bserver_logs\x18\x04 \x03(\x0b\x32@.build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry\x1a[\n\x0fServerLogsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x37\n\x05value\x18\x02 \x01(\x0b\x32(.build.bazel.remote.execution.v2.LogFile:\x02\x38\x01\"\xb3\x02\n\x18\x45xecuteOperationMetadata\x12N\n\x05stage\x18\x01 \x01(\x0e\x32?.build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x1a\n\x12stdout_stream_name\x18\x03 \x01(\t\x12\x1a\n\x12stderr_stream_name\x18\x04 \x01(\t\"O\n\x05Stage\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0f\n\x0b\x43\x41\x43HE_CHECK\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\r\n\tEXECUTING\x10\x03\x12\r\n\tCOMPLETED\x10\x04\"$\n\x14WaitExecutionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"o\n\x16GetActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x8b\x02\n\x19UpdateActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x44\n\raction_result\x18\x03 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12Q\n\x14results_cache_policy\x18\x04 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicy\"o\n\x17\x46indMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"a\n\x18\x46indMissingBlobsResponse\x12\x45\n\x14missing_blob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xd6\x01\n\x17\x42\x61tchUpdateBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12R\n\x08requests\x18\x02 
\x03(\x0b\x32@.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request\x1aP\n\x07Request\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\xda\x01\n\x18\x42\x61tchUpdateBlobsResponse\x12U\n\tresponses\x18\x01 \x03(\x0b\x32\x42.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response\x1ag\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"h\n\x15\x42\x61tchReadBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x38\n\x07\x64igests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xe4\x01\n\x16\x42\x61tchReadBlobsResponse\x12S\n\tresponses\x18\x01 \x03(\x0b\x32@.build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response\x1au\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\"\x8c\x01\n\x0eGetTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\"k\n\x0fGetTreeResponse\x12?\n\x0b\x64irectories\x18\x01 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"/\n\x16GetCapabilitiesRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\xe3\x02\n\x12ServerCapabilities\x12N\n\x12\x63\x61\x63he_capabilities\x18\x01 \x01(\x0b\x32\x32.build.bazel.remote.execution.v2.CacheCapabilities\x12V\n\x16\x65xecution_capabilities\x18\x02 \x01(\x0b\x32\x36.build.bazel.remote.execution.v2.ExecutionCapabilities\x12:\n\x16\x64\x65precated_api_version\x18\x03 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x33\n\x0flow_api_version\x18\x04 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x34\n\x10high_api_version\x18\x05 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\"7\n\x1d\x41\x63tionCacheUpdateCapabilities\x12\x16\n\x0eupdate_enabled\x18\x01 \x01(\x08\"\xac\x01\n\x14PriorityCapabilities\x12W\n\npriorities\x18\x01 \x03(\x0b\x32\x43.build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange\x1a;\n\rPriorityRange\x12\x14\n\x0cmin_priority\x18\x01 \x01(\x05\x12\x14\n\x0cmax_priority\x18\x02 \x01(\x05\"\x88\x04\n\x11\x43\x61\x63heCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x03(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12h\n action_cache_update_capabilities\x18\x02 \x01(\x0b\x32>.build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities\x12Z\n\x1b\x63\x61\x63he_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\x12\"\n\x1amax_batch_total_size_bytes\x18\x04 \x01(\x03\x12v\n\x1esymlink_absolute_path_strategy\x18\x05 \x01(\x0e\x32N.build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy\"G\n\x1bSymlinkAbsolutePathStrategy\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0e\n\nDISALLOWED\x10\x01\x12\x0b\n\x07\x41LLOWED\x10\x02\"\xd7\x01\n\x15\x45xecutionCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x01(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12\x14\n\x0c\x65xec_enabled\x18\x02 \x01(\x08\x12^\n\x1f\x65xecution_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\"6\n\x0bToolDetails\x12\x11\n\ttool_name\x18\x01 \x01(\t\x12\x14\n\x0ctool_version\x18\x02 
\x01(\t\"\xa7\x01\n\x0fRequestMetadata\x12\x42\n\x0ctool_details\x18\x01 \x01(\x0b\x32,.build.bazel.remote.execution.v2.ToolDetails\x12\x11\n\taction_id\x18\x02 \x01(\t\x12\x1a\n\x12tool_invocation_id\x18\x03 \x01(\t\x12!\n\x19\x63orrelated_invocations_id\x18\x04 \x01(\t*<\n\x0e\x44igestFunction\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06SHA256\x10\x01\x12\x08\n\x04SHA1\x10\x02\x12\x07\n\x03MD5\x10\x03\x32\xb9\x02\n\tExecution\x12\x8e\x01\n\x07\x45xecute\x12/.build.bazel.remote.execution.v2.ExecuteRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/actions:execute:\x01*0\x01\x12\x9a\x01\n\rWaitExecution\x12\x35.build.bazel.remote.execution.v2.WaitExecutionRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{name=operations/**}:waitExecution:\x01*0\x01\x32\xd6\x03\n\x0b\x41\x63tionCache\x12\xd7\x01\n\x0fGetActionResult\x12\x37.build.bazel.remote.execution.v2.GetActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"\\\x82\xd3\xe4\x93\x02V\x12T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}\x12\xec\x01\n\x12UpdateActionResult\x12:.build.bazel.remote.execution.v2.UpdateActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"k\x82\xd3\xe4\x93\x02\x65\x1aT/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result2\x9b\x06\n\x19\x43ontentAddressableStorage\x12\xbc\x01\n\x10\x46indMissingBlobs\x12\x38.build.bazel.remote.execution.v2.FindMissingBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.FindMissingBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:findMissing:\x01*\x12\xbc\x01\n\x10\x42\x61tchUpdateBlobs\x12\x38.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:batchUpdate:\x01*\x12\xb4\x01\n\x0e\x42\x61tchReadBlobs\x12\x36.build.bazel.remote.execution.v2.BatchReadBlobsRequest\x1a\x37.build.bazel.remote.execution.v2.BatchReadBlobsResponse\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/blobs:batchRead:\x01*\x12\xc8\x01\n\x07GetTree\x12/.build.bazel.remote.execution.v2.GetTreeRequest\x1a\x30.build.bazel.remote.execution.v2.GetTreeResponse\"X\x82\xd3\xe4\x93\x02R\x12P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree0\x01\x32\xbd\x01\n\x0c\x43\x61pabilities\x12\xac\x01\n\x0fGetCapabilities\x12\x37.build.bazel.remote.execution.v2.GetCapabilitiesRequest\x1a\x33.build.bazel.remote.execution.v2.ServerCapabilities\"+\x82\xd3\xe4\x93\x02%\x12#/v2/{instance_name=**}/capabilitiesBr\n\x1f\x62uild.bazel.remote.execution.v2B\x14RemoteExecutionProtoP\x01Z\x0fremoteexecution\xa2\x02\x03REX\xaa\x02\x1f\x42uild.Bazel.Remote.Execution.V2b\x06proto3')
+ ,
+ dependencies=[build_dot_bazel_dot_semver_dot_semver__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
+
+_DIGESTFUNCTION = _descriptor.EnumDescriptor(
+ name='DigestFunction',
+ full_name='build.bazel.remote.execution.v2.DigestFunction',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNKNOWN', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SHA256', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SHA1', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='MD5', index=3, number=3,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=7213,
+ serialized_end=7273,
+)
+_sym_db.RegisterEnumDescriptor(_DIGESTFUNCTION)
+
+DigestFunction = enum_type_wrapper.EnumTypeWrapper(_DIGESTFUNCTION)
+UNKNOWN = 0
+SHA256 = 1
+SHA1 = 2
+MD5 = 3
+
+
+_EXECUTEOPERATIONMETADATA_STAGE = _descriptor.EnumDescriptor(
+ name='Stage',
+ full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNKNOWN', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CACHE_CHECK', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='QUEUED', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='EXECUTING', index=3, number=3,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='COMPLETED', index=4, number=4,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=3866,
+ serialized_end=3945,
+)
+_sym_db.RegisterEnumDescriptor(_EXECUTEOPERATIONMETADATA_STAGE)
+
+_CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY = _descriptor.EnumDescriptor(
+ name='SymlinkAbsolutePathStrategy',
+ full_name='build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UNKNOWN', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='DISALLOWED', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='ALLOWED', index=2, number=2,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=6696,
+ serialized_end=6767,
+)
+_sym_db.RegisterEnumDescriptor(_CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY)
+
+
+_ACTION = _descriptor.Descriptor(
+ name='Action',
+ full_name='build.bazel.remote.execution.v2.Action',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='command_digest', full_name='build.bazel.remote.execution.v2.Action.command_digest', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='input_root_digest', full_name='build.bazel.remote.execution.v2.Action.input_root_digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='timeout', full_name='build.bazel.remote.execution.v2.Action.timeout', index=2,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='do_not_cache', full_name='build.bazel.remote.execution.v2.Action.do_not_cache', index=3,
+ number=7, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=282,
+ serialized_end=495,
+)
+
+
+_COMMAND_ENVIRONMENTVARIABLE = _descriptor.Descriptor(
+ name='EnvironmentVariable',
+ full_name='build.bazel.remote.execution.v2.Command.EnvironmentVariable',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.Command.EnvironmentVariable.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='build.bazel.remote.execution.v2.Command.EnvironmentVariable.value', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=759,
+ serialized_end=809,
+)
+
+_COMMAND = _descriptor.Descriptor(
+ name='Command',
+ full_name='build.bazel.remote.execution.v2.Command',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='arguments', full_name='build.bazel.remote.execution.v2.Command.arguments', index=0,
+ number=1, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='environment_variables', full_name='build.bazel.remote.execution.v2.Command.environment_variables', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='output_files', full_name='build.bazel.remote.execution.v2.Command.output_files', index=2,
+ number=3, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='output_directories', full_name='build.bazel.remote.execution.v2.Command.output_directories', index=3,
+ number=4, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='platform', full_name='build.bazel.remote.execution.v2.Command.platform', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='working_directory', full_name='build.bazel.remote.execution.v2.Command.working_directory', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_COMMAND_ENVIRONMENTVARIABLE, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=498,
+ serialized_end=809,
+)
+
+
+_PLATFORM_PROPERTY = _descriptor.Descriptor(
+ name='Property',
+ full_name='build.bazel.remote.execution.v2.Platform.Property',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.Platform.Property.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='build.bazel.remote.execution.v2.Platform.Property.value', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=895,
+ serialized_end=934,
+)
+
+_PLATFORM = _descriptor.Descriptor(
+ name='Platform',
+ full_name='build.bazel.remote.execution.v2.Platform',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='properties', full_name='build.bazel.remote.execution.v2.Platform.properties', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_PLATFORM_PROPERTY, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=811,
+ serialized_end=934,
+)
+
+
+_DIRECTORY = _descriptor.Descriptor(
+ name='Directory',
+ full_name='build.bazel.remote.execution.v2.Directory',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='files', full_name='build.bazel.remote.execution.v2.Directory.files', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='directories', full_name='build.bazel.remote.execution.v2.Directory.directories', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='symlinks', full_name='build.bazel.remote.execution.v2.Directory.symlinks', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=937,
+ serialized_end=1139,
+)
+
+
+_FILENODE = _descriptor.Descriptor(
+ name='FileNode',
+ full_name='build.bazel.remote.execution.v2.FileNode',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.FileNode.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.FileNode.digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='is_executable', full_name='build.bazel.remote.execution.v2.FileNode.is_executable', index=2,
+ number=4, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1141,
+ serialized_end=1251,
+)
+
+
+_DIRECTORYNODE = _descriptor.Descriptor(
+ name='DirectoryNode',
+ full_name='build.bazel.remote.execution.v2.DirectoryNode',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.DirectoryNode.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.DirectoryNode.digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1253,
+ serialized_end=1339,
+)
+
+
+_SYMLINKNODE = _descriptor.Descriptor(
+ name='SymlinkNode',
+ full_name='build.bazel.remote.execution.v2.SymlinkNode',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.SymlinkNode.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='target', full_name='build.bazel.remote.execution.v2.SymlinkNode.target', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1341,
+ serialized_end=1384,
+)
+
+
+_DIGEST = _descriptor.Descriptor(
+ name='Digest',
+ full_name='build.bazel.remote.execution.v2.Digest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='hash', full_name='build.bazel.remote.execution.v2.Digest.hash', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='size_bytes', full_name='build.bazel.remote.execution.v2.Digest.size_bytes', index=1,
+ number=2, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1386,
+ serialized_end=1428,
+)
+
+
+_EXECUTEDACTIONMETADATA = _descriptor.Descriptor(
+ name='ExecutedActionMetadata',
+ full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='worker', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.worker', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='queued_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.queued_timestamp', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='worker_start_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.worker_start_timestamp', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='worker_completed_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.worker_completed_timestamp', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='input_fetch_start_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.input_fetch_start_timestamp', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='input_fetch_completed_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.input_fetch_completed_timestamp', index=5,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_start_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.execution_start_timestamp', index=6,
+ number=7, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_completed_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.execution_completed_timestamp', index=7,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='output_upload_start_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.output_upload_start_timestamp', index=8,
+ number=9, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='output_upload_completed_timestamp', full_name='build.bazel.remote.execution.v2.ExecutedActionMetadata.output_upload_completed_timestamp', index=9,
+ number=10, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1431,
+ serialized_end=2051,
+)
+
+
+_ACTIONRESULT = _descriptor.Descriptor(
+ name='ActionResult',
+ full_name='build.bazel.remote.execution.v2.ActionResult',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='output_files', full_name='build.bazel.remote.execution.v2.ActionResult.output_files', index=0,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='output_directories', full_name='build.bazel.remote.execution.v2.ActionResult.output_directories', index=1,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='exit_code', full_name='build.bazel.remote.execution.v2.ActionResult.exit_code', index=2,
+ number=4, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stdout_raw', full_name='build.bazel.remote.execution.v2.ActionResult.stdout_raw', index=3,
+ number=5, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stdout_digest', full_name='build.bazel.remote.execution.v2.ActionResult.stdout_digest', index=4,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stderr_raw', full_name='build.bazel.remote.execution.v2.ActionResult.stderr_raw', index=5,
+ number=7, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stderr_digest', full_name='build.bazel.remote.execution.v2.ActionResult.stderr_digest', index=6,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_metadata', full_name='build.bazel.remote.execution.v2.ActionResult.execution_metadata', index=7,
+ number=9, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2054,
+ serialized_end=2491,
+)
+
+
+_OUTPUTFILE = _descriptor.Descriptor(
+ name='OutputFile',
+ full_name='build.bazel.remote.execution.v2.OutputFile',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='path', full_name='build.bazel.remote.execution.v2.OutputFile.path', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.OutputFile.digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='is_executable', full_name='build.bazel.remote.execution.v2.OutputFile.is_executable', index=2,
+ number=4, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2493,
+ serialized_end=2605,
+)
+
+
+_TREE = _descriptor.Descriptor(
+ name='Tree',
+ full_name='build.bazel.remote.execution.v2.Tree',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='root', full_name='build.bazel.remote.execution.v2.Tree.root', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='children', full_name='build.bazel.remote.execution.v2.Tree.children', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2607,
+ serialized_end=2733,
+)
+
+
+_OUTPUTDIRECTORY = _descriptor.Descriptor(
+ name='OutputDirectory',
+ full_name='build.bazel.remote.execution.v2.OutputDirectory',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='path', full_name='build.bazel.remote.execution.v2.OutputDirectory.path', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='tree_digest', full_name='build.bazel.remote.execution.v2.OutputDirectory.tree_digest', index=1,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2735,
+ serialized_end=2834,
+)
+
+
+_EXECUTIONPOLICY = _descriptor.Descriptor(
+ name='ExecutionPolicy',
+ full_name='build.bazel.remote.execution.v2.ExecutionPolicy',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='priority', full_name='build.bazel.remote.execution.v2.ExecutionPolicy.priority', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2836,
+ serialized_end=2871,
+)
+
+
+_RESULTSCACHEPOLICY = _descriptor.Descriptor(
+ name='ResultsCachePolicy',
+ full_name='build.bazel.remote.execution.v2.ResultsCachePolicy',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='priority', full_name='build.bazel.remote.execution.v2.ResultsCachePolicy.priority', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2873,
+ serialized_end=2911,
+)
+
+
+_EXECUTEREQUEST = _descriptor.Descriptor(
+ name='ExecuteRequest',
+ full_name='build.bazel.remote.execution.v2.ExecuteRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.ExecuteRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='skip_cache_lookup', full_name='build.bazel.remote.execution.v2.ExecuteRequest.skip_cache_lookup', index=1,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_digest', full_name='build.bazel.remote.execution.v2.ExecuteRequest.action_digest', index=2,
+ number=6, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_policy', full_name='build.bazel.remote.execution.v2.ExecuteRequest.execution_policy', index=3,
+ number=7, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='results_cache_policy', full_name='build.bazel.remote.execution.v2.ExecuteRequest.results_cache_policy', index=4,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2914,
+ serialized_end=3221,
+)
+
+
+_LOGFILE = _descriptor.Descriptor(
+ name='LogFile',
+ full_name='build.bazel.remote.execution.v2.LogFile',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.LogFile.digest', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='human_readable', full_name='build.bazel.remote.execution.v2.LogFile.human_readable', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3223,
+ serialized_end=3313,
+)
+
+
+_EXECUTERESPONSE_SERVERLOGSENTRY = _descriptor.Descriptor(
+ name='ServerLogsEntry',
+ full_name='build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='key', full_name='build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry.key', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry.value', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3544,
+ serialized_end=3635,
+)
+
+_EXECUTERESPONSE = _descriptor.Descriptor(
+ name='ExecuteResponse',
+ full_name='build.bazel.remote.execution.v2.ExecuteResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='result', full_name='build.bazel.remote.execution.v2.ExecuteResponse.result', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='cached_result', full_name='build.bazel.remote.execution.v2.ExecuteResponse.cached_result', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='status', full_name='build.bazel.remote.execution.v2.ExecuteResponse.status', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='server_logs', full_name='build.bazel.remote.execution.v2.ExecuteResponse.server_logs', index=3,
+ number=4, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_EXECUTERESPONSE_SERVERLOGSENTRY, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3316,
+ serialized_end=3635,
+)
+
+
+_EXECUTEOPERATIONMETADATA = _descriptor.Descriptor(
+ name='ExecuteOperationMetadata',
+ full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='stage', full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.stage', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_digest', full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.action_digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stdout_stream_name', full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.stdout_stream_name', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='stderr_stream_name', full_name='build.bazel.remote.execution.v2.ExecuteOperationMetadata.stderr_stream_name', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _EXECUTEOPERATIONMETADATA_STAGE,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3638,
+ serialized_end=3945,
+)
+
+
+_WAITEXECUTIONREQUEST = _descriptor.Descriptor(
+ name='WaitExecutionRequest',
+ full_name='build.bazel.remote.execution.v2.WaitExecutionRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='build.bazel.remote.execution.v2.WaitExecutionRequest.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3947,
+ serialized_end=3983,
+)
+
+
+_GETACTIONRESULTREQUEST = _descriptor.Descriptor(
+ name='GetActionResultRequest',
+ full_name='build.bazel.remote.execution.v2.GetActionResultRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.GetActionResultRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_digest', full_name='build.bazel.remote.execution.v2.GetActionResultRequest.action_digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=3985,
+ serialized_end=4096,
+)
+
+
+_UPDATEACTIONRESULTREQUEST = _descriptor.Descriptor(
+ name='UpdateActionResultRequest',
+ full_name='build.bazel.remote.execution.v2.UpdateActionResultRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.UpdateActionResultRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_digest', full_name='build.bazel.remote.execution.v2.UpdateActionResultRequest.action_digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_result', full_name='build.bazel.remote.execution.v2.UpdateActionResultRequest.action_result', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='results_cache_policy', full_name='build.bazel.remote.execution.v2.UpdateActionResultRequest.results_cache_policy', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4099,
+ serialized_end=4366,
+)
+
+
+_FINDMISSINGBLOBSREQUEST = _descriptor.Descriptor(
+ name='FindMissingBlobsRequest',
+ full_name='build.bazel.remote.execution.v2.FindMissingBlobsRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.FindMissingBlobsRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='blob_digests', full_name='build.bazel.remote.execution.v2.FindMissingBlobsRequest.blob_digests', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4368,
+ serialized_end=4479,
+)
+
+
+_FINDMISSINGBLOBSRESPONSE = _descriptor.Descriptor(
+ name='FindMissingBlobsResponse',
+ full_name='build.bazel.remote.execution.v2.FindMissingBlobsResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='missing_blob_digests', full_name='build.bazel.remote.execution.v2.FindMissingBlobsResponse.missing_blob_digests', index=0,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4481,
+ serialized_end=4578,
+)
+
+
+_BATCHUPDATEBLOBSREQUEST_REQUEST = _descriptor.Descriptor(
+ name='Request',
+ full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request.digest', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='data', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request.data', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4715,
+ serialized_end=4795,
+)
+
+_BATCHUPDATEBLOBSREQUEST = _descriptor.Descriptor(
+ name='BatchUpdateBlobsRequest',
+ full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='requests', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.requests', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_BATCHUPDATEBLOBSREQUEST_REQUEST, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4581,
+ serialized_end=4795,
+)
+
+
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE = _descriptor.Descriptor(
+ name='Response',
+ full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response.digest', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='status', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response.status', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4913,
+ serialized_end=5016,
+)
+
+_BATCHUPDATEBLOBSRESPONSE = _descriptor.Descriptor(
+ name='BatchUpdateBlobsResponse',
+ full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='responses', full_name='build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.responses', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_BATCHUPDATEBLOBSRESPONSE_RESPONSE, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=4798,
+ serialized_end=5016,
+)
+
+
+_BATCHREADBLOBSREQUEST = _descriptor.Descriptor(
+ name='BatchReadBlobsRequest',
+ full_name='build.bazel.remote.execution.v2.BatchReadBlobsRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.BatchReadBlobsRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='digests', full_name='build.bazel.remote.execution.v2.BatchReadBlobsRequest.digests', index=1,
+ number=2, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5018,
+ serialized_end=5122,
+)
+
+
+_BATCHREADBLOBSRESPONSE_RESPONSE = _descriptor.Descriptor(
+ name='Response',
+ full_name='build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response.digest', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='data', full_name='build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response.data', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='status', full_name='build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response.status', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5236,
+ serialized_end=5353,
+)
+
+_BATCHREADBLOBSRESPONSE = _descriptor.Descriptor(
+ name='BatchReadBlobsResponse',
+ full_name='build.bazel.remote.execution.v2.BatchReadBlobsResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='responses', full_name='build.bazel.remote.execution.v2.BatchReadBlobsResponse.responses', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_BATCHREADBLOBSRESPONSE_RESPONSE, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5125,
+ serialized_end=5353,
+)
+
+
+_GETTREEREQUEST = _descriptor.Descriptor(
+ name='GetTreeRequest',
+ full_name='build.bazel.remote.execution.v2.GetTreeRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.GetTreeRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='root_digest', full_name='build.bazel.remote.execution.v2.GetTreeRequest.root_digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_size', full_name='build.bazel.remote.execution.v2.GetTreeRequest.page_size', index=2,
+ number=3, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_token', full_name='build.bazel.remote.execution.v2.GetTreeRequest.page_token', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5356,
+ serialized_end=5496,
+)
+
+
+_GETTREERESPONSE = _descriptor.Descriptor(
+ name='GetTreeResponse',
+ full_name='build.bazel.remote.execution.v2.GetTreeResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='directories', full_name='build.bazel.remote.execution.v2.GetTreeResponse.directories', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='next_page_token', full_name='build.bazel.remote.execution.v2.GetTreeResponse.next_page_token', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5498,
+ serialized_end=5605,
+)
+
+
+_GETCAPABILITIESREQUEST = _descriptor.Descriptor(
+ name='GetCapabilitiesRequest',
+ full_name='build.bazel.remote.execution.v2.GetCapabilitiesRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='build.bazel.remote.execution.v2.GetCapabilitiesRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5607,
+ serialized_end=5654,
+)
+
+
+_SERVERCAPABILITIES = _descriptor.Descriptor(
+ name='ServerCapabilities',
+ full_name='build.bazel.remote.execution.v2.ServerCapabilities',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='cache_capabilities', full_name='build.bazel.remote.execution.v2.ServerCapabilities.cache_capabilities', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_capabilities', full_name='build.bazel.remote.execution.v2.ServerCapabilities.execution_capabilities', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='deprecated_api_version', full_name='build.bazel.remote.execution.v2.ServerCapabilities.deprecated_api_version', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='low_api_version', full_name='build.bazel.remote.execution.v2.ServerCapabilities.low_api_version', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='high_api_version', full_name='build.bazel.remote.execution.v2.ServerCapabilities.high_api_version', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=5657,
+ serialized_end=6012,
+)
+
+
+_ACTIONCACHEUPDATECAPABILITIES = _descriptor.Descriptor(
+ name='ActionCacheUpdateCapabilities',
+ full_name='build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='update_enabled', full_name='build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities.update_enabled', index=0,
+ number=1, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=6014,
+ serialized_end=6069,
+)
+
+
+_PRIORITYCAPABILITIES_PRIORITYRANGE = _descriptor.Descriptor(
+ name='PriorityRange',
+ full_name='build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='min_priority', full_name='build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange.min_priority', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='max_priority', full_name='build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange.max_priority', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=6185,
+ serialized_end=6244,
+)
+
+_PRIORITYCAPABILITIES = _descriptor.Descriptor(
+ name='PriorityCapabilities',
+ full_name='build.bazel.remote.execution.v2.PriorityCapabilities',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='priorities', full_name='build.bazel.remote.execution.v2.PriorityCapabilities.priorities', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_PRIORITYCAPABILITIES_PRIORITYRANGE, ],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=6072,
+ serialized_end=6244,
+)
+
+
+_CACHECAPABILITIES = _descriptor.Descriptor(
+ name='CacheCapabilities',
+ full_name='build.bazel.remote.execution.v2.CacheCapabilities',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='digest_function', full_name='build.bazel.remote.execution.v2.CacheCapabilities.digest_function', index=0,
+ number=1, type=14, cpp_type=8, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_cache_update_capabilities', full_name='build.bazel.remote.execution.v2.CacheCapabilities.action_cache_update_capabilities', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='cache_priority_capabilities', full_name='build.bazel.remote.execution.v2.CacheCapabilities.cache_priority_capabilities', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='max_batch_total_size_bytes', full_name='build.bazel.remote.execution.v2.CacheCapabilities.max_batch_total_size_bytes', index=3,
+ number=4, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='symlink_absolute_path_strategy', full_name='build.bazel.remote.execution.v2.CacheCapabilities.symlink_absolute_path_strategy', index=4,
+ number=5, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ _CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY,
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=6247,
+ serialized_end=6767,
+)
+
+
+_EXECUTIONCAPABILITIES = _descriptor.Descriptor(
+ name='ExecutionCapabilities',
+ full_name='build.bazel.remote.execution.v2.ExecutionCapabilities',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='digest_function', full_name='build.bazel.remote.execution.v2.ExecutionCapabilities.digest_function', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='exec_enabled', full_name='build.bazel.remote.execution.v2.ExecutionCapabilities.exec_enabled', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='execution_priority_capabilities', full_name='build.bazel.remote.execution.v2.ExecutionCapabilities.execution_priority_capabilities', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=6770,
+ serialized_end=6985,
+)
+
+
+_TOOLDETAILS = _descriptor.Descriptor(
+ name='ToolDetails',
+ full_name='build.bazel.remote.execution.v2.ToolDetails',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='tool_name', full_name='build.bazel.remote.execution.v2.ToolDetails.tool_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='tool_version', full_name='build.bazel.remote.execution.v2.ToolDetails.tool_version', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=6987,
+ serialized_end=7041,
+)
+
+
+_REQUESTMETADATA = _descriptor.Descriptor(
+ name='RequestMetadata',
+ full_name='build.bazel.remote.execution.v2.RequestMetadata',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='tool_details', full_name='build.bazel.remote.execution.v2.RequestMetadata.tool_details', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='action_id', full_name='build.bazel.remote.execution.v2.RequestMetadata.action_id', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='tool_invocation_id', full_name='build.bazel.remote.execution.v2.RequestMetadata.tool_invocation_id', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='correlated_invocations_id', full_name='build.bazel.remote.execution.v2.RequestMetadata.correlated_invocations_id', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=7044,
+ serialized_end=7211,
+)
+
+_ACTION.fields_by_name['command_digest'].message_type = _DIGEST
+_ACTION.fields_by_name['input_root_digest'].message_type = _DIGEST
+_ACTION.fields_by_name['timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
+_COMMAND_ENVIRONMENTVARIABLE.containing_type = _COMMAND
+_COMMAND.fields_by_name['environment_variables'].message_type = _COMMAND_ENVIRONMENTVARIABLE
+_COMMAND.fields_by_name['platform'].message_type = _PLATFORM
+_PLATFORM_PROPERTY.containing_type = _PLATFORM
+_PLATFORM.fields_by_name['properties'].message_type = _PLATFORM_PROPERTY
+_DIRECTORY.fields_by_name['files'].message_type = _FILENODE
+_DIRECTORY.fields_by_name['directories'].message_type = _DIRECTORYNODE
+_DIRECTORY.fields_by_name['symlinks'].message_type = _SYMLINKNODE
+_FILENODE.fields_by_name['digest'].message_type = _DIGEST
+_DIRECTORYNODE.fields_by_name['digest'].message_type = _DIGEST
+_EXECUTEDACTIONMETADATA.fields_by_name['queued_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['worker_start_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['worker_completed_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['input_fetch_start_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['input_fetch_completed_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['execution_start_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['execution_completed_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['output_upload_start_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_EXECUTEDACTIONMETADATA.fields_by_name['output_upload_completed_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
+_ACTIONRESULT.fields_by_name['output_files'].message_type = _OUTPUTFILE
+_ACTIONRESULT.fields_by_name['output_directories'].message_type = _OUTPUTDIRECTORY
+_ACTIONRESULT.fields_by_name['stdout_digest'].message_type = _DIGEST
+_ACTIONRESULT.fields_by_name['stderr_digest'].message_type = _DIGEST
+_ACTIONRESULT.fields_by_name['execution_metadata'].message_type = _EXECUTEDACTIONMETADATA
+_OUTPUTFILE.fields_by_name['digest'].message_type = _DIGEST
+_TREE.fields_by_name['root'].message_type = _DIRECTORY
+_TREE.fields_by_name['children'].message_type = _DIRECTORY
+_OUTPUTDIRECTORY.fields_by_name['tree_digest'].message_type = _DIGEST
+_EXECUTEREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
+_EXECUTEREQUEST.fields_by_name['execution_policy'].message_type = _EXECUTIONPOLICY
+_EXECUTEREQUEST.fields_by_name['results_cache_policy'].message_type = _RESULTSCACHEPOLICY
+_LOGFILE.fields_by_name['digest'].message_type = _DIGEST
+_EXECUTERESPONSE_SERVERLOGSENTRY.fields_by_name['value'].message_type = _LOGFILE
+_EXECUTERESPONSE_SERVERLOGSENTRY.containing_type = _EXECUTERESPONSE
+_EXECUTERESPONSE.fields_by_name['result'].message_type = _ACTIONRESULT
+_EXECUTERESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_EXECUTERESPONSE.fields_by_name['server_logs'].message_type = _EXECUTERESPONSE_SERVERLOGSENTRY
+_EXECUTEOPERATIONMETADATA.fields_by_name['stage'].enum_type = _EXECUTEOPERATIONMETADATA_STAGE
+_EXECUTEOPERATIONMETADATA.fields_by_name['action_digest'].message_type = _DIGEST
+_EXECUTEOPERATIONMETADATA_STAGE.containing_type = _EXECUTEOPERATIONMETADATA
+_GETACTIONRESULTREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
+_UPDATEACTIONRESULTREQUEST.fields_by_name['action_digest'].message_type = _DIGEST
+_UPDATEACTIONRESULTREQUEST.fields_by_name['action_result'].message_type = _ACTIONRESULT
+_UPDATEACTIONRESULTREQUEST.fields_by_name['results_cache_policy'].message_type = _RESULTSCACHEPOLICY
+_FINDMISSINGBLOBSREQUEST.fields_by_name['blob_digests'].message_type = _DIGEST
+_FINDMISSINGBLOBSRESPONSE.fields_by_name['missing_blob_digests'].message_type = _DIGEST
+_BATCHUPDATEBLOBSREQUEST_REQUEST.fields_by_name['digest'].message_type = _DIGEST
+_BATCHUPDATEBLOBSREQUEST_REQUEST.containing_type = _BATCHUPDATEBLOBSREQUEST
+_BATCHUPDATEBLOBSREQUEST.fields_by_name['requests'].message_type = _BATCHUPDATEBLOBSREQUEST_REQUEST
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE.fields_by_name['digest'].message_type = _DIGEST
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_BATCHUPDATEBLOBSRESPONSE_RESPONSE.containing_type = _BATCHUPDATEBLOBSRESPONSE
+_BATCHUPDATEBLOBSRESPONSE.fields_by_name['responses'].message_type = _BATCHUPDATEBLOBSRESPONSE_RESPONSE
+_BATCHREADBLOBSREQUEST.fields_by_name['digests'].message_type = _DIGEST
+_BATCHREADBLOBSRESPONSE_RESPONSE.fields_by_name['digest'].message_type = _DIGEST
+_BATCHREADBLOBSRESPONSE_RESPONSE.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_BATCHREADBLOBSRESPONSE_RESPONSE.containing_type = _BATCHREADBLOBSRESPONSE
+_BATCHREADBLOBSRESPONSE.fields_by_name['responses'].message_type = _BATCHREADBLOBSRESPONSE_RESPONSE
+_GETTREEREQUEST.fields_by_name['root_digest'].message_type = _DIGEST
+_GETTREERESPONSE.fields_by_name['directories'].message_type = _DIRECTORY
+_SERVERCAPABILITIES.fields_by_name['cache_capabilities'].message_type = _CACHECAPABILITIES
+_SERVERCAPABILITIES.fields_by_name['execution_capabilities'].message_type = _EXECUTIONCAPABILITIES
+_SERVERCAPABILITIES.fields_by_name['deprecated_api_version'].message_type = build_dot_bazel_dot_semver_dot_semver__pb2._SEMVER
+_SERVERCAPABILITIES.fields_by_name['low_api_version'].message_type = build_dot_bazel_dot_semver_dot_semver__pb2._SEMVER
+_SERVERCAPABILITIES.fields_by_name['high_api_version'].message_type = build_dot_bazel_dot_semver_dot_semver__pb2._SEMVER
+_PRIORITYCAPABILITIES_PRIORITYRANGE.containing_type = _PRIORITYCAPABILITIES
+_PRIORITYCAPABILITIES.fields_by_name['priorities'].message_type = _PRIORITYCAPABILITIES_PRIORITYRANGE
+_CACHECAPABILITIES.fields_by_name['digest_function'].enum_type = _DIGESTFUNCTION
+_CACHECAPABILITIES.fields_by_name['action_cache_update_capabilities'].message_type = _ACTIONCACHEUPDATECAPABILITIES
+_CACHECAPABILITIES.fields_by_name['cache_priority_capabilities'].message_type = _PRIORITYCAPABILITIES
+_CACHECAPABILITIES.fields_by_name['symlink_absolute_path_strategy'].enum_type = _CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY
+_CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY.containing_type = _CACHECAPABILITIES
+_EXECUTIONCAPABILITIES.fields_by_name['digest_function'].enum_type = _DIGESTFUNCTION
+_EXECUTIONCAPABILITIES.fields_by_name['execution_priority_capabilities'].message_type = _PRIORITYCAPABILITIES
+_REQUESTMETADATA.fields_by_name['tool_details'].message_type = _TOOLDETAILS
+DESCRIPTOR.message_types_by_name['Action'] = _ACTION
+DESCRIPTOR.message_types_by_name['Command'] = _COMMAND
+DESCRIPTOR.message_types_by_name['Platform'] = _PLATFORM
+DESCRIPTOR.message_types_by_name['Directory'] = _DIRECTORY
+DESCRIPTOR.message_types_by_name['FileNode'] = _FILENODE
+DESCRIPTOR.message_types_by_name['DirectoryNode'] = _DIRECTORYNODE
+DESCRIPTOR.message_types_by_name['SymlinkNode'] = _SYMLINKNODE
+DESCRIPTOR.message_types_by_name['Digest'] = _DIGEST
+DESCRIPTOR.message_types_by_name['ExecutedActionMetadata'] = _EXECUTEDACTIONMETADATA
+DESCRIPTOR.message_types_by_name['ActionResult'] = _ACTIONRESULT
+DESCRIPTOR.message_types_by_name['OutputFile'] = _OUTPUTFILE
+DESCRIPTOR.message_types_by_name['Tree'] = _TREE
+DESCRIPTOR.message_types_by_name['OutputDirectory'] = _OUTPUTDIRECTORY
+DESCRIPTOR.message_types_by_name['ExecutionPolicy'] = _EXECUTIONPOLICY
+DESCRIPTOR.message_types_by_name['ResultsCachePolicy'] = _RESULTSCACHEPOLICY
+DESCRIPTOR.message_types_by_name['ExecuteRequest'] = _EXECUTEREQUEST
+DESCRIPTOR.message_types_by_name['LogFile'] = _LOGFILE
+DESCRIPTOR.message_types_by_name['ExecuteResponse'] = _EXECUTERESPONSE
+DESCRIPTOR.message_types_by_name['ExecuteOperationMetadata'] = _EXECUTEOPERATIONMETADATA
+DESCRIPTOR.message_types_by_name['WaitExecutionRequest'] = _WAITEXECUTIONREQUEST
+DESCRIPTOR.message_types_by_name['GetActionResultRequest'] = _GETACTIONRESULTREQUEST
+DESCRIPTOR.message_types_by_name['UpdateActionResultRequest'] = _UPDATEACTIONRESULTREQUEST
+DESCRIPTOR.message_types_by_name['FindMissingBlobsRequest'] = _FINDMISSINGBLOBSREQUEST
+DESCRIPTOR.message_types_by_name['FindMissingBlobsResponse'] = _FINDMISSINGBLOBSRESPONSE
+DESCRIPTOR.message_types_by_name['BatchUpdateBlobsRequest'] = _BATCHUPDATEBLOBSREQUEST
+DESCRIPTOR.message_types_by_name['BatchUpdateBlobsResponse'] = _BATCHUPDATEBLOBSRESPONSE
+DESCRIPTOR.message_types_by_name['BatchReadBlobsRequest'] = _BATCHREADBLOBSREQUEST
+DESCRIPTOR.message_types_by_name['BatchReadBlobsResponse'] = _BATCHREADBLOBSRESPONSE
+DESCRIPTOR.message_types_by_name['GetTreeRequest'] = _GETTREEREQUEST
+DESCRIPTOR.message_types_by_name['GetTreeResponse'] = _GETTREERESPONSE
+DESCRIPTOR.message_types_by_name['GetCapabilitiesRequest'] = _GETCAPABILITIESREQUEST
+DESCRIPTOR.message_types_by_name['ServerCapabilities'] = _SERVERCAPABILITIES
+DESCRIPTOR.message_types_by_name['ActionCacheUpdateCapabilities'] = _ACTIONCACHEUPDATECAPABILITIES
+DESCRIPTOR.message_types_by_name['PriorityCapabilities'] = _PRIORITYCAPABILITIES
+DESCRIPTOR.message_types_by_name['CacheCapabilities'] = _CACHECAPABILITIES
+DESCRIPTOR.message_types_by_name['ExecutionCapabilities'] = _EXECUTIONCAPABILITIES
+DESCRIPTOR.message_types_by_name['ToolDetails'] = _TOOLDETAILS
+DESCRIPTOR.message_types_by_name['RequestMetadata'] = _REQUESTMETADATA
+DESCRIPTOR.enum_types_by_name['DigestFunction'] = _DIGESTFUNCTION
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Action = _reflection.GeneratedProtocolMessageType('Action', (_message.Message,), dict(
+ DESCRIPTOR = _ACTION,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Action)
+ ))
+_sym_db.RegisterMessage(Action)
+
+Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), dict(
+
+ EnvironmentVariable = _reflection.GeneratedProtocolMessageType('EnvironmentVariable', (_message.Message,), dict(
+ DESCRIPTOR = _COMMAND_ENVIRONMENTVARIABLE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Command.EnvironmentVariable)
+ ))
+ ,
+ DESCRIPTOR = _COMMAND,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Command)
+ ))
+_sym_db.RegisterMessage(Command)
+_sym_db.RegisterMessage(Command.EnvironmentVariable)
+
+Platform = _reflection.GeneratedProtocolMessageType('Platform', (_message.Message,), dict(
+
+ Property = _reflection.GeneratedProtocolMessageType('Property', (_message.Message,), dict(
+ DESCRIPTOR = _PLATFORM_PROPERTY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Platform.Property)
+ ))
+ ,
+ DESCRIPTOR = _PLATFORM,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Platform)
+ ))
+_sym_db.RegisterMessage(Platform)
+_sym_db.RegisterMessage(Platform.Property)
+
+Directory = _reflection.GeneratedProtocolMessageType('Directory', (_message.Message,), dict(
+ DESCRIPTOR = _DIRECTORY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Directory)
+ ))
+_sym_db.RegisterMessage(Directory)
+
+FileNode = _reflection.GeneratedProtocolMessageType('FileNode', (_message.Message,), dict(
+ DESCRIPTOR = _FILENODE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FileNode)
+ ))
+_sym_db.RegisterMessage(FileNode)
+
+DirectoryNode = _reflection.GeneratedProtocolMessageType('DirectoryNode', (_message.Message,), dict(
+ DESCRIPTOR = _DIRECTORYNODE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.DirectoryNode)
+ ))
+_sym_db.RegisterMessage(DirectoryNode)
+
+SymlinkNode = _reflection.GeneratedProtocolMessageType('SymlinkNode', (_message.Message,), dict(
+ DESCRIPTOR = _SYMLINKNODE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.SymlinkNode)
+ ))
+_sym_db.RegisterMessage(SymlinkNode)
+
+Digest = _reflection.GeneratedProtocolMessageType('Digest', (_message.Message,), dict(
+ DESCRIPTOR = _DIGEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Digest)
+ ))
+_sym_db.RegisterMessage(Digest)
+
+ExecutedActionMetadata = _reflection.GeneratedProtocolMessageType('ExecutedActionMetadata', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTEDACTIONMETADATA,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutedActionMetadata)
+ ))
+_sym_db.RegisterMessage(ExecutedActionMetadata)
+
+ActionResult = _reflection.GeneratedProtocolMessageType('ActionResult', (_message.Message,), dict(
+ DESCRIPTOR = _ACTIONRESULT,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ActionResult)
+ ))
+_sym_db.RegisterMessage(ActionResult)
+
+OutputFile = _reflection.GeneratedProtocolMessageType('OutputFile', (_message.Message,), dict(
+ DESCRIPTOR = _OUTPUTFILE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.OutputFile)
+ ))
+_sym_db.RegisterMessage(OutputFile)
+
+Tree = _reflection.GeneratedProtocolMessageType('Tree', (_message.Message,), dict(
+ DESCRIPTOR = _TREE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Tree)
+ ))
+_sym_db.RegisterMessage(Tree)
+
+OutputDirectory = _reflection.GeneratedProtocolMessageType('OutputDirectory', (_message.Message,), dict(
+ DESCRIPTOR = _OUTPUTDIRECTORY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.OutputDirectory)
+ ))
+_sym_db.RegisterMessage(OutputDirectory)
+
+ExecutionPolicy = _reflection.GeneratedProtocolMessageType('ExecutionPolicy', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTIONPOLICY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutionPolicy)
+ ))
+_sym_db.RegisterMessage(ExecutionPolicy)
+
+ResultsCachePolicy = _reflection.GeneratedProtocolMessageType('ResultsCachePolicy', (_message.Message,), dict(
+ DESCRIPTOR = _RESULTSCACHEPOLICY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ResultsCachePolicy)
+ ))
+_sym_db.RegisterMessage(ResultsCachePolicy)
+
+ExecuteRequest = _reflection.GeneratedProtocolMessageType('ExecuteRequest', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTEREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteRequest)
+ ))
+_sym_db.RegisterMessage(ExecuteRequest)
+
+LogFile = _reflection.GeneratedProtocolMessageType('LogFile', (_message.Message,), dict(
+ DESCRIPTOR = _LOGFILE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.LogFile)
+ ))
+_sym_db.RegisterMessage(LogFile)
+
+ExecuteResponse = _reflection.GeneratedProtocolMessageType('ExecuteResponse', (_message.Message,), dict(
+
+ ServerLogsEntry = _reflection.GeneratedProtocolMessageType('ServerLogsEntry', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTERESPONSE_SERVERLOGSENTRY,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry)
+ ))
+ ,
+ DESCRIPTOR = _EXECUTERESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteResponse)
+ ))
+_sym_db.RegisterMessage(ExecuteResponse)
+_sym_db.RegisterMessage(ExecuteResponse.ServerLogsEntry)
+
+ExecuteOperationMetadata = _reflection.GeneratedProtocolMessageType('ExecuteOperationMetadata', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTEOPERATIONMETADATA,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteOperationMetadata)
+ ))
+_sym_db.RegisterMessage(ExecuteOperationMetadata)
+
+WaitExecutionRequest = _reflection.GeneratedProtocolMessageType('WaitExecutionRequest', (_message.Message,), dict(
+ DESCRIPTOR = _WAITEXECUTIONREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.WaitExecutionRequest)
+ ))
+_sym_db.RegisterMessage(WaitExecutionRequest)
+
+GetActionResultRequest = _reflection.GeneratedProtocolMessageType('GetActionResultRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETACTIONRESULTREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetActionResultRequest)
+ ))
+_sym_db.RegisterMessage(GetActionResultRequest)
+
+UpdateActionResultRequest = _reflection.GeneratedProtocolMessageType('UpdateActionResultRequest', (_message.Message,), dict(
+ DESCRIPTOR = _UPDATEACTIONRESULTREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.UpdateActionResultRequest)
+ ))
+_sym_db.RegisterMessage(UpdateActionResultRequest)
+
+FindMissingBlobsRequest = _reflection.GeneratedProtocolMessageType('FindMissingBlobsRequest', (_message.Message,), dict(
+ DESCRIPTOR = _FINDMISSINGBLOBSREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FindMissingBlobsRequest)
+ ))
+_sym_db.RegisterMessage(FindMissingBlobsRequest)
+
+FindMissingBlobsResponse = _reflection.GeneratedProtocolMessageType('FindMissingBlobsResponse', (_message.Message,), dict(
+ DESCRIPTOR = _FINDMISSINGBLOBSRESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FindMissingBlobsResponse)
+ ))
+_sym_db.RegisterMessage(FindMissingBlobsResponse)
+
+BatchUpdateBlobsRequest = _reflection.GeneratedProtocolMessageType('BatchUpdateBlobsRequest', (_message.Message,), dict(
+
+ Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
+ DESCRIPTOR = _BATCHUPDATEBLOBSREQUEST_REQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request)
+ ))
+ ,
+ DESCRIPTOR = _BATCHUPDATEBLOBSREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsRequest)
+ ))
+_sym_db.RegisterMessage(BatchUpdateBlobsRequest)
+_sym_db.RegisterMessage(BatchUpdateBlobsRequest.Request)
+
+BatchUpdateBlobsResponse = _reflection.GeneratedProtocolMessageType('BatchUpdateBlobsResponse', (_message.Message,), dict(
+
+ Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
+ DESCRIPTOR = _BATCHUPDATEBLOBSRESPONSE_RESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response)
+ ))
+ ,
+ DESCRIPTOR = _BATCHUPDATEBLOBSRESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsResponse)
+ ))
+_sym_db.RegisterMessage(BatchUpdateBlobsResponse)
+_sym_db.RegisterMessage(BatchUpdateBlobsResponse.Response)
+
+BatchReadBlobsRequest = _reflection.GeneratedProtocolMessageType('BatchReadBlobsRequest', (_message.Message,), dict(
+ DESCRIPTOR = _BATCHREADBLOBSREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchReadBlobsRequest)
+ ))
+_sym_db.RegisterMessage(BatchReadBlobsRequest)
+
+BatchReadBlobsResponse = _reflection.GeneratedProtocolMessageType('BatchReadBlobsResponse', (_message.Message,), dict(
+
+ Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
+ DESCRIPTOR = _BATCHREADBLOBSRESPONSE_RESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response)
+ ))
+ ,
+ DESCRIPTOR = _BATCHREADBLOBSRESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchReadBlobsResponse)
+ ))
+_sym_db.RegisterMessage(BatchReadBlobsResponse)
+_sym_db.RegisterMessage(BatchReadBlobsResponse.Response)
+
+GetTreeRequest = _reflection.GeneratedProtocolMessageType('GetTreeRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETTREEREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetTreeRequest)
+ ))
+_sym_db.RegisterMessage(GetTreeRequest)
+
+GetTreeResponse = _reflection.GeneratedProtocolMessageType('GetTreeResponse', (_message.Message,), dict(
+ DESCRIPTOR = _GETTREERESPONSE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetTreeResponse)
+ ))
+_sym_db.RegisterMessage(GetTreeResponse)
+
+GetCapabilitiesRequest = _reflection.GeneratedProtocolMessageType('GetCapabilitiesRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETCAPABILITIESREQUEST,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetCapabilitiesRequest)
+ ))
+_sym_db.RegisterMessage(GetCapabilitiesRequest)
+
+ServerCapabilities = _reflection.GeneratedProtocolMessageType('ServerCapabilities', (_message.Message,), dict(
+ DESCRIPTOR = _SERVERCAPABILITIES,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ServerCapabilities)
+ ))
+_sym_db.RegisterMessage(ServerCapabilities)
+
+ActionCacheUpdateCapabilities = _reflection.GeneratedProtocolMessageType('ActionCacheUpdateCapabilities', (_message.Message,), dict(
+ DESCRIPTOR = _ACTIONCACHEUPDATECAPABILITIES,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities)
+ ))
+_sym_db.RegisterMessage(ActionCacheUpdateCapabilities)
+
+PriorityCapabilities = _reflection.GeneratedProtocolMessageType('PriorityCapabilities', (_message.Message,), dict(
+
+ PriorityRange = _reflection.GeneratedProtocolMessageType('PriorityRange', (_message.Message,), dict(
+ DESCRIPTOR = _PRIORITYCAPABILITIES_PRIORITYRANGE,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange)
+ ))
+ ,
+ DESCRIPTOR = _PRIORITYCAPABILITIES,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.PriorityCapabilities)
+ ))
+_sym_db.RegisterMessage(PriorityCapabilities)
+_sym_db.RegisterMessage(PriorityCapabilities.PriorityRange)
+
+CacheCapabilities = _reflection.GeneratedProtocolMessageType('CacheCapabilities', (_message.Message,), dict(
+ DESCRIPTOR = _CACHECAPABILITIES,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.CacheCapabilities)
+ ))
+_sym_db.RegisterMessage(CacheCapabilities)
+
+ExecutionCapabilities = _reflection.GeneratedProtocolMessageType('ExecutionCapabilities', (_message.Message,), dict(
+ DESCRIPTOR = _EXECUTIONCAPABILITIES,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutionCapabilities)
+ ))
+_sym_db.RegisterMessage(ExecutionCapabilities)
+
+ToolDetails = _reflection.GeneratedProtocolMessageType('ToolDetails', (_message.Message,), dict(
+ DESCRIPTOR = _TOOLDETAILS,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ToolDetails)
+ ))
+_sym_db.RegisterMessage(ToolDetails)
+
+RequestMetadata = _reflection.GeneratedProtocolMessageType('RequestMetadata', (_message.Message,), dict(
+ DESCRIPTOR = _REQUESTMETADATA,
+ __module__ = 'build.bazel.remote.execution.v2.remote_execution_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.RequestMetadata)
+ ))
+_sym_db.RegisterMessage(RequestMetadata)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\037build.bazel.remote.execution.v2B\024RemoteExecutionProtoP\001Z\017remoteexecution\242\002\003REX\252\002\037Build.Bazel.Remote.Execution.V2'))
+_EXECUTERESPONSE_SERVERLOGSENTRY.has_options = True
+_EXECUTERESPONSE_SERVERLOGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
+
+_EXECUTION = _descriptor.ServiceDescriptor(
+ name='Execution',
+ full_name='build.bazel.remote.execution.v2.Execution',
+ file=DESCRIPTOR,
+ index=0,
+ options=None,
+ serialized_start=7276,
+ serialized_end=7589,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='Execute',
+ full_name='build.bazel.remote.execution.v2.Execution.Execute',
+ index=0,
+ containing_service=None,
+ input_type=_EXECUTEREQUEST,
+ output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002+\"&/v2/{instance_name=**}/actions:execute:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='WaitExecution',
+ full_name='build.bazel.remote.execution.v2.Execution.WaitExecution',
+ index=1,
+ containing_service=None,
+ input_type=_WAITEXECUTIONREQUEST,
+ output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002+\"&/v2/{name=operations/**}:waitExecution:\001*')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_EXECUTION)
+
+DESCRIPTOR.services_by_name['Execution'] = _EXECUTION
+
+
+_ACTIONCACHE = _descriptor.ServiceDescriptor(
+ name='ActionCache',
+ full_name='build.bazel.remote.execution.v2.ActionCache',
+ file=DESCRIPTOR,
+ index=1,
+ options=None,
+ serialized_start=7592,
+ serialized_end=8062,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='GetActionResult',
+ full_name='build.bazel.remote.execution.v2.ActionCache.GetActionResult',
+ index=0,
+ containing_service=None,
+ input_type=_GETACTIONRESULTREQUEST,
+ output_type=_ACTIONRESULT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002V\022T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='UpdateActionResult',
+ full_name='build.bazel.remote.execution.v2.ActionCache.UpdateActionResult',
+ index=1,
+ containing_service=None,
+ input_type=_UPDATEACTIONRESULTREQUEST,
+ output_type=_ACTIONRESULT,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002e\032T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_ACTIONCACHE)
+
+DESCRIPTOR.services_by_name['ActionCache'] = _ACTIONCACHE
+
+
+_CONTENTADDRESSABLESTORAGE = _descriptor.ServiceDescriptor(
+ name='ContentAddressableStorage',
+ full_name='build.bazel.remote.execution.v2.ContentAddressableStorage',
+ file=DESCRIPTOR,
+ index=2,
+ options=None,
+ serialized_start=8065,
+ serialized_end=8860,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='FindMissingBlobs',
+ full_name='build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs',
+ index=0,
+ containing_service=None,
+ input_type=_FINDMISSINGBLOBSREQUEST,
+ output_type=_FINDMISSINGBLOBSRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002-\"(/v2/{instance_name=**}/blobs:findMissing:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='BatchUpdateBlobs',
+ full_name='build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs',
+ index=1,
+ containing_service=None,
+ input_type=_BATCHUPDATEBLOBSREQUEST,
+ output_type=_BATCHUPDATEBLOBSRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002-\"(/v2/{instance_name=**}/blobs:batchUpdate:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='BatchReadBlobs',
+ full_name='build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs',
+ index=2,
+ containing_service=None,
+ input_type=_BATCHREADBLOBSREQUEST,
+ output_type=_BATCHREADBLOBSRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002+\"&/v2/{instance_name=**}/blobs:batchRead:\001*')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='GetTree',
+ full_name='build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree',
+ index=3,
+ containing_service=None,
+ input_type=_GETTREEREQUEST,
+ output_type=_GETTREERESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002R\022P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_CONTENTADDRESSABLESTORAGE)
+
+DESCRIPTOR.services_by_name['ContentAddressableStorage'] = _CONTENTADDRESSABLESTORAGE
+
+
+_CAPABILITIES = _descriptor.ServiceDescriptor(
+ name='Capabilities',
+ full_name='build.bazel.remote.execution.v2.Capabilities',
+ file=DESCRIPTOR,
+ index=3,
+ options=None,
+ serialized_start=8863,
+ serialized_end=9052,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='GetCapabilities',
+ full_name='build.bazel.remote.execution.v2.Capabilities.GetCapabilities',
+ index=0,
+ containing_service=None,
+ input_type=_GETCAPABILITIESREQUEST,
+ output_type=_SERVERCAPABILITIES,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002%\022#/v2/{instance_name=**}/capabilities')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_CAPABILITIES)
+
+DESCRIPTOR.services_by_name['Capabilities'] = _CAPABILITIES
+
+# @@protoc_insertion_point(module_scope)
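
The message classes registered above behave like ordinary protobuf messages once this module is imported. A minimal usage sketch, assuming SHA-256 as the digest function; the compiler command and values are illustrative and not part of this patch:

    import hashlib

    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    # Describe a command; EnvironmentVariable is the nested type registered above.
    command = remote_execution_pb2.Command(
        arguments=['gcc', '-o', 'hello', 'hello.c'],
        environment_variables=[
            remote_execution_pb2.Command.EnvironmentVariable(name='PATH', value='/usr/bin'),
        ],
    )

    # Digest the serialized Command and reference it from an Action.
    command_blob = command.SerializeToString()
    command_digest = remote_execution_pb2.Digest(
        hash=hashlib.sha256(command_blob).hexdigest(),
        size_bytes=len(command_blob),
    )
    action = remote_execution_pb2.Action(command_digest=command_digest)
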
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
new file mode 100644
index 000000000..3769a680d
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
@@ -0,0 +1,593 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2
+from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
+
+
+class ExecutionStub(object):
+ """The Remote Execution API is used to execute an
+ [Action][build.bazel.remote.execution.v2.Action] on the remote
+ workers.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.Execute = channel.unary_stream(
+ '/build.bazel.remote.execution.v2.Execution/Execute',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
+ self.WaitExecution = channel.unary_stream(
+ '/build.bazel.remote.execution.v2.Execution/WaitExecution',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
+
+
+class ExecutionServicer(object):
+ """The Remote Execution API is used to execute an
+ [Action][build.bazel.remote.execution.v2.Action] on the remote
+ workers.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def Execute(self, request, context):
+ """Execute an action remotely.
+
+ In order to execute an action, the client must first upload all of the
+ inputs, the
+ [Command][build.bazel.remote.execution.v2.Command] to run, and the
+ [Action][build.bazel.remote.execution.v2.Action] into the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+ It then calls `Execute` with an `action_digest` referring to them. The
+ server will run the action and eventually return the result.
+
+ The input `Action`'s fields MUST meet the various canonicalization
+ requirements specified in the documentation for their types so that it has
+ the same digest as other logically equivalent `Action`s. The server MAY
+ enforce the requirements and return errors if a non-canonical input is
+ received. It MAY also proceed without verifying some or all of the
+ requirements, such as for performance reasons. If the server does not
+ verify the requirement, then it will treat the `Action` as distinct from
+ another logically equivalent action if they hash differently.
+
+ Returns a stream of
+ [google.longrunning.Operation][google.longrunning.Operation] messages
+ describing the resulting execution, with eventual `response`
+ [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The
+ `metadata` on the operation is of type
+ [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata].
+
+    If the client remains connected after the first response is returned by
+    the server, then updates are streamed as if the client had called
+ [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]
+ until the execution completes or the request reaches an error. The
+ operation can also be queried using [Operations
+ API][google.longrunning.Operations.GetOperation].
+
+ The server NEED NOT implement other methods or functionality of the
+ Operations API.
+
+ Errors discovered during creation of the `Operation` will be reported
+ as gRPC Status errors, while errors that occurred while running the
+ action will be reported in the `status` field of the `ExecuteResponse`. The
+ server MUST NOT set the `error` field of the `Operation` proto.
+ The possible errors include:
+ * `INVALID_ARGUMENT`: One or more arguments are invalid.
+ * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
+ action requested, such as a missing input or command or no worker being
+ available. The client may be able to fix the errors and retry.
+ * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
+ the action.
+ * `UNAVAILABLE`: Due to a transient condition, such as all workers being
+ occupied (and the server does not support a queue), the action could not
+ be started. The client should retry.
+ * `INTERNAL`: An internal error occurred in the execution engine or the
+ worker.
+ * `DEADLINE_EXCEEDED`: The execution timed out.
+
+ In the case of a missing input or command, the server SHOULD additionally
+ send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
+ where, for each requested blob not present in the CAS, there is a
+ `Violation` with a `type` of `MISSING` and a `subject` of
+ `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def WaitExecution(self, request, context):
+ """Wait for an execution operation to complete. When the client initially
+ makes the request, the server immediately responds with the current status
+ of the execution. The server will leave the request stream open until the
+ operation completes, and then respond with the completed operation. The
+ server MAY choose to stream additional updates as execution progresses,
+ such as to provide an update as to the state of the execution.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ExecutionServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'Execute': grpc.unary_stream_rpc_method_handler(
+ servicer.Execute,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ 'WaitExecution': grpc.unary_stream_rpc_method_handler(
+ servicer.WaitExecution,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.Execution', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
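
The stream returned by ExecutionStub.Execute yields google.longrunning.Operation messages, as described in the Execute docstring above. A minimal client-side sketch; the endpoint, instance name and digest are hypothetical placeholders:

    import grpc

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2, remote_execution_pb2_grpc)

    channel = grpc.insecure_channel('localhost:50051')          # hypothetical endpoint
    execution = remote_execution_pb2_grpc.ExecutionStub(channel)

    request = remote_execution_pb2.ExecuteRequest(
        instance_name='main',                                    # hypothetical instance
        action_digest=remote_execution_pb2.Digest(
            hash='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
            size_bytes=0),                                       # placeholder digest
        skip_cache_lookup=False,
    )
    for operation in execution.Execute(request):
        if operation.done:
            # Per the docstring above, execution failures are reported in
            # response.status rather than in operation.error.
            response = remote_execution_pb2.ExecuteResponse()
            operation.response.Unpack(response)
            print('exit code:', response.result.exit_code)
            break
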
+
+
+class ActionCacheStub(object):
+ """The action cache API is used to query whether a given action has already been
+ performed and, if so, retrieve its result. Unlike the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
+ which addresses blobs by their own content, the action cache addresses the
+ [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
+ digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
+ which produced them.
+
+ The lifetime of entries in the action cache is implementation-specific, but
+ the server SHOULD assume that more recently used entries are more likely to
+ be used again. Additionally, action cache implementations SHOULD ensure that
+ any blobs referenced in the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+ are still valid when returning a result.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetActionResult = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ActionCache/GetActionResult',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
+ )
+ self.UpdateActionResult = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ActionCache/UpdateActionResult',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
+ )
+
+
+class ActionCacheServicer(object):
+ """The action cache API is used to query whether a given action has already been
+ performed and, if so, retrieve its result. Unlike the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
+ which addresses blobs by their own content, the action cache addresses the
+ [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
+ digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
+ which produced them.
+
+ The lifetime of entries in the action cache is implementation-specific, but
+ the server SHOULD assume that more recently used entries are more likely to
+ be used again. Additionally, action cache implementations SHOULD ensure that
+ any blobs referenced in the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+ are still valid when returning a result.
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def GetActionResult(self, request, context):
+ """Retrieve a cached execution result.
+
+ Errors:
+ * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def UpdateActionResult(self, request, context):
+ """Upload a new execution result.
+
+ This method is intended for servers which implement the distributed cache
+ independently of the
+ [Execution][build.bazel.remote.execution.v2.Execution] API. As a
+ result, it is OPTIONAL for servers to implement.
+
+ In order to allow the server to perform access control based on the type of
+ action, and to assist with client debugging, the client MUST first upload
+    the [Action][build.bazel.remote.execution.v2.Action] that produced the
+ result, along with its
+ [Command][build.bazel.remote.execution.v2.Command], into the
+ `ContentAddressableStorage`.
+
+ Errors:
+ * `NOT_IMPLEMENTED`: This method is not supported by the server.
+ * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+ entry to the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ActionCacheServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'GetActionResult': grpc.unary_unary_rpc_method_handler(
+ servicer.GetActionResult,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString,
+ ),
+ 'UpdateActionResult': grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateActionResult,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.ActionCache', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
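
As the GetActionResult docstring above notes, a cache miss surfaces as a NOT_FOUND error; with the generated stub this arrives as a grpc.RpcError. A sketch with a hypothetical endpoint, instance name and digest:

    import grpc

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2, remote_execution_pb2_grpc)

    channel = grpc.insecure_channel('localhost:50051')          # hypothetical endpoint
    action_cache = remote_execution_pb2_grpc.ActionCacheStub(channel)

    request = remote_execution_pb2.GetActionResultRequest(
        instance_name='main',                                    # hypothetical instance
        action_digest=remote_execution_pb2.Digest(
            hash='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
            size_bytes=0),                                       # placeholder digest
    )
    try:
        result = action_cache.GetActionResult(request)
        print('cache hit, exit code:', result.exit_code)
    except grpc.RpcError as error:
        if error.code() != grpc.StatusCode.NOT_FOUND:
            raise
        print('cache miss, fall back to Execute()')
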
+
+
+class ContentAddressableStorageStub(object):
+ """The CAS (content-addressable storage) is used to store the inputs to and
+ outputs from the execution service. Each piece of content is addressed by the
+ digest of its binary data.
+
+ Most of the binary data stored in the CAS is opaque to the execution engine,
+ and is only used as a communication medium. In order to build an
+ [Action][build.bazel.remote.execution.v2.Action],
+ however, the client will need to also upload the
+ [Command][build.bazel.remote.execution.v2.Command] and input root
+ [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
+ The Command and Directory messages must be marshalled to wire format and then
+ uploaded under the hash as with any other piece of content. In practice, the
+ input root directory is likely to refer to other Directories in its
+ hierarchy, which must also each be uploaded on their own.
+
+ For small file uploads the client should group them together and call
+ [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
+ on chunks of no more than 10 MiB. For large uploads, the client must use the
+ [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+ `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+ where `instance_name` is as described in the next paragraph, `uuid` is a
+ version 4 UUID generated by the client, and `hash` and `size` are the
+ [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
+ `uuid` is used only to avoid collisions when multiple clients try to upload
+ the same file (or the same client tries to upload the file multiple times at
+ once on different threads), so the client MAY reuse the `uuid` for uploading
+ different blobs. The `resource_name` may optionally have a trailing filename
+ (or other metadata) for a client to use if it is storing URLs, as in
+ `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+ after the `size` is ignored.
+
+ A single server MAY support multiple instances of the execution system, each
+ with their own workers, storage, cache, etc. The exact relationship between
+    instances is up to the server. If the server does support multiple
+    instances, then the `instance_name`
+ is an identifier, possibly containing multiple path segments, used to
+ distinguish between the various instances on the server, in a manner defined
+    by the server. For servers which do not support multiple instances, the
+ `instance_name` is the empty path and the leading slash is omitted, so that
+ the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+
+ When attempting an upload, if another client has already completed the upload
+ (which may occur in the middle of a single upload if another client uploads
+ the same blob concurrently), the request will terminate immediately with
+ a response whose `committed_size` is the full size of the uploaded file
+ (regardless of how much data was transmitted by the client). If the client
+ completes the upload but the
+ [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
+ `INVALID_ARGUMENT` error will be returned. In either case, the client should
+ not attempt to retry the upload.
+
+ For downloading blobs, the client must use the
+ [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+ a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+ `instance_name` is the instance name (see above), and `hash` and `size` are
+ the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+
+ The lifetime of entries in the CAS is implementation specific, but it SHOULD
+ be long enough to allow for newly-added and recently looked-up entries to be
+ used in subsequent calls (e.g. to
+ [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.FindMissingBlobs = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/FindMissingBlobs',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.FromString,
+ )
+ self.BatchUpdateBlobs = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchUpdateBlobs',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.FromString,
+ )
+ self.BatchReadBlobs = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchReadBlobs',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.FromString,
+ )
+ self.GetTree = channel.unary_stream(
+ '/build.bazel.remote.execution.v2.ContentAddressableStorage/GetTree',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.FromString,
+ )
+
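
A sketch of the upload flow the docstring above describes: ask the CAS which blobs are actually missing, then build the ByteStream upload resource_name for each missing blob. The endpoint and instance name are hypothetical placeholders:

    import hashlib
    import uuid

    import grpc

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2, remote_execution_pb2_grpc)

    channel = grpc.insecure_channel('localhost:50051')          # hypothetical endpoint
    cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(channel)

    instance_name = 'main'                                       # hypothetical instance
    data = b'hello world\n'
    digest = remote_execution_pb2.Digest(
        hash=hashlib.sha256(data).hexdigest(), size_bytes=len(data))

    missing = cas.FindMissingBlobs(remote_execution_pb2.FindMissingBlobsRequest(
        instance_name=instance_name, blob_digests=[digest]))

    if missing.missing_blob_digests:
        # Resource name for a ByteStream Write, following the format above.
        resource_name = '{}/uploads/{}/blobs/{}/{}'.format(
            instance_name, uuid.uuid4(), digest.hash, digest.size_bytes)
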
+
+class ContentAddressableStorageServicer(object):
+ """The CAS (content-addressable storage) is used to store the inputs to and
+ outputs from the execution service. Each piece of content is addressed by the
+ digest of its binary data.
+
+ Most of the binary data stored in the CAS is opaque to the execution engine,
+ and is only used as a communication medium. In order to build an
+ [Action][build.bazel.remote.execution.v2.Action],
+ however, the client will need to also upload the
+ [Command][build.bazel.remote.execution.v2.Command] and input root
+ [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
+ The Command and Directory messages must be marshalled to wire format and then
+ uploaded under the hash as with any other piece of content. In practice, the
+ input root directory is likely to refer to other Directories in its
+ hierarchy, which must also each be uploaded on their own.
+
+ For small file uploads the client should group them together and call
+ [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
+ on chunks of no more than 10 MiB. For large uploads, the client must use the
+ [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+ `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+ where `instance_name` is as described in the next paragraph, `uuid` is a
+ version 4 UUID generated by the client, and `hash` and `size` are the
+ [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
+ `uuid` is used only to avoid collisions when multiple clients try to upload
+ the same file (or the same client tries to upload the file multiple times at
+ once on different threads), so the client MAY reuse the `uuid` for uploading
+ different blobs. The `resource_name` may optionally have a trailing filename
+ (or other metadata) for a client to use if it is storing URLs, as in
+ `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+ after the `size` is ignored.
+
+ A single server MAY support multiple instances of the execution system, each
+ with their own workers, storage, cache, etc. The exact relationship between
+    instances is up to the server. If the server does support multiple
+    instances, then the `instance_name`
+ is an identifier, possibly containing multiple path segments, used to
+ distinguish between the various instances on the server, in a manner defined
+    by the server. For servers which do not support multiple instances, the
+ `instance_name` is the empty path and the leading slash is omitted, so that
+ the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+
+ When attempting an upload, if another client has already completed the upload
+ (which may occur in the middle of a single upload if another client uploads
+ the same blob concurrently), the request will terminate immediately with
+ a response whose `committed_size` is the full size of the uploaded file
+ (regardless of how much data was transmitted by the client). If the client
+ completes the upload but the
+ [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
+ `INVALID_ARGUMENT` error will be returned. In either case, the client should
+ not attempt to retry the upload.
+
+ For downloading blobs, the client must use the
+ [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+ a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+ `instance_name` is the instance name (see above), and `hash` and `size` are
+ the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+
+ The lifetime of entries in the CAS is implementation specific, but it SHOULD
+ be long enough to allow for newly-added and recently looked-up entries to be
+ used in subsequent calls (e.g. to
+ [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
+
+ As with other services in the Remote Execution API, any call may return an
+ error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+ information about when the client should retry the request; clients SHOULD
+ respect the information provided.
+ """
+
+ def FindMissingBlobs(self, request, context):
+ """Determine if blobs are present in the CAS.
+
+ Clients can use this API before uploading blobs to determine which ones are
+ already present in the CAS and do not need to be uploaded again.
+
+ There are no method-specific errors.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def BatchUpdateBlobs(self, request, context):
+ """Upload many blobs at once.
+
+ The server may enforce a limit of the combined total size of blobs
+ to be uploaded using this API. This limit may be obtained using the
+ [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
+ Requests exceeding the limit should either be split into smaller
+ chunks or uploaded using the
+ [ByteStream API][google.bytestream.ByteStream], as appropriate.
+
+ This request is equivalent to calling a Bytestream `Write` request
+ on each individual blob, in parallel. The requests may succeed or fail
+ independently.
+
+ Errors:
+ * `INVALID_ARGUMENT`: The client attempted to upload more than the
+ server supported limit.
+
+ Individual requests may return the following errors, additionally:
+ * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
+ * `INVALID_ARGUMENT`: The
+ [Digest][build.bazel.remote.execution.v2.Digest] does not match the
+ provided data.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def BatchReadBlobs(self, request, context):
+ """Download many blobs at once.
+
+ The server may enforce a limit of the combined total size of blobs
+ to be downloaded using this API. This limit may be obtained using the
+ [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
+ Requests exceeding the limit should either be split into smaller
+ chunks or downloaded using the
+ [ByteStream API][google.bytestream.ByteStream], as appropriate.
+
+ This request is equivalent to calling a Bytestream `Read` request
+ on each individual blob, in parallel. The requests may succeed or fail
+ independently.
+
+ Errors:
+ * `INVALID_ARGUMENT`: The client attempted to read more than the
+ server supported limit.
+
+    Every error on an individual read will be returned in the corresponding
+    digest status.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetTree(self, request, context):
+ """Fetch the entire directory tree rooted at a node.
+
+ This request must be targeted at a
+ [Directory][build.bazel.remote.execution.v2.Directory] stored in the
+ [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+ (CAS). The server will enumerate the `Directory` tree recursively and
+ return every node descended from the root.
+
+ The GetTreeRequest.page_token parameter can be used to skip ahead in
+ the stream (e.g. when retrying a partially completed and aborted request),
+ by setting it to a value taken from GetTreeResponse.next_page_token of the
+    last successfully processed GetTreeResponse.
+
+ The exact traversal order is unspecified and, unless retrieving subsequent
+ pages from an earlier request, is not guaranteed to be stable across
+ multiple invocations of `GetTree`.
+
+ If part of the tree is missing from the CAS, the server will return the
+ portion present and omit the rest.
+
+ * `NOT_FOUND`: The requested tree root is not present in the CAS.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ContentAddressableStorageServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'FindMissingBlobs': grpc.unary_unary_rpc_method_handler(
+ servicer.FindMissingBlobs,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.SerializeToString,
+ ),
+ 'BatchUpdateBlobs': grpc.unary_unary_rpc_method_handler(
+ servicer.BatchUpdateBlobs,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.SerializeToString,
+ ),
+ 'BatchReadBlobs': grpc.unary_unary_rpc_method_handler(
+ servicer.BatchReadBlobs,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.SerializeToString,
+ ),
+ 'GetTree': grpc.unary_stream_rpc_method_handler(
+ servicer.GetTree,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.ContentAddressableStorage', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+class CapabilitiesStub(object):
+ """The Capabilities service may be used by remote execution clients to query
+ various server properties, in order to self-configure or return meaningful
+ error messages.
+
+ The query may include a particular `instance_name`, in which case the values
+ returned will pertain to that instance.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetCapabilities = channel.unary_unary(
+ '/build.bazel.remote.execution.v2.Capabilities/GetCapabilities',
+ request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.SerializeToString,
+ response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.FromString,
+ )
+
+
+class CapabilitiesServicer(object):
+ """The Capabilities service may be used by remote execution clients to query
+ various server properties, in order to self-configure or return meaningful
+ error messages.
+
+ The query may include a particular `instance_name`, in which case the values
+ returned will pertain to that instance.
+ """
+
+ def GetCapabilities(self, request, context):
+ """GetCapabilities returns the server capabilities configuration.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_CapabilitiesServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'GetCapabilities': grpc.unary_unary_rpc_method_handler(
+ servicer.GetCapabilities,
+ request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.FromString,
+ response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'build.bazel.remote.execution.v2.Capabilities', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
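
Before batching uploads, a client would typically query the Capabilities service described above to learn the server's limits. A sketch; the endpoint and instance name are hypothetical, and the 10 MiB fallback mirrors the guidance in the CAS docstring:

    import grpc

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2, remote_execution_pb2_grpc)

    channel = grpc.insecure_channel('localhost:50051')          # hypothetical endpoint
    capabilities = remote_execution_pb2_grpc.CapabilitiesStub(channel)

    caps = capabilities.GetCapabilities(
        remote_execution_pb2.GetCapabilitiesRequest(instance_name='main'))

    # Fall back to 10 MiB batches if the server does not advertise a limit.
    max_batch_bytes = caps.cache_capabilities.max_batch_total_size_bytes or 10 * 1024 * 1024
    print('supported API versions:', caps.low_api_version, '-', caps.high_api_version)
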
diff --git a/src/buildstream/_protos/build/bazel/semver/__init__.py b/src/buildstream/_protos/build/bazel/semver/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/semver/__init__.py
diff --git a/src/buildstream/_protos/build/bazel/semver/semver.proto b/src/buildstream/_protos/build/bazel/semver/semver.proto
new file mode 100644
index 000000000..2caf76bcc
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/semver/semver.proto
@@ -0,0 +1,24 @@
+// Copyright 2018 The Bazel Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package build.bazel.semver;
+
+message SemVer {
+ int32 major = 1;
+ int32 minor = 2;
+ int32 patch = 3;
+ string prerelease = 4;
+}
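
A minimal sketch of using the SemVer message defined above from the generated Python module, assuming the buildstream._protos package layout introduced in this change:

    from buildstream._protos.build.bazel.semver import semver_pb2

    # Construct a version value and round-trip it through the proto3 wire format.
    version = semver_pb2.SemVer(major=2, minor=0, patch=1, prerelease='rc1')
    data = version.SerializeToString()
    assert semver_pb2.SemVer.FromString(data) == version
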
diff --git a/src/buildstream/_protos/build/bazel/semver/semver_pb2.py b/src/buildstream/_protos/build/bazel/semver/semver_pb2.py
new file mode 100644
index 000000000..a36cf722a
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/semver/semver_pb2.py
@@ -0,0 +1,90 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: build/bazel/semver/semver.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='build/bazel/semver/semver.proto',
+ package='build.bazel.semver',
+ syntax='proto3',
+ serialized_pb=_b('\n\x1f\x62uild/bazel/semver/semver.proto\x12\x12\x62uild.bazel.semver\"I\n\x06SemVer\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\x12\n\nprerelease\x18\x04 \x01(\tb\x06proto3')
+)
+
+
+
+
+_SEMVER = _descriptor.Descriptor(
+ name='SemVer',
+ full_name='build.bazel.semver.SemVer',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='major', full_name='build.bazel.semver.SemVer.major', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='minor', full_name='build.bazel.semver.SemVer.minor', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='patch', full_name='build.bazel.semver.SemVer.patch', index=2,
+ number=3, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='prerelease', full_name='build.bazel.semver.SemVer.prerelease', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=55,
+ serialized_end=128,
+)
+
+DESCRIPTOR.message_types_by_name['SemVer'] = _SEMVER
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+SemVer = _reflection.GeneratedProtocolMessageType('SemVer', (_message.Message,), dict(
+ DESCRIPTOR = _SEMVER,
+ __module__ = 'build.bazel.semver.semver_pb2'
+ # @@protoc_insertion_point(class_scope:build.bazel.semver.SemVer)
+ ))
+_sym_db.RegisterMessage(SemVer)
+
+
+# @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/build/bazel/semver/semver_pb2_grpc.py b/src/buildstream/_protos/build/bazel/semver/semver_pb2_grpc.py
new file mode 100644
index 000000000..a89435267
--- /dev/null
+++ b/src/buildstream/_protos/build/bazel/semver/semver_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/src/buildstream/_protos/buildstream/__init__.py b/src/buildstream/_protos/buildstream/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/buildstream/__init__.py
diff --git a/src/buildstream/_protos/buildstream/v2/__init__.py b/src/buildstream/_protos/buildstream/v2/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/buildstream/v2/__init__.py
diff --git a/src/buildstream/_protos/buildstream/v2/artifact.proto b/src/buildstream/_protos/buildstream/v2/artifact.proto
new file mode 100644
index 000000000..56ddbca6b
--- /dev/null
+++ b/src/buildstream/_protos/buildstream/v2/artifact.proto
@@ -0,0 +1,88 @@
+// Copyright 2019 Bloomberg Finance LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Authors
+// Raoul Hidalgo Charman <raoul.hidalgo.charman@gmail.com>
+
+syntax = "proto3";
+
+package buildstream.v2;
+
+import "build/bazel/remote/execution/v2/remote_execution.proto";
+import "google/api/annotations.proto";
+
+service ArtifactService {
+ // Retrieves an Artifact message
+ //
+ // Errors:
+ // * `NOT_FOUND`: Artifact not found on server
+ rpc GetArtifact(GetArtifactRequest) returns (Artifact) {}
+
+ // Sets an Artifact message
+ //
+ // Errors:
+ // * `FAILED_PRECONDITION`: Files specified in upload aren't present in CAS
+ rpc UpdateArtifact(UpdateArtifactRequest) returns (Artifact) {}
+}
+
+message Artifact {
+ // This version number must always be present and can be used to
+ // further indicate presence or absence of parts of the proto at a
+ // later date. It only needs incrementing if a change to what is
+ // *mandatory* changes.
+ int32 version = 1;
+ // Core metadata
+ bool build_success = 2;
+ string build_error = 3; // optional
+ string build_error_details = 4;
+ string strong_key = 5;
+ string weak_key = 6;
+ bool was_workspaced = 7;
+ // digest of a Directory
+ build.bazel.remote.execution.v2.Digest files = 8;
+
+ // Information about the build dependencies
+ message Dependency {
+ string element_name = 1;
+ string cache_key = 2;
+ bool was_workspaced = 3;
+ };
+ repeated Dependency build_deps = 9;
+
+ // The public data is a yaml file which is stored into the CAS
+ // Digest is of a directory
+ build.bazel.remote.execution.v2.Digest public_data = 10;
+
+ // The logs are stored in the CAS
+ message LogFile {
+ string name = 1;
+ // digest of a file
+ build.bazel.remote.execution.v2.Digest digest = 2;
+ };
+ repeated LogFile logs = 11; // Zero or more log files here
+
+ // digest of a directory
+ build.bazel.remote.execution.v2.Digest buildtree = 12; // optional
+}
+
+message GetArtifactRequest {
+ string instance_name = 1;
+ string cache_key = 2;
+}
+
+message UpdateArtifactRequest {
+ string instance_name = 1;
+ string cache_key = 2;
+ Artifact artifact = 3;
+}
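
A minimal sketch of populating the Artifact message defined above; all field values are illustrative placeholders, and in practice the digests would refer to blobs already present in CAS:

    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    from buildstream._protos.buildstream.v2 import artifact_pb2

    artifact = artifact_pb2.Artifact(
        version=1,
        build_success=True,
        strong_key='element/abc123',   # placeholder cache keys
        weak_key='element/def456',
        files=remote_execution_pb2.Digest(hash='0' * 64, size_bytes=42),
    )
    # Nested Dependency and LogFile messages are appended via the repeated fields.
    dep = artifact.build_deps.add()
    dep.element_name = 'base.bst'
    dep.cache_key = 'base/999aaa'
    log = artifact.logs.add()
    log.name = 'build.log'
    log.digest.hash = 'f' * 64
    log.digest.size_bytes = 1024
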
diff --git a/src/buildstream/_protos/buildstream/v2/artifact_pb2.py b/src/buildstream/_protos/buildstream/v2/artifact_pb2.py
new file mode 100644
index 000000000..c56d1ae8a
--- /dev/null
+++ b/src/buildstream/_protos/buildstream/v2/artifact_pb2.py
@@ -0,0 +1,387 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: buildstream/v2/artifact.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2
+from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='buildstream/v2/artifact.proto',
+ package='buildstream.v2',
+ syntax='proto3',
+ serialized_options=None,
+ serialized_pb=_b('\n\x1d\x62uildstream/v2/artifact.proto\x12\x0e\x62uildstream.v2\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"\xde\x04\n\x08\x41rtifact\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x15\n\rbuild_success\x18\x02 \x01(\x08\x12\x13\n\x0b\x62uild_error\x18\x03 \x01(\t\x12\x1b\n\x13\x62uild_error_details\x18\x04 \x01(\t\x12\x12\n\nstrong_key\x18\x05 \x01(\t\x12\x10\n\x08weak_key\x18\x06 \x01(\t\x12\x16\n\x0ewas_workspaced\x18\x07 \x01(\x08\x12\x36\n\x05\x66iles\x18\x08 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x37\n\nbuild_deps\x18\t \x03(\x0b\x32#.buildstream.v2.Artifact.Dependency\x12<\n\x0bpublic_data\x18\n \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12.\n\x04logs\x18\x0b \x03(\x0b\x32 .buildstream.v2.Artifact.LogFile\x12:\n\tbuildtree\x18\x0c \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x1aM\n\nDependency\x12\x14\n\x0c\x65lement_name\x18\x01 \x01(\t\x12\x11\n\tcache_key\x18\x02 \x01(\t\x12\x16\n\x0ewas_workspaced\x18\x03 \x01(\x08\x1aP\n\x07LogFile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\">\n\x12GetArtifactRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x11\n\tcache_key\x18\x02 \x01(\t\"m\n\x15UpdateArtifactRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x11\n\tcache_key\x18\x02 \x01(\t\x12*\n\x08\x61rtifact\x18\x03 \x01(\x0b\x32\x18.buildstream.v2.Artifact2\xb5\x01\n\x0f\x41rtifactService\x12M\n\x0bGetArtifact\x12\".buildstream.v2.GetArtifactRequest\x1a\x18.buildstream.v2.Artifact\"\x00\x12S\n\x0eUpdateArtifact\x12%.buildstream.v2.UpdateArtifactRequest\x1a\x18.buildstream.v2.Artifact\"\x00\x62\x06proto3')
+ ,
+ dependencies=[build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
+
+
+
+
+_ARTIFACT_DEPENDENCY = _descriptor.Descriptor(
+ name='Dependency',
+ full_name='buildstream.v2.Artifact.Dependency',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='element_name', full_name='buildstream.v2.Artifact.Dependency.element_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='cache_key', full_name='buildstream.v2.Artifact.Dependency.cache_key', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='was_workspaced', full_name='buildstream.v2.Artifact.Dependency.was_workspaced', index=2,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=583,
+ serialized_end=660,
+)
+
+_ARTIFACT_LOGFILE = _descriptor.Descriptor(
+ name='LogFile',
+ full_name='buildstream.v2.Artifact.LogFile',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='buildstream.v2.Artifact.LogFile.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='buildstream.v2.Artifact.LogFile.digest', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=662,
+ serialized_end=742,
+)
+
+_ARTIFACT = _descriptor.Descriptor(
+ name='Artifact',
+ full_name='buildstream.v2.Artifact',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='version', full_name='buildstream.v2.Artifact.version', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='build_success', full_name='buildstream.v2.Artifact.build_success', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='build_error', full_name='buildstream.v2.Artifact.build_error', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='build_error_details', full_name='buildstream.v2.Artifact.build_error_details', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='strong_key', full_name='buildstream.v2.Artifact.strong_key', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='weak_key', full_name='buildstream.v2.Artifact.weak_key', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='was_workspaced', full_name='buildstream.v2.Artifact.was_workspaced', index=6,
+ number=7, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='files', full_name='buildstream.v2.Artifact.files', index=7,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='build_deps', full_name='buildstream.v2.Artifact.build_deps', index=8,
+ number=9, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='public_data', full_name='buildstream.v2.Artifact.public_data', index=9,
+ number=10, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='logs', full_name='buildstream.v2.Artifact.logs', index=10,
+ number=11, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='buildtree', full_name='buildstream.v2.Artifact.buildtree', index=11,
+ number=12, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_ARTIFACT_DEPENDENCY, _ARTIFACT_LOGFILE, ],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=136,
+ serialized_end=742,
+)
+
+
+_GETARTIFACTREQUEST = _descriptor.Descriptor(
+ name='GetArtifactRequest',
+ full_name='buildstream.v2.GetArtifactRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='buildstream.v2.GetArtifactRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='cache_key', full_name='buildstream.v2.GetArtifactRequest.cache_key', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=744,
+ serialized_end=806,
+)
+
+
+_UPDATEARTIFACTREQUEST = _descriptor.Descriptor(
+ name='UpdateArtifactRequest',
+ full_name='buildstream.v2.UpdateArtifactRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='buildstream.v2.UpdateArtifactRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='cache_key', full_name='buildstream.v2.UpdateArtifactRequest.cache_key', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='artifact', full_name='buildstream.v2.UpdateArtifactRequest.artifact', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=808,
+ serialized_end=917,
+)
+
+_ARTIFACT_DEPENDENCY.containing_type = _ARTIFACT
+_ARTIFACT_LOGFILE.fields_by_name['digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_ARTIFACT_LOGFILE.containing_type = _ARTIFACT
+_ARTIFACT.fields_by_name['files'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_ARTIFACT.fields_by_name['build_deps'].message_type = _ARTIFACT_DEPENDENCY
+_ARTIFACT.fields_by_name['public_data'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_ARTIFACT.fields_by_name['logs'].message_type = _ARTIFACT_LOGFILE
+_ARTIFACT.fields_by_name['buildtree'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_UPDATEARTIFACTREQUEST.fields_by_name['artifact'].message_type = _ARTIFACT
+DESCRIPTOR.message_types_by_name['Artifact'] = _ARTIFACT
+DESCRIPTOR.message_types_by_name['GetArtifactRequest'] = _GETARTIFACTREQUEST
+DESCRIPTOR.message_types_by_name['UpdateArtifactRequest'] = _UPDATEARTIFACTREQUEST
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Artifact = _reflection.GeneratedProtocolMessageType('Artifact', (_message.Message,), dict(
+
+ Dependency = _reflection.GeneratedProtocolMessageType('Dependency', (_message.Message,), dict(
+ DESCRIPTOR = _ARTIFACT_DEPENDENCY,
+ __module__ = 'buildstream.v2.artifact_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.Artifact.Dependency)
+ ))
+ ,
+
+ LogFile = _reflection.GeneratedProtocolMessageType('LogFile', (_message.Message,), dict(
+ DESCRIPTOR = _ARTIFACT_LOGFILE,
+ __module__ = 'buildstream.v2.artifact_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.Artifact.LogFile)
+ ))
+ ,
+ DESCRIPTOR = _ARTIFACT,
+ __module__ = 'buildstream.v2.artifact_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.Artifact)
+ ))
+_sym_db.RegisterMessage(Artifact)
+_sym_db.RegisterMessage(Artifact.Dependency)
+_sym_db.RegisterMessage(Artifact.LogFile)
+
+GetArtifactRequest = _reflection.GeneratedProtocolMessageType('GetArtifactRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETARTIFACTREQUEST,
+ __module__ = 'buildstream.v2.artifact_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.GetArtifactRequest)
+ ))
+_sym_db.RegisterMessage(GetArtifactRequest)
+
+UpdateArtifactRequest = _reflection.GeneratedProtocolMessageType('UpdateArtifactRequest', (_message.Message,), dict(
+ DESCRIPTOR = _UPDATEARTIFACTREQUEST,
+ __module__ = 'buildstream.v2.artifact_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.UpdateArtifactRequest)
+ ))
+_sym_db.RegisterMessage(UpdateArtifactRequest)
+
+
+
+_ARTIFACTSERVICE = _descriptor.ServiceDescriptor(
+ name='ArtifactService',
+ full_name='buildstream.v2.ArtifactService',
+ file=DESCRIPTOR,
+ index=0,
+ serialized_options=None,
+ serialized_start=920,
+ serialized_end=1101,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='GetArtifact',
+ full_name='buildstream.v2.ArtifactService.GetArtifact',
+ index=0,
+ containing_service=None,
+ input_type=_GETARTIFACTREQUEST,
+ output_type=_ARTIFACT,
+ serialized_options=None,
+ ),
+ _descriptor.MethodDescriptor(
+ name='UpdateArtifact',
+ full_name='buildstream.v2.ArtifactService.UpdateArtifact',
+ index=1,
+ containing_service=None,
+ input_type=_UPDATEARTIFACTREQUEST,
+ output_type=_ARTIFACT,
+ serialized_options=None,
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_ARTIFACTSERVICE)
+
+DESCRIPTOR.services_by_name['ArtifactService'] = _ARTIFACTSERVICE
+
+# @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/buildstream/v2/artifact_pb2_grpc.py b/src/buildstream/_protos/buildstream/v2/artifact_pb2_grpc.py
new file mode 100644
index 000000000..d355146af
--- /dev/null
+++ b/src/buildstream/_protos/buildstream/v2/artifact_pb2_grpc.py
@@ -0,0 +1,68 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream._protos.buildstream.v2 import artifact_pb2 as buildstream_dot_v2_dot_artifact__pb2
+
+
+class ArtifactServiceStub(object):
+ # missing associated documentation comment in .proto file
+ pass
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetArtifact = channel.unary_unary(
+ '/buildstream.v2.ArtifactService/GetArtifact',
+ request_serializer=buildstream_dot_v2_dot_artifact__pb2.GetArtifactRequest.SerializeToString,
+ response_deserializer=buildstream_dot_v2_dot_artifact__pb2.Artifact.FromString,
+ )
+ self.UpdateArtifact = channel.unary_unary(
+ '/buildstream.v2.ArtifactService/UpdateArtifact',
+ request_serializer=buildstream_dot_v2_dot_artifact__pb2.UpdateArtifactRequest.SerializeToString,
+ response_deserializer=buildstream_dot_v2_dot_artifact__pb2.Artifact.FromString,
+ )
+
+
+class ArtifactServiceServicer(object):
+ # missing associated documentation comment in .proto file
+ pass
+
+ def GetArtifact(self, request, context):
+ """Retrieves an Artifact message
+
+ Errors:
+ * `NOT_FOUND`: Artifact not found on server
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def UpdateArtifact(self, request, context):
+ """Sets an Artifact message
+
+ Errors:
+ * `FAILED_PRECONDITION`: Files specified in upload aren't present in CAS
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ArtifactServiceServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'GetArtifact': grpc.unary_unary_rpc_method_handler(
+ servicer.GetArtifact,
+ request_deserializer=buildstream_dot_v2_dot_artifact__pb2.GetArtifactRequest.FromString,
+ response_serializer=buildstream_dot_v2_dot_artifact__pb2.Artifact.SerializeToString,
+ ),
+ 'UpdateArtifact': grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateArtifact,
+ request_deserializer=buildstream_dot_v2_dot_artifact__pb2.UpdateArtifactRequest.FromString,
+ response_serializer=buildstream_dot_v2_dot_artifact__pb2.Artifact.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'buildstream.v2.ArtifactService', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
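
A minimal client-side sketch using the generated ArtifactServiceStub above; the endpoint, instance name and cache key are placeholders, and error handling is reduced to the NOT_FOUND case documented in the proto:

    import grpc

    from buildstream._protos.buildstream.v2 import artifact_pb2, artifact_pb2_grpc

    def get_artifact(endpoint, cache_key, instance_name=''):
        with grpc.insecure_channel(endpoint) as channel:
            stub = artifact_pb2_grpc.ArtifactServiceStub(channel)
            request = artifact_pb2.GetArtifactRequest(
                instance_name=instance_name, cache_key=cache_key)
            try:
                return stub.GetArtifact(request)
            except grpc.RpcError as e:
                # The service reports a missing artifact as NOT_FOUND.
                if e.code() == grpc.StatusCode.NOT_FOUND:
                    return None
                raise
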
diff --git a/src/buildstream/_protos/buildstream/v2/buildstream.proto b/src/buildstream/_protos/buildstream/v2/buildstream.proto
new file mode 100644
index 000000000..f283d6f3f
--- /dev/null
+++ b/src/buildstream/_protos/buildstream/v2/buildstream.proto
@@ -0,0 +1,95 @@
+// Copyright 2018 Codethink Limited
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package buildstream.v2;
+
+import "build/bazel/remote/execution/v2/remote_execution.proto";
+import "google/api/annotations.proto";
+
+service ReferenceStorage {
+ // Retrieve a CAS [Directory][build.bazel.remote.execution.v2.Directory]
+ // digest by name.
+ //
+ // Errors:
+ // * `NOT_FOUND`: The requested reference is not in the cache.
+ rpc GetReference(GetReferenceRequest) returns (GetReferenceResponse) {
+ option (google.api.http) = { get: "/v2/{instance_name=**}/buildstream/refs/{key}" };
+ }
+
+ // Associate a name with a CAS [Directory][build.bazel.remote.execution.v2.Directory]
+ // digest.
+ //
+ // Errors:
+ // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+ // entry to the cache.
+ rpc UpdateReference(UpdateReferenceRequest) returns (UpdateReferenceResponse) {
+ option (google.api.http) = { put: "/v2/{instance_name=**}/buildstream/refs/{key}" body: "digest" };
+ }
+
+ rpc Status(StatusRequest) returns (StatusResponse) {
+ option (google.api.http) = { put: "/v2/{instance_name=**}/buildstream/refs:status" };
+ }
+}
+
+message GetReferenceRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The name of the reference.
+ string key = 2;
+}
+
+message GetReferenceResponse {
+ // The digest of the CAS [Directory][build.bazel.remote.execution.v2.Directory].
+ build.bazel.remote.execution.v2.Digest digest = 1;
+}
+
+message UpdateReferenceRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+
+ // The name of the reference.
+ repeated string keys = 2;
+
+ // The digest of the CAS [Directory][build.bazel.remote.execution.v2.Directory]
+ // to store in the cache.
+ build.bazel.remote.execution.v2.Digest digest = 3;
+}
+
+message UpdateReferenceResponse {
+}
+
+message StatusRequest {
+ // The instance of the execution system to operate against. A server may
+ // support multiple instances of the execution system (with their own workers,
+ // storage, caches, etc.). The server MAY require use of this field to select
+ // between them in an implementation-defined fashion, otherwise it can be
+ // omitted.
+ string instance_name = 1;
+}
+
+message StatusResponse {
+ // Whether reference updates are allowed for the connected client.
+ bool allow_updates = 1;
+}
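
A minimal sketch of resolving a reference through the ReferenceStorage service defined above; the endpoint is a placeholder and the channel is left insecure for brevity:

    import grpc

    from buildstream._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc

    def resolve_reference(endpoint, key, instance_name=''):
        with grpc.insecure_channel(endpoint) as channel:
            stub = buildstream_pb2_grpc.ReferenceStorageStub(channel)
            # Ask whether this client is allowed to push reference updates.
            status = stub.Status(
                buildstream_pb2.StatusRequest(instance_name=instance_name))
            # Resolve the named reference to a CAS Directory digest.
            response = stub.GetReference(
                buildstream_pb2.GetReferenceRequest(
                    instance_name=instance_name, key=key))
            return response.digest, status.allow_updates
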
diff --git a/src/buildstream/_protos/buildstream/v2/buildstream_pb2.py b/src/buildstream/_protos/buildstream/v2/buildstream_pb2.py
new file mode 100644
index 000000000..57fdae49d
--- /dev/null
+++ b/src/buildstream/_protos/buildstream/v2/buildstream_pb2.py
@@ -0,0 +1,325 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: buildstream/v2/buildstream.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2
+from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='buildstream/v2/buildstream.proto',
+ package='buildstream.v2',
+ syntax='proto3',
+ serialized_pb=_b('\n buildstream/v2/buildstream.proto\x12\x0e\x62uildstream.v2\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"9\n\x13GetReferenceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"O\n\x14GetReferenceResponse\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"v\n\x16UpdateReferenceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04keys\x18\x02 \x03(\t\x12\x37\n\x06\x64igest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x19\n\x17UpdateReferenceResponse\"&\n\rStatusRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\'\n\x0eStatusResponse\x12\x15\n\rallow_updates\x18\x01 \x01(\x08\x32\xca\x03\n\x10ReferenceStorage\x12\x90\x01\n\x0cGetReference\x12#.buildstream.v2.GetReferenceRequest\x1a$.buildstream.v2.GetReferenceResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v2/{instance_name=**}/buildstream/refs/{key}\x12\xa1\x01\n\x0fUpdateReference\x12&.buildstream.v2.UpdateReferenceRequest\x1a\'.buildstream.v2.UpdateReferenceResponse\"=\x82\xd3\xe4\x93\x02\x37\x1a-/v2/{instance_name=**}/buildstream/refs/{key}:\x06\x64igest\x12\x7f\n\x06Status\x12\x1d.buildstream.v2.StatusRequest\x1a\x1e.buildstream.v2.StatusResponse\"6\x82\xd3\xe4\x93\x02\x30\x1a./v2/{instance_name=**}/buildstream/refs:statusb\x06proto3')
+ ,
+ dependencies=[build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
+
+
+
+
+_GETREFERENCEREQUEST = _descriptor.Descriptor(
+ name='GetReferenceRequest',
+ full_name='buildstream.v2.GetReferenceRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='buildstream.v2.GetReferenceRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='key', full_name='buildstream.v2.GetReferenceRequest.key', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=138,
+ serialized_end=195,
+)
+
+
+_GETREFERENCERESPONSE = _descriptor.Descriptor(
+ name='GetReferenceResponse',
+ full_name='buildstream.v2.GetReferenceResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='buildstream.v2.GetReferenceResponse.digest', index=0,
+ number=1, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=197,
+ serialized_end=276,
+)
+
+
+_UPDATEREFERENCEREQUEST = _descriptor.Descriptor(
+ name='UpdateReferenceRequest',
+ full_name='buildstream.v2.UpdateReferenceRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='buildstream.v2.UpdateReferenceRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='keys', full_name='buildstream.v2.UpdateReferenceRequest.keys', index=1,
+ number=2, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='digest', full_name='buildstream.v2.UpdateReferenceRequest.digest', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=278,
+ serialized_end=396,
+)
+
+
+_UPDATEREFERENCERESPONSE = _descriptor.Descriptor(
+ name='UpdateReferenceResponse',
+ full_name='buildstream.v2.UpdateReferenceResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=398,
+ serialized_end=423,
+)
+
+
+_STATUSREQUEST = _descriptor.Descriptor(
+ name='StatusRequest',
+ full_name='buildstream.v2.StatusRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='instance_name', full_name='buildstream.v2.StatusRequest.instance_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=425,
+ serialized_end=463,
+)
+
+
+_STATUSRESPONSE = _descriptor.Descriptor(
+ name='StatusResponse',
+ full_name='buildstream.v2.StatusResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='allow_updates', full_name='buildstream.v2.StatusResponse.allow_updates', index=0,
+ number=1, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=465,
+ serialized_end=504,
+)
+
+_GETREFERENCERESPONSE.fields_by_name['digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+_UPDATEREFERENCEREQUEST.fields_by_name['digest'].message_type = build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2._DIGEST
+DESCRIPTOR.message_types_by_name['GetReferenceRequest'] = _GETREFERENCEREQUEST
+DESCRIPTOR.message_types_by_name['GetReferenceResponse'] = _GETREFERENCERESPONSE
+DESCRIPTOR.message_types_by_name['UpdateReferenceRequest'] = _UPDATEREFERENCEREQUEST
+DESCRIPTOR.message_types_by_name['UpdateReferenceResponse'] = _UPDATEREFERENCERESPONSE
+DESCRIPTOR.message_types_by_name['StatusRequest'] = _STATUSREQUEST
+DESCRIPTOR.message_types_by_name['StatusResponse'] = _STATUSRESPONSE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+GetReferenceRequest = _reflection.GeneratedProtocolMessageType('GetReferenceRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETREFERENCEREQUEST,
+ __module__ = 'buildstream.v2.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.GetReferenceRequest)
+ ))
+_sym_db.RegisterMessage(GetReferenceRequest)
+
+GetReferenceResponse = _reflection.GeneratedProtocolMessageType('GetReferenceResponse', (_message.Message,), dict(
+ DESCRIPTOR = _GETREFERENCERESPONSE,
+ __module__ = 'buildstream.v2.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.GetReferenceResponse)
+ ))
+_sym_db.RegisterMessage(GetReferenceResponse)
+
+UpdateReferenceRequest = _reflection.GeneratedProtocolMessageType('UpdateReferenceRequest', (_message.Message,), dict(
+ DESCRIPTOR = _UPDATEREFERENCEREQUEST,
+ __module__ = 'buildstream.v2.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.UpdateReferenceRequest)
+ ))
+_sym_db.RegisterMessage(UpdateReferenceRequest)
+
+UpdateReferenceResponse = _reflection.GeneratedProtocolMessageType('UpdateReferenceResponse', (_message.Message,), dict(
+ DESCRIPTOR = _UPDATEREFERENCERESPONSE,
+ __module__ = 'buildstream.v2.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.UpdateReferenceResponse)
+ ))
+_sym_db.RegisterMessage(UpdateReferenceResponse)
+
+StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), dict(
+ DESCRIPTOR = _STATUSREQUEST,
+ __module__ = 'buildstream.v2.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.StatusRequest)
+ ))
+_sym_db.RegisterMessage(StatusRequest)
+
+StatusResponse = _reflection.GeneratedProtocolMessageType('StatusResponse', (_message.Message,), dict(
+ DESCRIPTOR = _STATUSRESPONSE,
+ __module__ = 'buildstream.v2.buildstream_pb2'
+ # @@protoc_insertion_point(class_scope:buildstream.v2.StatusResponse)
+ ))
+_sym_db.RegisterMessage(StatusResponse)
+
+
+
+_REFERENCESTORAGE = _descriptor.ServiceDescriptor(
+ name='ReferenceStorage',
+ full_name='buildstream.v2.ReferenceStorage',
+ file=DESCRIPTOR,
+ index=0,
+ options=None,
+ serialized_start=507,
+ serialized_end=965,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='GetReference',
+ full_name='buildstream.v2.ReferenceStorage.GetReference',
+ index=0,
+ containing_service=None,
+ input_type=_GETREFERENCEREQUEST,
+ output_type=_GETREFERENCERESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002/\022-/v2/{instance_name=**}/buildstream/refs/{key}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='UpdateReference',
+ full_name='buildstream.v2.ReferenceStorage.UpdateReference',
+ index=1,
+ containing_service=None,
+ input_type=_UPDATEREFERENCEREQUEST,
+ output_type=_UPDATEREFERENCERESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0027\032-/v2/{instance_name=**}/buildstream/refs/{key}:\006digest')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='Status',
+ full_name='buildstream.v2.ReferenceStorage.Status',
+ index=2,
+ containing_service=None,
+ input_type=_STATUSREQUEST,
+ output_type=_STATUSRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0020\032./v2/{instance_name=**}/buildstream/refs:status')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_REFERENCESTORAGE)
+
+DESCRIPTOR.services_by_name['ReferenceStorage'] = _REFERENCESTORAGE
+
+# @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py b/src/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py
new file mode 100644
index 000000000..b3e653493
--- /dev/null
+++ b/src/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py
@@ -0,0 +1,89 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream._protos.buildstream.v2 import buildstream_pb2 as buildstream_dot_v2_dot_buildstream__pb2
+
+
+class ReferenceStorageStub(object):
+ # missing associated documentation comment in .proto file
+ pass
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetReference = channel.unary_unary(
+ '/buildstream.v2.ReferenceStorage/GetReference',
+ request_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.SerializeToString,
+ response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.FromString,
+ )
+ self.UpdateReference = channel.unary_unary(
+ '/buildstream.v2.ReferenceStorage/UpdateReference',
+ request_serializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.SerializeToString,
+ response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.FromString,
+ )
+ self.Status = channel.unary_unary(
+ '/buildstream.v2.ReferenceStorage/Status',
+ request_serializer=buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.SerializeToString,
+ response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.FromString,
+ )
+
+
+class ReferenceStorageServicer(object):
+ # missing associated documentation comment in .proto file
+ pass
+
+ def GetReference(self, request, context):
+ """Retrieve a CAS [Directory][build.bazel.remote.execution.v2.Directory]
+ digest by name.
+
+ Errors:
+ * `NOT_FOUND`: The requested reference is not in the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def UpdateReference(self, request, context):
+ """Associate a name with a CAS [Directory][build.bazel.remote.execution.v2.Directory]
+ digest.
+
+ Errors:
+ * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+ entry to the cache.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def Status(self, request, context):
+ # missing associated documentation comment in .proto file
+ pass
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ReferenceStorageServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'GetReference': grpc.unary_unary_rpc_method_handler(
+ servicer.GetReference,
+ request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.FromString,
+ response_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.SerializeToString,
+ ),
+ 'UpdateReference': grpc.unary_unary_rpc_method_handler(
+ servicer.UpdateReference,
+ request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.FromString,
+ response_serializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.SerializeToString,
+ ),
+ 'Status': grpc.unary_unary_rpc_method_handler(
+ servicer.Status,
+ request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.FromString,
+ response_serializer=buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'buildstream.v2.ReferenceStorage', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
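
A minimal server-side sketch pairing with the registration helper above: an in-memory ReferenceStorageServicer subclass registered on a grpc server. The storage dict, thread pool size and port are assumptions for illustration only:

    from concurrent import futures

    import grpc

    from buildstream._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc

    class InMemoryReferenceStorage(buildstream_pb2_grpc.ReferenceStorageServicer):
        def __init__(self):
            self._refs = {}  # reference name -> Digest

        def GetReference(self, request, context):
            digest = self._refs.get(request.key)
            if digest is None:
                context.abort(grpc.StatusCode.NOT_FOUND, 'Reference not found')
            return buildstream_pb2.GetReferenceResponse(digest=digest)

        def UpdateReference(self, request, context):
            for key in request.keys:
                self._refs[key] = request.digest
            return buildstream_pb2.UpdateReferenceResponse()

        def Status(self, request, context):
            return buildstream_pb2.StatusResponse(allow_updates=True)

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    buildstream_pb2_grpc.add_ReferenceStorageServicer_to_server(
        InMemoryReferenceStorage(), server)
    server.add_insecure_port('localhost:50052')
    server.start()
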
diff --git a/src/buildstream/_protos/google/__init__.py b/src/buildstream/_protos/google/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/google/__init__.py
diff --git a/src/buildstream/_protos/google/api/__init__.py b/src/buildstream/_protos/google/api/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/google/api/__init__.py
diff --git a/src/buildstream/_protos/google/api/annotations.proto b/src/buildstream/_protos/google/api/annotations.proto
new file mode 100644
index 000000000..85c361b47
--- /dev/null
+++ b/src/buildstream/_protos/google/api/annotations.proto
@@ -0,0 +1,31 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/http.proto";
+import "google/protobuf/descriptor.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+extend google.protobuf.MethodOptions {
+ // See `HttpRule`.
+ HttpRule http = 72295728;
+}
diff --git a/src/buildstream/_protos/google/api/annotations_pb2.py b/src/buildstream/_protos/google/api/annotations_pb2.py
new file mode 100644
index 000000000..092c46de7
--- /dev/null
+++ b/src/buildstream/_protos/google/api/annotations_pb2.py
@@ -0,0 +1,46 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/api/annotations.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.google.api import http_pb2 as google_dot_api_dot_http__pb2
+from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/api/annotations.proto',
+ package='google.api',
+ syntax='proto3',
+ serialized_pb=_b('\n\x1cgoogle/api/annotations.proto\x12\ngoogle.api\x1a\x15google/api/http.proto\x1a google/protobuf/descriptor.proto:E\n\x04http\x12\x1e.google.protobuf.MethodOptions\x18\xb0\xca\xbc\" \x01(\x0b\x32\x14.google.api.HttpRuleBn\n\x0e\x63om.google.apiB\x10\x41nnotationsProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xa2\x02\x04GAPIb\x06proto3')
+ ,
+ dependencies=[google_dot_api_dot_http__pb2.DESCRIPTOR,google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
+
+
+HTTP_FIELD_NUMBER = 72295728
+http = _descriptor.FieldDescriptor(
+ name='http', full_name='google.api.http', index=0,
+ number=72295728, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=True, extension_scope=None,
+ options=None, file=DESCRIPTOR)
+
+DESCRIPTOR.extensions_by_name['http'] = http
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+http.message_type = google_dot_api_dot_http__pb2._HTTPRULE
+google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(http)
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\020AnnotationsProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\242\002\004GAPI'))
+# @@protoc_insertion_point(module_scope)
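
A minimal sketch of what the http extension registered above enables from Python: reading the HTTP routing rule attached to a method descriptor. The printed path assumes the rule declared for GetReference in buildstream.proto above:

    from buildstream._protos.buildstream.v2 import buildstream_pb2
    from buildstream._protos.google.api import annotations_pb2

    # Look up the method descriptor and read its (google.api.http) option.
    service = buildstream_pb2.DESCRIPTOR.services_by_name['ReferenceStorage']
    method = service.methods_by_name['GetReference']
    http_rule = method.GetOptions().Extensions[annotations_pb2.http]
    print(http_rule.get)   # /v2/{instance_name=**}/buildstream/refs/{key}
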
diff --git a/src/buildstream/_protos/google/api/annotations_pb2_grpc.py b/src/buildstream/_protos/google/api/annotations_pb2_grpc.py
new file mode 100644
index 000000000..a89435267
--- /dev/null
+++ b/src/buildstream/_protos/google/api/annotations_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/src/buildstream/_protos/google/api/http.proto b/src/buildstream/_protos/google/api/http.proto
new file mode 100644
index 000000000..78d515d4b
--- /dev/null
+++ b/src/buildstream/_protos/google/api/http.proto
@@ -0,0 +1,313 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "HttpProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Defines the HTTP configuration for an API service. It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+message Http {
+ // A list of HTTP configuration rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ repeated HttpRule rules = 1;
+
+// When set to true, URL path parameters will be fully URI-decoded except in
+ // cases of single segment matches in reserved expansion, where "%2F" will be
+ // left encoded.
+ //
+ // The default behavior is to not decode RFC 6570 reserved characters in multi
+ // segment matches.
+ bool fully_decode_reserved_expansion = 2;
+}
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP
+// REST API methods. The mapping specifies how different portions of the RPC
+// request message are mapped to URL path, URL query parameters, and
+// HTTP request body. The mapping is typically specified as an
+// `google.api.http` annotation on the RPC method,
+// see "google/api/annotations.proto" for details.
+//
+// The mapping consists of a field specifying the path template and
+// method kind. The path template can refer to fields in the request
+// message, as in the example below which describes a REST GET
+// operation on a resource collection of messages:
+//
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
+// }
+// }
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // mapped to the URL
+// SubMessage sub = 2; // `sub.subfield` is url-mapped
+// }
+// message Message {
+// string text = 1; // content of the resource
+// }
+//
+// The same http annotation can alternatively be expressed inside the
+// `GRPC API Configuration` YAML file.
+//
+// http:
+// rules:
+// - selector: <proto_package_name>.Messaging.GetMessage
+// get: /v1/messages/{message_id}/{sub.subfield}
+//
+// This definition enables an automatic, bidirectional mapping of HTTP
+// JSON to RPC. Example:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
+//
+// In general, not only fields but also field paths can be referenced
+// from a path pattern. Fields mapped to the path pattern cannot be
+// repeated and must have a primitive (non-message) type.
+//
+// Any fields in the request message which are not bound by the path
+// pattern automatically become (optional) HTTP query
+// parameters. Assume the following definition of the request message:
+//
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http).get = "/v1/messages/{message_id}";
+// }
+// }
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // mapped to the URL
+// int64 revision = 2; // becomes a parameter
+// SubMessage sub = 3; // `sub.subfield` becomes a parameter
+// }
+//
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to HTTP parameters must have a
+// primitive type or a repeated primitive type. Message types are not
+// allowed. In the case of a repeated type, the parameter can be
+// repeated in the URL, as in `...?param=A&param=B`.
+//
+// For HTTP method kinds which allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//
+// service Messaging {
+// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// put: "/v1/messages/{message_id}"
+// body: "message"
+// };
+// }
+// }
+// message UpdateMessageRequest {
+// string message_id = 1; // mapped to the URL
+// Message message = 2; // mapped to the body
+// }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// the protobuf JSON encoding:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body. This enables the following alternative definition of
+// the update method:
+//
+// service Messaging {
+// rpc UpdateMessage(Message) returns (Message) {
+// option (google.api.http) = {
+// put: "/v1/messages/{message_id}"
+// body: "*"
+// };
+// }
+// }
+// message Message {
+// string message_id = 1;
+// string text = 2;
+// }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end up in
+// the body. This makes this option rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// which don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// get: "/v1/messages/{message_id}"
+// additional_bindings {
+// get: "/v1/users/{user_id}/messages/{message_id}"
+// }
+// };
+// }
+// }
+// message GetMessageRequest {
+// string message_id = 1;
+// string user_id = 2;
+// }
+//
+//
+// This enables the following two alternative HTTP JSON to RPC
+// mappings:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+// omitted. If omitted, it indicates there is no HTTP request body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+// request) can be classified into three types:
+// (a) Matched in the URL template.
+// (b) Covered by body (if body is `*`, everything except (a) fields;
+// else everything under the body field)
+// (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+// Template = "/" Segments [ Verb ] ;
+// Segments = Segment { "/" Segment } ;
+// Segment = "*" | "**" | LITERAL | Variable ;
+// Variable = "{" FieldPath [ "=" Segments ] "}" ;
+// FieldPath = IDENT { "." IDENT } ;
+// Verb = ":" LITERAL ;
+//
+// The syntax `*` matches a single path segment. The syntax `**` matches zero
+// or more path segments, which must be the last part of the path except the
+// `Verb`. The syntax `LITERAL` matches literal text in the path.
+//
+// The syntax `Variable` matches part of the URL path as specified by its
+// template. A variable template must not contain other variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// If a variable contains exactly one path segment, such as `"{var}"` or
+// `"{var=*}"`, when such a variable is expanded into a URL path, all characters
+// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the
+// Discovery Document as `{var}`.
+//
+// If a variable contains one or more path segments, such as `"{var=foo/*}"`
+// or `"{var=**}"`, when such a variable is expanded into a URL path, all
+// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables
+// show up in the Discovery Document as `{+var}`.
+//
+// NOTE: While the single segment variable matches the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2
+// Simple String Expansion, the multi segment variable **does not** match
+// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs.
+//
+// NOTE: the field paths in variables and in the `body` must not refer to
+// repeated fields or map fields.
+message HttpRule {
+ // Selects methods to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+  // Determines the URL pattern that is matched by this rule. This pattern can be
+ // used with any of the {get|put|post|delete|patch} methods. A custom method
+ // can be defined using the 'custom' field.
+ oneof pattern {
+ // Used for listing and getting information about resources.
+ string get = 2;
+
+ // Used for updating a resource.
+ string put = 3;
+
+ // Used for creating a resource.
+ string post = 4;
+
+ // Used for deleting a resource.
+ string delete = 5;
+
+ // Used for updating a resource.
+ string patch = 6;
+
+ // The custom pattern is used for specifying an HTTP method that is not
+ // included in the `pattern` field, such as HEAD, or "*" to leave the
+ // HTTP method unspecified for this rule. The wild-card rule is useful
+ // for services that provide content to Web (HTML) clients.
+ CustomHttpPattern custom = 8;
+ }
+
+ // The name of the request field whose value is mapped to the HTTP body, or
+ // `*` for mapping all fields not captured by the path pattern to the HTTP
+ // body. NOTE: the referred field must not be a repeated field and must be
+ // present at the top-level of request message type.
+ string body = 7;
+
+ // Additional HTTP bindings for the selector. Nested bindings must
+ // not contain an `additional_bindings` field themselves (that is,
+ // the nesting may only be one level deep).
+ repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+message CustomHttpPattern {
+ // The name of this custom HTTP verb.
+ string kind = 1;
+
+ // The path matched by this custom verb.
+ string path = 2;
+}
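
A minimal sketch of the HttpRule mapping described in the comments above, built with the generated http_pb2 module vendored further down in this tree; the selector and path templates are illustrative values only, mirroring the GetMessage example.

from buildstream._protos.google.api import http_pb2

rule = http_pb2.HttpRule(
    selector='example.Messaging.GetMessage',   # hypothetical fully-qualified RPC name
    get='/v1/messages/{message_id}',           # primary binding: a path template
    additional_bindings=[                      # extra bindings may nest only one level deep
        http_pb2.HttpRule(get='/v1/users/{user_id}/messages/{message_id}'),
    ],
)
config = http_pb2.Http(rules=[rule])
print(config)
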
diff --git a/src/buildstream/_protos/google/api/http_pb2.py b/src/buildstream/_protos/google/api/http_pb2.py
new file mode 100644
index 000000000..aad9ddb97
--- /dev/null
+++ b/src/buildstream/_protos/google/api/http_pb2.py
@@ -0,0 +1,243 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/api/http.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/api/http.proto',
+ package='google.api',
+ syntax='proto3',
+ serialized_pb=_b('\n\x15google/api/http.proto\x12\ngoogle.api\"T\n\x04Http\x12#\n\x05rules\x18\x01 \x03(\x0b\x32\x14.google.api.HttpRule\x12\'\n\x1f\x66ully_decode_reserved_expansion\x18\x02 \x01(\x08\"\xea\x01\n\x08HttpRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12\r\n\x03get\x18\x02 \x01(\tH\x00\x12\r\n\x03put\x18\x03 \x01(\tH\x00\x12\x0e\n\x04post\x18\x04 \x01(\tH\x00\x12\x10\n\x06\x64\x65lete\x18\x05 \x01(\tH\x00\x12\x0f\n\x05patch\x18\x06 \x01(\tH\x00\x12/\n\x06\x63ustom\x18\x08 \x01(\x0b\x32\x1d.google.api.CustomHttpPatternH\x00\x12\x0c\n\x04\x62ody\x18\x07 \x01(\t\x12\x31\n\x13\x61\x64\x64itional_bindings\x18\x0b \x03(\x0b\x32\x14.google.api.HttpRuleB\t\n\x07pattern\"/\n\x11\x43ustomHttpPattern\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\tBj\n\x0e\x63om.google.apiB\tHttpProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xf8\x01\x01\xa2\x02\x04GAPIb\x06proto3')
+)
+
+
+
+
+_HTTP = _descriptor.Descriptor(
+ name='Http',
+ full_name='google.api.Http',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='rules', full_name='google.api.Http.rules', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='fully_decode_reserved_expansion', full_name='google.api.Http.fully_decode_reserved_expansion', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=37,
+ serialized_end=121,
+)
+
+
+_HTTPRULE = _descriptor.Descriptor(
+ name='HttpRule',
+ full_name='google.api.HttpRule',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='selector', full_name='google.api.HttpRule.selector', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='get', full_name='google.api.HttpRule.get', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='put', full_name='google.api.HttpRule.put', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='post', full_name='google.api.HttpRule.post', index=3,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='delete', full_name='google.api.HttpRule.delete', index=4,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='patch', full_name='google.api.HttpRule.patch', index=5,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='custom', full_name='google.api.HttpRule.custom', index=6,
+ number=8, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='body', full_name='google.api.HttpRule.body', index=7,
+ number=7, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='additional_bindings', full_name='google.api.HttpRule.additional_bindings', index=8,
+ number=11, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ _descriptor.OneofDescriptor(
+ name='pattern', full_name='google.api.HttpRule.pattern',
+ index=0, containing_type=None, fields=[]),
+ ],
+ serialized_start=124,
+ serialized_end=358,
+)
+
+
+_CUSTOMHTTPPATTERN = _descriptor.Descriptor(
+ name='CustomHttpPattern',
+ full_name='google.api.CustomHttpPattern',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='kind', full_name='google.api.CustomHttpPattern.kind', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='path', full_name='google.api.CustomHttpPattern.path', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=360,
+ serialized_end=407,
+)
+
+_HTTP.fields_by_name['rules'].message_type = _HTTPRULE
+_HTTPRULE.fields_by_name['custom'].message_type = _CUSTOMHTTPPATTERN
+_HTTPRULE.fields_by_name['additional_bindings'].message_type = _HTTPRULE
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['get'])
+_HTTPRULE.fields_by_name['get'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['put'])
+_HTTPRULE.fields_by_name['put'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['post'])
+_HTTPRULE.fields_by_name['post'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['delete'])
+_HTTPRULE.fields_by_name['delete'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['patch'])
+_HTTPRULE.fields_by_name['patch'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+_HTTPRULE.oneofs_by_name['pattern'].fields.append(
+ _HTTPRULE.fields_by_name['custom'])
+_HTTPRULE.fields_by_name['custom'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
+DESCRIPTOR.message_types_by_name['Http'] = _HTTP
+DESCRIPTOR.message_types_by_name['HttpRule'] = _HTTPRULE
+DESCRIPTOR.message_types_by_name['CustomHttpPattern'] = _CUSTOMHTTPPATTERN
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Http = _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), dict(
+ DESCRIPTOR = _HTTP,
+ __module__ = 'google.api.http_pb2'
+ # @@protoc_insertion_point(class_scope:google.api.Http)
+ ))
+_sym_db.RegisterMessage(Http)
+
+HttpRule = _reflection.GeneratedProtocolMessageType('HttpRule', (_message.Message,), dict(
+ DESCRIPTOR = _HTTPRULE,
+ __module__ = 'google.api.http_pb2'
+ # @@protoc_insertion_point(class_scope:google.api.HttpRule)
+ ))
+_sym_db.RegisterMessage(HttpRule)
+
+CustomHttpPattern = _reflection.GeneratedProtocolMessageType('CustomHttpPattern', (_message.Message,), dict(
+ DESCRIPTOR = _CUSTOMHTTPPATTERN,
+ __module__ = 'google.api.http_pb2'
+ # @@protoc_insertion_point(class_scope:google.api.CustomHttpPattern)
+ ))
+_sym_db.RegisterMessage(CustomHttpPattern)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\tHttpProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\370\001\001\242\002\004GAPI'))
+# @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/google/api/http_pb2_grpc.py b/src/buildstream/_protos/google/api/http_pb2_grpc.py
new file mode 100644
index 000000000..a89435267
--- /dev/null
+++ b/src/buildstream/_protos/google/api/http_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/src/buildstream/_protos/google/bytestream/__init__.py b/src/buildstream/_protos/google/bytestream/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/google/bytestream/__init__.py
diff --git a/src/buildstream/_protos/google/bytestream/bytestream.proto b/src/buildstream/_protos/google/bytestream/bytestream.proto
new file mode 100644
index 000000000..85e386fc2
--- /dev/null
+++ b/src/buildstream/_protos/google/bytestream/bytestream.proto
@@ -0,0 +1,181 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bytestream;
+
+import "google/api/annotations.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bytestream;bytestream";
+option java_outer_classname = "ByteStreamProto";
+option java_package = "com.google.bytestream";
+
+
+// #### Introduction
+//
+// The Byte Stream API enables a client to read and write a stream of bytes to
+// and from a resource. Resources have names, and these names are supplied in
+// the API calls below to identify the resource that is being read from or
+// written to.
+//
+// All implementations of the Byte Stream API export the interface defined here:
+//
+// * `Read()`: Reads the contents of a resource.
+//
+// * `Write()`: Writes the contents of a resource. The client can call `Write()`
+// multiple times with the same resource and can check the status of the write
+// by calling `QueryWriteStatus()`.
+//
+// #### Service parameters and metadata
+//
+// The ByteStream API provides no direct way to access/modify any metadata
+// associated with the resource.
+//
+// #### Errors
+//
+// The errors returned by the service are in the Google canonical error space.
+service ByteStream {
+ // `Read()` is used to retrieve the contents of a resource as a sequence
+ // of bytes. The bytes are returned in a sequence of responses, and the
+ // responses are delivered as the results of a server-side streaming RPC.
+ rpc Read(ReadRequest) returns (stream ReadResponse);
+
+ // `Write()` is used to send the contents of a resource as a sequence of
+ // bytes. The bytes are sent in a sequence of request protos of a client-side
+ // streaming RPC.
+ //
+ // A `Write()` action is resumable. If there is an error or the connection is
+ // broken during the `Write()`, the client should check the status of the
+ // `Write()` by calling `QueryWriteStatus()` and continue writing from the
+ // returned `committed_size`. This may be less than the amount of data the
+ // client previously sent.
+ //
+ // Calling `Write()` on a resource name that was previously written and
+ // finalized could cause an error, depending on whether the underlying service
+ // allows over-writing of previously written resources.
+ //
+ // When the client closes the request channel, the service will respond with
+ // a `WriteResponse`. The service will not view the resource as `complete`
+ // until the client has sent a `WriteRequest` with `finish_write` set to
+ // `true`. Sending any requests on a stream after sending a request with
+ // `finish_write` set to `true` will cause an error. The client **should**
+ // check the `WriteResponse` it receives to determine how much data the
+ // service was able to commit and whether the service views the resource as
+ // `complete` or not.
+ rpc Write(stream WriteRequest) returns (WriteResponse);
+
+ // `QueryWriteStatus()` is used to find the `committed_size` for a resource
+ // that is being written, which can then be used as the `write_offset` for
+ // the next `Write()` call.
+ //
+ // If the resource does not exist (i.e., the resource has been deleted, or the
+ // first `Write()` has not yet reached the service), this method returns the
+ // error `NOT_FOUND`.
+ //
+ // The client **may** call `QueryWriteStatus()` at any time to determine how
+ // much data has been processed for this resource. This is useful if the
+ // client is buffering data and needs to know which data can be safely
+ // evicted. For any sequence of `QueryWriteStatus()` calls for a given
+ // resource name, the sequence of returned `committed_size` values will be
+ // non-decreasing.
+ rpc QueryWriteStatus(QueryWriteStatusRequest) returns (QueryWriteStatusResponse);
+}
+
+// Request object for ByteStream.Read.
+message ReadRequest {
+ // The name of the resource to read.
+ string resource_name = 1;
+
+ // The offset for the first byte to return in the read, relative to the start
+ // of the resource.
+ //
+ // A `read_offset` that is negative or greater than the size of the resource
+ // will cause an `OUT_OF_RANGE` error.
+ int64 read_offset = 2;
+
+ // The maximum number of `data` bytes the server is allowed to return in the
+ // sum of all `ReadResponse` messages. A `read_limit` of zero indicates that
+ // there is no limit, and a negative `read_limit` will cause an error.
+ //
+ // If the stream returns fewer bytes than allowed by the `read_limit` and no
+ // error occurred, the stream includes all data from the `read_offset` to the
+ // end of the resource.
+ int64 read_limit = 3;
+}
+
+// Response object for ByteStream.Read.
+message ReadResponse {
+ // A portion of the data for the resource. The service **may** leave `data`
+ // empty for any given `ReadResponse`. This enables the service to inform the
+ // client that the request is still live while it is running an operation to
+ // generate more data.
+ bytes data = 10;
+}
+
+// Request object for ByteStream.Write.
+message WriteRequest {
+ // The name of the resource to write. This **must** be set on the first
+ // `WriteRequest` of each `Write()` action. If it is set on subsequent calls,
+ // it **must** match the value of the first request.
+ string resource_name = 1;
+
+ // The offset from the beginning of the resource at which the data should be
+ // written. It is required on all `WriteRequest`s.
+ //
+ // In the first `WriteRequest` of a `Write()` action, it indicates
+ // the initial offset for the `Write()` call. The value **must** be equal to
+ // the `committed_size` that a call to `QueryWriteStatus()` would return.
+ //
+ // On subsequent calls, this value **must** be set and **must** be equal to
+ // the sum of the first `write_offset` and the sizes of all `data` bundles
+ // sent previously on this stream.
+ //
+ // An incorrect value will cause an error.
+ int64 write_offset = 2;
+
+ // If `true`, this indicates that the write is complete. Sending any
+ // `WriteRequest`s subsequent to one in which `finish_write` is `true` will
+ // cause an error.
+ bool finish_write = 3;
+
+ // A portion of the data for the resource. The client **may** leave `data`
+ // empty for any given `WriteRequest`. This enables the client to inform the
+ // service that the request is still live while it is running an operation to
+ // generate more data.
+ bytes data = 10;
+}
+
+// Response object for ByteStream.Write.
+message WriteResponse {
+ // The number of bytes that have been processed for the given resource.
+ int64 committed_size = 1;
+}
+
+// Request object for ByteStream.QueryWriteStatus.
+message QueryWriteStatusRequest {
+ // The name of the resource whose write status is being requested.
+ string resource_name = 1;
+}
+
+// Response object for ByteStream.QueryWriteStatus.
+message QueryWriteStatusResponse {
+ // The number of bytes that have been processed for the given resource.
+ int64 committed_size = 1;
+
+ // `complete` is `true` only if the client has sent a `WriteRequest` with
+ // `finish_write` set to true, and the server has processed that request.
+ bool complete = 2;
+}
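
A minimal sketch of the write_offset and finish_write rules described above, using the generated bytestream_pb2 module vendored below; the chunk size and the choice to leave resource_name unset after the first request are assumptions for illustration, not requirements of the API.

from buildstream._protos.google.bytestream import bytestream_pb2

def write_requests(resource_name, payload, chunk_size=64 * 1024):
    # The first request names the resource; later requests may leave the name
    # unset. Every request carries the running offset, and only the final one
    # sets finish_write.
    offset = 0
    while True:
        chunk = payload[offset:offset + chunk_size]
        last = offset + len(chunk) >= len(payload)
        yield bytestream_pb2.WriteRequest(
            resource_name=resource_name if offset == 0 else '',
            write_offset=offset,
            data=chunk,
            finish_write=last,
        )
        offset += len(chunk)
        if last:
            break
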
diff --git a/src/buildstream/_protos/google/bytestream/bytestream_pb2.py b/src/buildstream/_protos/google/bytestream/bytestream_pb2.py
new file mode 100644
index 000000000..c8487d6a0
--- /dev/null
+++ b/src/buildstream/_protos/google/bytestream/bytestream_pb2.py
@@ -0,0 +1,353 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/bytestream/bytestream.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/bytestream/bytestream.proto',
+ package='google.bytestream',
+ syntax='proto3',
+ serialized_pb=_b('\n\"google/bytestream/bytestream.proto\x12\x11google.bytestream\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/wrappers.proto\"M\n\x0bReadRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x13\n\x0bread_offset\x18\x02 \x01(\x03\x12\x12\n\nread_limit\x18\x03 \x01(\x03\"\x1c\n\x0cReadResponse\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\x0c\"_\n\x0cWriteRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x14\n\x0cwrite_offset\x18\x02 \x01(\x03\x12\x14\n\x0c\x66inish_write\x18\x03 \x01(\x08\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\x0c\"\'\n\rWriteResponse\x12\x16\n\x0e\x63ommitted_size\x18\x01 \x01(\x03\"0\n\x17QueryWriteStatusRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\"D\n\x18QueryWriteStatusResponse\x12\x16\n\x0e\x63ommitted_size\x18\x01 \x01(\x03\x12\x10\n\x08\x63omplete\x18\x02 \x01(\x08\x32\x92\x02\n\nByteStream\x12I\n\x04Read\x12\x1e.google.bytestream.ReadRequest\x1a\x1f.google.bytestream.ReadResponse0\x01\x12L\n\x05Write\x12\x1f.google.bytestream.WriteRequest\x1a .google.bytestream.WriteResponse(\x01\x12k\n\x10QueryWriteStatus\x12*.google.bytestream.QueryWriteStatusRequest\x1a+.google.bytestream.QueryWriteStatusResponseBe\n\x15\x63om.google.bytestreamB\x0f\x42yteStreamProtoZ;google.golang.org/genproto/googleapis/bytestream;bytestreamb\x06proto3')
+ ,
+ dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,])
+
+
+
+
+_READREQUEST = _descriptor.Descriptor(
+ name='ReadRequest',
+ full_name='google.bytestream.ReadRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='resource_name', full_name='google.bytestream.ReadRequest.resource_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='read_offset', full_name='google.bytestream.ReadRequest.read_offset', index=1,
+ number=2, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='read_limit', full_name='google.bytestream.ReadRequest.read_limit', index=2,
+ number=3, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=119,
+ serialized_end=196,
+)
+
+
+_READRESPONSE = _descriptor.Descriptor(
+ name='ReadResponse',
+ full_name='google.bytestream.ReadResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='data', full_name='google.bytestream.ReadResponse.data', index=0,
+ number=10, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=198,
+ serialized_end=226,
+)
+
+
+_WRITEREQUEST = _descriptor.Descriptor(
+ name='WriteRequest',
+ full_name='google.bytestream.WriteRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='resource_name', full_name='google.bytestream.WriteRequest.resource_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='write_offset', full_name='google.bytestream.WriteRequest.write_offset', index=1,
+ number=2, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='finish_write', full_name='google.bytestream.WriteRequest.finish_write', index=2,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='data', full_name='google.bytestream.WriteRequest.data', index=3,
+ number=10, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b(""),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=228,
+ serialized_end=323,
+)
+
+
+_WRITERESPONSE = _descriptor.Descriptor(
+ name='WriteResponse',
+ full_name='google.bytestream.WriteResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='committed_size', full_name='google.bytestream.WriteResponse.committed_size', index=0,
+ number=1, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=325,
+ serialized_end=364,
+)
+
+
+_QUERYWRITESTATUSREQUEST = _descriptor.Descriptor(
+ name='QueryWriteStatusRequest',
+ full_name='google.bytestream.QueryWriteStatusRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='resource_name', full_name='google.bytestream.QueryWriteStatusRequest.resource_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=366,
+ serialized_end=414,
+)
+
+
+_QUERYWRITESTATUSRESPONSE = _descriptor.Descriptor(
+ name='QueryWriteStatusResponse',
+ full_name='google.bytestream.QueryWriteStatusResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='committed_size', full_name='google.bytestream.QueryWriteStatusResponse.committed_size', index=0,
+ number=1, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='complete', full_name='google.bytestream.QueryWriteStatusResponse.complete', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=416,
+ serialized_end=484,
+)
+
+DESCRIPTOR.message_types_by_name['ReadRequest'] = _READREQUEST
+DESCRIPTOR.message_types_by_name['ReadResponse'] = _READRESPONSE
+DESCRIPTOR.message_types_by_name['WriteRequest'] = _WRITEREQUEST
+DESCRIPTOR.message_types_by_name['WriteResponse'] = _WRITERESPONSE
+DESCRIPTOR.message_types_by_name['QueryWriteStatusRequest'] = _QUERYWRITESTATUSREQUEST
+DESCRIPTOR.message_types_by_name['QueryWriteStatusResponse'] = _QUERYWRITESTATUSRESPONSE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+ReadRequest = _reflection.GeneratedProtocolMessageType('ReadRequest', (_message.Message,), dict(
+ DESCRIPTOR = _READREQUEST,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.ReadRequest)
+ ))
+_sym_db.RegisterMessage(ReadRequest)
+
+ReadResponse = _reflection.GeneratedProtocolMessageType('ReadResponse', (_message.Message,), dict(
+ DESCRIPTOR = _READRESPONSE,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.ReadResponse)
+ ))
+_sym_db.RegisterMessage(ReadResponse)
+
+WriteRequest = _reflection.GeneratedProtocolMessageType('WriteRequest', (_message.Message,), dict(
+ DESCRIPTOR = _WRITEREQUEST,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.WriteRequest)
+ ))
+_sym_db.RegisterMessage(WriteRequest)
+
+WriteResponse = _reflection.GeneratedProtocolMessageType('WriteResponse', (_message.Message,), dict(
+ DESCRIPTOR = _WRITERESPONSE,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.WriteResponse)
+ ))
+_sym_db.RegisterMessage(WriteResponse)
+
+QueryWriteStatusRequest = _reflection.GeneratedProtocolMessageType('QueryWriteStatusRequest', (_message.Message,), dict(
+ DESCRIPTOR = _QUERYWRITESTATUSREQUEST,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.QueryWriteStatusRequest)
+ ))
+_sym_db.RegisterMessage(QueryWriteStatusRequest)
+
+QueryWriteStatusResponse = _reflection.GeneratedProtocolMessageType('QueryWriteStatusResponse', (_message.Message,), dict(
+ DESCRIPTOR = _QUERYWRITESTATUSRESPONSE,
+ __module__ = 'google.bytestream.bytestream_pb2'
+ # @@protoc_insertion_point(class_scope:google.bytestream.QueryWriteStatusResponse)
+ ))
+_sym_db.RegisterMessage(QueryWriteStatusResponse)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\025com.google.bytestreamB\017ByteStreamProtoZ;google.golang.org/genproto/googleapis/bytestream;bytestream'))
+
+_BYTESTREAM = _descriptor.ServiceDescriptor(
+ name='ByteStream',
+ full_name='google.bytestream.ByteStream',
+ file=DESCRIPTOR,
+ index=0,
+ options=None,
+ serialized_start=487,
+ serialized_end=761,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='Read',
+ full_name='google.bytestream.ByteStream.Read',
+ index=0,
+ containing_service=None,
+ input_type=_READREQUEST,
+ output_type=_READRESPONSE,
+ options=None,
+ ),
+ _descriptor.MethodDescriptor(
+ name='Write',
+ full_name='google.bytestream.ByteStream.Write',
+ index=1,
+ containing_service=None,
+ input_type=_WRITEREQUEST,
+ output_type=_WRITERESPONSE,
+ options=None,
+ ),
+ _descriptor.MethodDescriptor(
+ name='QueryWriteStatus',
+ full_name='google.bytestream.ByteStream.QueryWriteStatus',
+ index=2,
+ containing_service=None,
+ input_type=_QUERYWRITESTATUSREQUEST,
+ output_type=_QUERYWRITESTATUSRESPONSE,
+ options=None,
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_BYTESTREAM)
+
+DESCRIPTOR.services_by_name['ByteStream'] = _BYTESTREAM
+
+# @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py b/src/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py
new file mode 100644
index 000000000..ef993e040
--- /dev/null
+++ b/src/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py
@@ -0,0 +1,160 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream._protos.google.bytestream import bytestream_pb2 as google_dot_bytestream_dot_bytestream__pb2
+
+
+class ByteStreamStub(object):
+ """#### Introduction
+
+ The Byte Stream API enables a client to read and write a stream of bytes to
+ and from a resource. Resources have names, and these names are supplied in
+ the API calls below to identify the resource that is being read from or
+ written to.
+
+ All implementations of the Byte Stream API export the interface defined here:
+
+ * `Read()`: Reads the contents of a resource.
+
+ * `Write()`: Writes the contents of a resource. The client can call `Write()`
+ multiple times with the same resource and can check the status of the write
+ by calling `QueryWriteStatus()`.
+
+ #### Service parameters and metadata
+
+ The ByteStream API provides no direct way to access/modify any metadata
+ associated with the resource.
+
+ #### Errors
+
+ The errors returned by the service are in the Google canonical error space.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.Read = channel.unary_stream(
+ '/google.bytestream.ByteStream/Read',
+ request_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.SerializeToString,
+ response_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.FromString,
+ )
+ self.Write = channel.stream_unary(
+ '/google.bytestream.ByteStream/Write',
+ request_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.SerializeToString,
+ response_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.FromString,
+ )
+ self.QueryWriteStatus = channel.unary_unary(
+ '/google.bytestream.ByteStream/QueryWriteStatus',
+ request_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.SerializeToString,
+ response_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.FromString,
+ )
+
+
+class ByteStreamServicer(object):
+ """#### Introduction
+
+ The Byte Stream API enables a client to read and write a stream of bytes to
+ and from a resource. Resources have names, and these names are supplied in
+ the API calls below to identify the resource that is being read from or
+ written to.
+
+ All implementations of the Byte Stream API export the interface defined here:
+
+ * `Read()`: Reads the contents of a resource.
+
+ * `Write()`: Writes the contents of a resource. The client can call `Write()`
+ multiple times with the same resource and can check the status of the write
+ by calling `QueryWriteStatus()`.
+
+ #### Service parameters and metadata
+
+ The ByteStream API provides no direct way to access/modify any metadata
+ associated with the resource.
+
+ #### Errors
+
+ The errors returned by the service are in the Google canonical error space.
+ """
+
+ def Read(self, request, context):
+ """`Read()` is used to retrieve the contents of a resource as a sequence
+ of bytes. The bytes are returned in a sequence of responses, and the
+ responses are delivered as the results of a server-side streaming RPC.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def Write(self, request_iterator, context):
+ """`Write()` is used to send the contents of a resource as a sequence of
+ bytes. The bytes are sent in a sequence of request protos of a client-side
+ streaming RPC.
+
+ A `Write()` action is resumable. If there is an error or the connection is
+ broken during the `Write()`, the client should check the status of the
+ `Write()` by calling `QueryWriteStatus()` and continue writing from the
+ returned `committed_size`. This may be less than the amount of data the
+ client previously sent.
+
+ Calling `Write()` on a resource name that was previously written and
+ finalized could cause an error, depending on whether the underlying service
+ allows over-writing of previously written resources.
+
+ When the client closes the request channel, the service will respond with
+ a `WriteResponse`. The service will not view the resource as `complete`
+ until the client has sent a `WriteRequest` with `finish_write` set to
+ `true`. Sending any requests on a stream after sending a request with
+ `finish_write` set to `true` will cause an error. The client **should**
+ check the `WriteResponse` it receives to determine how much data the
+ service was able to commit and whether the service views the resource as
+ `complete` or not.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def QueryWriteStatus(self, request, context):
+ """`QueryWriteStatus()` is used to find the `committed_size` for a resource
+ that is being written, which can then be used as the `write_offset` for
+ the next `Write()` call.
+
+ If the resource does not exist (i.e., the resource has been deleted, or the
+ first `Write()` has not yet reached the service), this method returns the
+ error `NOT_FOUND`.
+
+ The client **may** call `QueryWriteStatus()` at any time to determine how
+ much data has been processed for this resource. This is useful if the
+ client is buffering data and needs to know which data can be safely
+ evicted. For any sequence of `QueryWriteStatus()` calls for a given
+ resource name, the sequence of returned `committed_size` values will be
+ non-decreasing.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_ByteStreamServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'Read': grpc.unary_stream_rpc_method_handler(
+ servicer.Read,
+ request_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.FromString,
+ response_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.SerializeToString,
+ ),
+ 'Write': grpc.stream_unary_rpc_method_handler(
+ servicer.Write,
+ request_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.FromString,
+ response_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.SerializeToString,
+ ),
+ 'QueryWriteStatus': grpc.unary_unary_rpc_method_handler(
+ servicer.QueryWriteStatus,
+ request_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.FromString,
+ response_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'google.bytestream.ByteStream', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
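
A minimal sketch of driving these stubs from client code, assuming a gRPC channel to a server that implements ByteStream; the endpoint and resource name are hypothetical.

import grpc

from buildstream._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')      # hypothetical endpoint
stub = bytestream_pb2_grpc.ByteStreamStub(channel)

# Read() is server-streaming: concatenate the data of each ReadResponse.
read_request = bytestream_pb2.ReadRequest(
    resource_name='example/blob', read_offset=0, read_limit=0)
blob = b''.join(response.data for response in stub.Read(read_request))

# Write() is client-streaming: pass it an iterator of WriteRequests (such as
# the write_requests() generator sketched after bytestream.proto above) and
# it returns a single WriteResponse. QueryWriteStatus() reports how much the
# server has committed, which is the offset to resume from after a failure.
status = stub.QueryWriteStatus(
    bytestream_pb2.QueryWriteStatusRequest(resource_name='example/blob'))
print(status.committed_size, status.complete)
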
diff --git a/src/buildstream/_protos/google/longrunning/__init__.py b/src/buildstream/_protos/google/longrunning/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/google/longrunning/__init__.py
diff --git a/src/buildstream/_protos/google/longrunning/operations.proto b/src/buildstream/_protos/google/longrunning/operations.proto
new file mode 100644
index 000000000..76fef29c3
--- /dev/null
+++ b/src/buildstream/_protos/google/longrunning/operations.proto
@@ -0,0 +1,160 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.longrunning;
+
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Google.LongRunning";
+option go_package = "google.golang.org/genproto/googleapis/longrunning;longrunning";
+option java_multiple_files = true;
+option java_outer_classname = "OperationsProto";
+option java_package = "com.google.longrunning";
+option php_namespace = "Google\\LongRunning";
+
+
+// Manages long-running operations with an API service.
+//
+// When an API method normally takes a long time to complete, it can be designed
+// to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+// interface to receive the real response asynchronously by polling the
+// operation resource, or pass the operation resource to another API (such as
+// Google Cloud Pub/Sub API) to receive the response. Any API service that
+// returns long-running operations should implement the `Operations` interface
+// so developers can have a consistent client experience.
+service Operations {
+ // Lists operations that match the specified filter in the request. If the
+ // server doesn't support this method, it returns `UNIMPLEMENTED`.
+ //
+ // NOTE: the `name` binding below allows API services to override the binding
+ // to use different resource name schemes, such as `users/*/operations`.
+ rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
+ option (google.api.http) = { get: "/v1/{name=operations}" };
+ }
+
+ // Gets the latest state of a long-running operation. Clients can use this
+ // method to poll the operation result at intervals as recommended by the API
+ // service.
+ rpc GetOperation(GetOperationRequest) returns (Operation) {
+ option (google.api.http) = { get: "/v1/{name=operations/**}" };
+ }
+
+ // Deletes a long-running operation. This method indicates that the client is
+ // no longer interested in the operation result. It does not cancel the
+ // operation. If the server doesn't support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`.
+ rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { delete: "/v1/{name=operations/**}" };
+ }
+
+ // Starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not
+ // guaranteed. If the server doesn't support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`. Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ // corresponding to `Code.CANCELLED`.
+ rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" };
+ }
+}
+
+// This resource represents a long-running operation that is the result of a
+// network API call.
+message Operation {
+ // The server-assigned name, which is only unique within the same service that
+ // originally returns it. If you use the default HTTP mapping, the
+ // `name` should have the format of `operations/some/unique/name`.
+ string name = 1;
+
+ // Service-specific metadata associated with the operation. It typically
+ // contains progress information and common metadata such as create time.
+ // Some services might not provide such metadata. Any method that returns a
+ // long-running operation should document the metadata type, if any.
+ google.protobuf.Any metadata = 2;
+
+ // If the value is `false`, it means the operation is still in progress.
+ // If true, the operation is completed, and either `error` or `response` is
+ // available.
+ bool done = 3;
+
+ // The operation result, which can be either an `error` or a valid `response`.
+ // If `done` == `false`, neither `error` nor `response` is set.
+ // If `done` == `true`, exactly one of `error` or `response` is set.
+ oneof result {
+ // The error result of the operation in case of failure or cancellation.
+ google.rpc.Status error = 4;
+
+ // The normal response of the operation in case of success. If the original
+ // method returns no data on success, such as `Delete`, the response is
+ // `google.protobuf.Empty`. If the original method is standard
+ // `Get`/`Create`/`Update`, the response should be the resource. For other
+ // methods, the response should have the type `XxxResponse`, where `Xxx`
+ // is the original method name. For example, if the original method name
+ // is `TakeSnapshot()`, the inferred response type is
+ // `TakeSnapshotResponse`.
+ google.protobuf.Any response = 5;
+ }
+}
+
+// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
+message GetOperationRequest {
+ // The name of the operation resource.
+ string name = 1;
+}
+
+// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
+message ListOperationsRequest {
+ // The name of the operation collection.
+ string name = 4;
+
+ // The standard list filter.
+ string filter = 1;
+
+ // The standard list page size.
+ int32 page_size = 2;
+
+ // The standard list page token.
+ string page_token = 3;
+}
+
+// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
+message ListOperationsResponse {
+ // A list of operations that matches the specified filter in the request.
+ repeated Operation operations = 1;
+
+ // The standard List next-page token.
+ string next_page_token = 2;
+}
+
+// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
+message CancelOperationRequest {
+ // The name of the operation resource to be cancelled.
+ string name = 1;
+}
+
+// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
+message DeleteOperationRequest {
+ // The name of the operation resource to be deleted.
+ string name = 1;
+}
+
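
A minimal sketch of the polling pattern the Operations comments describe, assuming an OperationsStub is generated alongside this proto (the generated gRPC stub is not shown in this excerpt) and that a channel and operation name are available; all names here are hypothetical.

import time

import grpc

from buildstream._protos.google.longrunning import operations_pb2, operations_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')      # hypothetical endpoint
stub = operations_pb2_grpc.OperationsStub(channel)
request = operations_pb2.GetOperationRequest(name='operations/example')

# Poll until done; exactly one of error/response is set once done is True.
operation = stub.GetOperation(request)
while not operation.done:
    time.sleep(1)
    operation = stub.GetOperation(request)

if operation.HasField('error'):
    print('failed:', operation.error.code, operation.error.message)
else:
    print('response type:', operation.response.type_url)
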
diff --git a/src/buildstream/_protos/google/longrunning/operations_pb2.py b/src/buildstream/_protos/google/longrunning/operations_pb2.py
new file mode 100644
index 000000000..9fd89937f
--- /dev/null
+++ b/src/buildstream/_protos/google/longrunning/operations_pb2.py
@@ -0,0 +1,391 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/longrunning/operations.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/longrunning/operations.proto',
+ package='google.longrunning',
+ syntax='proto3',
+ serialized_pb=_b('\n#google/longrunning/operations.proto\x12\x12google.longrunning\x1a\x1cgoogle/api/annotations.proto\x1a\x19google/protobuf/any.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x17google/rpc/status.proto\"\xa8\x01\n\tOperation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x08metadata\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12\x0c\n\x04\x64one\x18\x03 \x01(\x08\x12#\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x12.google.rpc.StatusH\x00\x12(\n\x08response\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x08\n\x06result\"#\n\x13GetOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\\\n\x15ListOperationsRequest\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"d\n\x16ListOperationsResponse\x12\x31\n\noperations\x18\x01 \x03(\x0b\x32\x1d.google.longrunning.Operation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"&\n\x16\x43\x61ncelOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"&\n\x16\x44\x65leteOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t2\x8c\x04\n\nOperations\x12\x86\x01\n\x0eListOperations\x12).google.longrunning.ListOperationsRequest\x1a*.google.longrunning.ListOperationsResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/v1/{name=operations}\x12x\n\x0cGetOperation\x12\'.google.longrunning.GetOperationRequest\x1a\x1d.google.longrunning.Operation\" \x82\xd3\xe4\x93\x02\x1a\x12\x18/v1/{name=operations/**}\x12w\n\x0f\x44\x65leteOperation\x12*.google.longrunning.DeleteOperationRequest\x1a\x16.google.protobuf.Empty\" \x82\xd3\xe4\x93\x02\x1a*\x18/v1/{name=operations/**}\x12\x81\x01\n\x0f\x43\x61ncelOperation\x12*.google.longrunning.CancelOperationRequest\x1a\x16.google.protobuf.Empty\"*\x82\xd3\xe4\x93\x02$\"\x1f/v1/{name=operations/**}:cancel:\x01*B\x94\x01\n\x16\x63om.google.longrunningB\x0fOperationsProtoP\x01Z=google.golang.org/genproto/googleapis/longrunning;longrunning\xaa\x02\x12Google.LongRunning\xca\x02\x12Google\\LongRunningb\x06proto3')
+ ,
+ dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
+
+
+
+
+_OPERATION = _descriptor.Descriptor(
+ name='Operation',
+ full_name='google.longrunning.Operation',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.longrunning.Operation.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='metadata', full_name='google.longrunning.Operation.metadata', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='done', full_name='google.longrunning.Operation.done', index=2,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='error', full_name='google.longrunning.Operation.error', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='response', full_name='google.longrunning.Operation.response', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ _descriptor.OneofDescriptor(
+ name='result', full_name='google.longrunning.Operation.result',
+ index=0, containing_type=None, fields=[]),
+ ],
+ serialized_start=171,
+ serialized_end=339,
+)
+
+
+_GETOPERATIONREQUEST = _descriptor.Descriptor(
+ name='GetOperationRequest',
+ full_name='google.longrunning.GetOperationRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.longrunning.GetOperationRequest.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=341,
+ serialized_end=376,
+)
+
+
+_LISTOPERATIONSREQUEST = _descriptor.Descriptor(
+ name='ListOperationsRequest',
+ full_name='google.longrunning.ListOperationsRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.longrunning.ListOperationsRequest.name', index=0,
+ number=4, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='filter', full_name='google.longrunning.ListOperationsRequest.filter', index=1,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_size', full_name='google.longrunning.ListOperationsRequest.page_size', index=2,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_token', full_name='google.longrunning.ListOperationsRequest.page_token', index=3,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=378,
+ serialized_end=470,
+)
+
+
+_LISTOPERATIONSRESPONSE = _descriptor.Descriptor(
+ name='ListOperationsResponse',
+ full_name='google.longrunning.ListOperationsResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='operations', full_name='google.longrunning.ListOperationsResponse.operations', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='next_page_token', full_name='google.longrunning.ListOperationsResponse.next_page_token', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=472,
+ serialized_end=572,
+)
+
+
+_CANCELOPERATIONREQUEST = _descriptor.Descriptor(
+ name='CancelOperationRequest',
+ full_name='google.longrunning.CancelOperationRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.longrunning.CancelOperationRequest.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=574,
+ serialized_end=612,
+)
+
+
+_DELETEOPERATIONREQUEST = _descriptor.Descriptor(
+ name='DeleteOperationRequest',
+ full_name='google.longrunning.DeleteOperationRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.longrunning.DeleteOperationRequest.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=614,
+ serialized_end=652,
+)
+
+_OPERATION.fields_by_name['metadata'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+_OPERATION.fields_by_name['error'].message_type = google_dot_rpc_dot_status__pb2._STATUS
+_OPERATION.fields_by_name['response'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+_OPERATION.oneofs_by_name['result'].fields.append(
+ _OPERATION.fields_by_name['error'])
+_OPERATION.fields_by_name['error'].containing_oneof = _OPERATION.oneofs_by_name['result']
+_OPERATION.oneofs_by_name['result'].fields.append(
+ _OPERATION.fields_by_name['response'])
+_OPERATION.fields_by_name['response'].containing_oneof = _OPERATION.oneofs_by_name['result']
+_LISTOPERATIONSRESPONSE.fields_by_name['operations'].message_type = _OPERATION
+DESCRIPTOR.message_types_by_name['Operation'] = _OPERATION
+DESCRIPTOR.message_types_by_name['GetOperationRequest'] = _GETOPERATIONREQUEST
+DESCRIPTOR.message_types_by_name['ListOperationsRequest'] = _LISTOPERATIONSREQUEST
+DESCRIPTOR.message_types_by_name['ListOperationsResponse'] = _LISTOPERATIONSRESPONSE
+DESCRIPTOR.message_types_by_name['CancelOperationRequest'] = _CANCELOPERATIONREQUEST
+DESCRIPTOR.message_types_by_name['DeleteOperationRequest'] = _DELETEOPERATIONREQUEST
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), dict(
+ DESCRIPTOR = _OPERATION,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.Operation)
+ ))
+_sym_db.RegisterMessage(Operation)
+
+GetOperationRequest = _reflection.GeneratedProtocolMessageType('GetOperationRequest', (_message.Message,), dict(
+ DESCRIPTOR = _GETOPERATIONREQUEST,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.GetOperationRequest)
+ ))
+_sym_db.RegisterMessage(GetOperationRequest)
+
+ListOperationsRequest = _reflection.GeneratedProtocolMessageType('ListOperationsRequest', (_message.Message,), dict(
+ DESCRIPTOR = _LISTOPERATIONSREQUEST,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsRequest)
+ ))
+_sym_db.RegisterMessage(ListOperationsRequest)
+
+ListOperationsResponse = _reflection.GeneratedProtocolMessageType('ListOperationsResponse', (_message.Message,), dict(
+ DESCRIPTOR = _LISTOPERATIONSRESPONSE,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsResponse)
+ ))
+_sym_db.RegisterMessage(ListOperationsResponse)
+
+CancelOperationRequest = _reflection.GeneratedProtocolMessageType('CancelOperationRequest', (_message.Message,), dict(
+ DESCRIPTOR = _CANCELOPERATIONREQUEST,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.CancelOperationRequest)
+ ))
+_sym_db.RegisterMessage(CancelOperationRequest)
+
+DeleteOperationRequest = _reflection.GeneratedProtocolMessageType('DeleteOperationRequest', (_message.Message,), dict(
+ DESCRIPTOR = _DELETEOPERATIONREQUEST,
+ __module__ = 'google.longrunning.operations_pb2'
+ # @@protoc_insertion_point(class_scope:google.longrunning.DeleteOperationRequest)
+ ))
+_sym_db.RegisterMessage(DeleteOperationRequest)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.longrunningB\017OperationsProtoP\001Z=google.golang.org/genproto/googleapis/longrunning;longrunning\252\002\022Google.LongRunning\312\002\022Google\\LongRunning'))
+
+_OPERATIONS = _descriptor.ServiceDescriptor(
+ name='Operations',
+ full_name='google.longrunning.Operations',
+ file=DESCRIPTOR,
+ index=0,
+ options=None,
+ serialized_start=655,
+ serialized_end=1179,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='ListOperations',
+ full_name='google.longrunning.Operations.ListOperations',
+ index=0,
+ containing_service=None,
+ input_type=_LISTOPERATIONSREQUEST,
+ output_type=_LISTOPERATIONSRESPONSE,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/v1/{name=operations}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='GetOperation',
+ full_name='google.longrunning.Operations.GetOperation',
+ index=1,
+ containing_service=None,
+ input_type=_GETOPERATIONREQUEST,
+ output_type=_OPERATION,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\022\030/v1/{name=operations/**}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='DeleteOperation',
+ full_name='google.longrunning.Operations.DeleteOperation',
+ index=2,
+ containing_service=None,
+ input_type=_DELETEOPERATIONREQUEST,
+ output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032*\030/v1/{name=operations/**}')),
+ ),
+ _descriptor.MethodDescriptor(
+ name='CancelOperation',
+ full_name='google.longrunning.Operations.CancelOperation',
+ index=3,
+ containing_service=None,
+ input_type=_CANCELOPERATIONREQUEST,
+ output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
+ options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002$\"\037/v1/{name=operations/**}:cancel:\001*')),
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_OPERATIONS)
+
+DESCRIPTOR.services_by_name['Operations'] = _OPERATIONS
+
+# @@protoc_insertion_point(module_scope)
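As a quick orientation to the generated module above, here is a minimal sketch (the operation name and error message are placeholders) showing how an Operation message is built and how its `result` oneof behaves:

from buildstream._protos.google.longrunning import operations_pb2
from buildstream._protos.google.rpc import status_pb2

# An in-flight operation: neither arm of the `result` oneof is set yet.
op = operations_pb2.Operation(name='operations/example', done=False)
assert op.WhichOneof('result') is None

# A failed operation: populating `error` selects that arm of the oneof.
op.done = True
op.error.CopyFrom(status_pb2.Status(code=1, message='operation cancelled'))
assert op.WhichOneof('result') == 'error'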
diff --git a/src/buildstream/_protos/google/longrunning/operations_pb2_grpc.py b/src/buildstream/_protos/google/longrunning/operations_pb2_grpc.py
new file mode 100644
index 000000000..8f89862e7
--- /dev/null
+++ b/src/buildstream/_protos/google/longrunning/operations_pb2_grpc.py
@@ -0,0 +1,132 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
+from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
+
+
+class OperationsStub(object):
+ """Manages long-running operations with an API service.
+
+ When an API method normally takes a long time to complete, it can be designed
+ to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+ interface to receive the real response asynchronously by polling the
+ operation resource, or pass the operation resource to another API (such as
+ Google Cloud Pub/Sub API) to receive the response. Any API service that
+ returns long-running operations should implement the `Operations` interface
+ so developers can have a consistent client experience.
+ """
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.ListOperations = channel.unary_unary(
+ '/google.longrunning.Operations/ListOperations',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString,
+ )
+ self.GetOperation = channel.unary_unary(
+ '/google.longrunning.Operations/GetOperation',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString,
+ response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
+ )
+ self.DeleteOperation = channel.unary_unary(
+ '/google.longrunning.Operations/DeleteOperation',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ )
+ self.CancelOperation = channel.unary_unary(
+ '/google.longrunning.Operations/CancelOperation',
+ request_serializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
+ )
+
+
+class OperationsServicer(object):
+ """Manages long-running operations with an API service.
+
+ When an API method normally takes a long time to complete, it can be designed
+ to return [Operation][google.longrunning.Operation] to the client, and the client can use this
+ interface to receive the real response asynchronously by polling the
+ operation resource, or pass the operation resource to another API (such as
+ Google Cloud Pub/Sub API) to receive the response. Any API service that
+ returns long-running operations should implement the `Operations` interface
+ so developers can have a consistent client experience.
+ """
+
+ def ListOperations(self, request, context):
+ """Lists operations that match the specified filter in the request. If the
+ server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+ NOTE: the `name` binding below allows API services to override the binding
+ to use different resource name schemes, such as `users/*/operations`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetOperation(self, request, context):
+ """Gets the latest state of a long-running operation. Clients can use this
+ method to poll the operation result at intervals as recommended by the API
+ service.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def DeleteOperation(self, request, context):
+ """Deletes a long-running operation. This method indicates that the client is
+ no longer interested in the operation result. It does not cancel the
+ operation. If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def CancelOperation(self, request, context):
+ """Starts asynchronous cancellation on a long-running operation. The server
+ makes a best effort to cancel the operation, but success is not
+ guaranteed. If the server doesn't support this method, it returns
+ `google.rpc.Code.UNIMPLEMENTED`. Clients can use
+ [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ other methods to check whether the cancellation succeeded or whether the
+ operation completed despite cancellation. On successful cancellation,
+ the operation is not deleted; instead, it becomes an operation with
+ an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ corresponding to `Code.CANCELLED`.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_OperationsServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'ListOperations': grpc.unary_unary_rpc_method_handler(
+ servicer.ListOperations,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.SerializeToString,
+ ),
+ 'GetOperation': grpc.unary_unary_rpc_method_handler(
+ servicer.GetOperation,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.FromString,
+ response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
+ ),
+ 'DeleteOperation': grpc.unary_unary_rpc_method_handler(
+ servicer.DeleteOperation,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ 'CancelOperation': grpc.unary_unary_rpc_method_handler(
+ servicer.CancelOperation,
+ request_deserializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.FromString,
+ response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'google.longrunning.Operations', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
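For reference, the stub above is driven through a grpc.Channel; a hedged sketch of polling a long-running operation (the endpoint address and operation name are placeholders):

import grpc

from buildstream._protos.google.longrunning import operations_pb2, operations_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')  # placeholder endpoint
stub = operations_pb2_grpc.OperationsStub(channel)

# Poll the latest state of a named operation.
operation = stub.GetOperation(
    operations_pb2.GetOperationRequest(name='operations/example'))
if operation.done:
    print(operation.WhichOneof('result'))  # either 'error' or 'response'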
diff --git a/src/buildstream/_protos/google/rpc/__init__.py b/src/buildstream/_protos/google/rpc/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/_protos/google/rpc/__init__.py
diff --git a/src/buildstream/_protos/google/rpc/code.proto b/src/buildstream/_protos/google/rpc/code.proto
new file mode 100644
index 000000000..74e2c5c9a
--- /dev/null
+++ b/src/buildstream/_protos/google/rpc/code.proto
@@ -0,0 +1,186 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+option go_package = "google.golang.org/genproto/googleapis/rpc/code;code";
+option java_multiple_files = true;
+option java_outer_classname = "CodeProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// The canonical error codes for Google APIs.
+//
+//
+// Sometimes multiple error codes may apply. Services should return
+// the most specific error code that applies. For example, prefer
+// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
+// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
+enum Code {
+ // Not an error; returned on success
+ //
+ // HTTP Mapping: 200 OK
+ OK = 0;
+
+ // The operation was cancelled, typically by the caller.
+ //
+ // HTTP Mapping: 499 Client Closed Request
+ CANCELLED = 1;
+
+ // Unknown error. For example, this error may be returned when
+ // a `Status` value received from another address space belongs to
+ // an error space that is not known in this address space. Also
+ // errors raised by APIs that do not return enough error information
+ // may be converted to this error.
+ //
+ // HTTP Mapping: 500 Internal Server Error
+ UNKNOWN = 2;
+
+ // The client specified an invalid argument. Note that this differs
+ // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
+ // that are problematic regardless of the state of the system
+ // (e.g., a malformed file name).
+ //
+ // HTTP Mapping: 400 Bad Request
+ INVALID_ARGUMENT = 3;
+
+ // The deadline expired before the operation could complete. For operations
+ // that change the state of the system, this error may be returned
+ // even if the operation has completed successfully. For example, a
+ // successful response from a server could have been delayed long
+ // enough for the deadline to expire.
+ //
+ // HTTP Mapping: 504 Gateway Timeout
+ DEADLINE_EXCEEDED = 4;
+
+ // Some requested entity (e.g., file or directory) was not found.
+ //
+ // Note to server developers: if a request is denied for an entire class
+ // of users, such as gradual feature rollout or undocumented whitelist,
+ // `NOT_FOUND` may be used. If a request is denied for some users within
+ // a class of users, such as user-based access control, `PERMISSION_DENIED`
+ // must be used.
+ //
+ // HTTP Mapping: 404 Not Found
+ NOT_FOUND = 5;
+
+ // The entity that a client attempted to create (e.g., file or directory)
+ // already exists.
+ //
+ // HTTP Mapping: 409 Conflict
+ ALREADY_EXISTS = 6;
+
+ // The caller does not have permission to execute the specified
+ // operation. `PERMISSION_DENIED` must not be used for rejections
+ // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
+ // instead for those errors). `PERMISSION_DENIED` must not be
+ // used if the caller can not be identified (use `UNAUTHENTICATED`
+ // instead for those errors). This error code does not imply the
+ // request is valid or the requested entity exists or satisfies
+ // other pre-conditions.
+ //
+ // HTTP Mapping: 403 Forbidden
+ PERMISSION_DENIED = 7;
+
+ // The request does not have valid authentication credentials for the
+ // operation.
+ //
+ // HTTP Mapping: 401 Unauthorized
+ UNAUTHENTICATED = 16;
+
+ // Some resource has been exhausted, perhaps a per-user quota, or
+ // perhaps the entire file system is out of space.
+ //
+ // HTTP Mapping: 429 Too Many Requests
+ RESOURCE_EXHAUSTED = 8;
+
+ // The operation was rejected because the system is not in a state
+ // required for the operation's execution. For example, the directory
+ // to be deleted is non-empty, an rmdir operation is applied to
+ // a non-directory, etc.
+ //
+ // Service implementors can use the following guidelines to decide
+ // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
+ // (a) Use `UNAVAILABLE` if the client can retry just the failing call.
+ // (b) Use `ABORTED` if the client should retry at a higher level
+ // (e.g., when a client-specified test-and-set fails, indicating the
+ // client should restart a read-modify-write sequence).
+ // (c) Use `FAILED_PRECONDITION` if the client should not retry until
+ // the system state has been explicitly fixed. E.g., if an "rmdir"
+ // fails because the directory is non-empty, `FAILED_PRECONDITION`
+ // should be returned since the client should not retry unless
+ // the files are deleted from the directory.
+ //
+ // HTTP Mapping: 400 Bad Request
+ FAILED_PRECONDITION = 9;
+
+ // The operation was aborted, typically due to a concurrency issue such as
+ // a sequencer check failure or transaction abort.
+ //
+ // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+ // `ABORTED`, and `UNAVAILABLE`.
+ //
+ // HTTP Mapping: 409 Conflict
+ ABORTED = 10;
+
+ // The operation was attempted past the valid range. E.g., seeking or
+ // reading past end-of-file.
+ //
+ // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
+ // be fixed if the system state changes. For example, a 32-bit file
+ // system will generate `INVALID_ARGUMENT` if asked to read at an
+ // offset that is not in the range [0,2^32-1], but it will generate
+ // `OUT_OF_RANGE` if asked to read from an offset past the current
+ // file size.
+ //
+ // There is a fair bit of overlap between `FAILED_PRECONDITION` and
+ // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
+ // error) when it applies so that callers who are iterating through
+ // a space can easily look for an `OUT_OF_RANGE` error to detect when
+ // they are done.
+ //
+ // HTTP Mapping: 400 Bad Request
+ OUT_OF_RANGE = 11;
+
+ // The operation is not implemented or is not supported/enabled in this
+ // service.
+ //
+ // HTTP Mapping: 501 Not Implemented
+ UNIMPLEMENTED = 12;
+
+ // Internal errors. This means that some invariants expected by the
+ // underlying system have been broken. This error code is reserved
+ // for serious errors.
+ //
+ // HTTP Mapping: 500 Internal Server Error
+ INTERNAL = 13;
+
+ // The service is currently unavailable. This is most likely a
+ // transient condition, which can be corrected by retrying with
+ // a backoff.
+ //
+ // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+ // `ABORTED`, and `UNAVAILABLE`.
+ //
+ // HTTP Mapping: 503 Service Unavailable
+ UNAVAILABLE = 14;
+
+ // Unrecoverable data loss or corruption.
+ //
+ // HTTP Mapping: 500 Internal Server Error
+ DATA_LOSS = 15;
+}
\ No newline at end of file
diff --git a/src/buildstream/_protos/google/rpc/code_pb2.py b/src/buildstream/_protos/google/rpc/code_pb2.py
new file mode 100644
index 000000000..e06dea194
--- /dev/null
+++ b/src/buildstream/_protos/google/rpc/code_pb2.py
@@ -0,0 +1,133 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/rpc/code.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/rpc/code.proto',
+ package='google.rpc',
+ syntax='proto3',
+ serialized_options=_b('\n\016com.google.rpcB\tCodeProtoP\001Z3google.golang.org/genproto/googleapis/rpc/code;code\242\002\003RPC'),
+ serialized_pb=_b('\n\x15google/rpc/code.proto\x12\ngoogle.rpc*\xb7\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\r\n\tCANCELLED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f\x42X\n\x0e\x63om.google.rpcB\tCodeProtoP\x01Z3google.golang.org/genproto/googleapis/rpc/code;code\xa2\x02\x03RPCb\x06proto3')
+)
+
+_CODE = _descriptor.EnumDescriptor(
+ name='Code',
+ full_name='google.rpc.Code',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='OK', index=0, number=0,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='CANCELLED', index=1, number=1,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='UNKNOWN', index=2, number=2,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='INVALID_ARGUMENT', index=3, number=3,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='DEADLINE_EXCEEDED', index=4, number=4,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='NOT_FOUND', index=5, number=5,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='ALREADY_EXISTS', index=6, number=6,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='PERMISSION_DENIED', index=7, number=7,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='UNAUTHENTICATED', index=8, number=16,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='RESOURCE_EXHAUSTED', index=9, number=8,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='FAILED_PRECONDITION', index=10, number=9,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='ABORTED', index=11, number=10,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='OUT_OF_RANGE', index=12, number=11,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='UNIMPLEMENTED', index=13, number=12,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='INTERNAL', index=14, number=13,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='UNAVAILABLE', index=15, number=14,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='DATA_LOSS', index=16, number=15,
+ serialized_options=None,
+ type=None),
+ ],
+ containing_type=None,
+ serialized_options=None,
+ serialized_start=38,
+ serialized_end=349,
+)
+_sym_db.RegisterEnumDescriptor(_CODE)
+
+Code = enum_type_wrapper.EnumTypeWrapper(_CODE)
+OK = 0
+CANCELLED = 1
+UNKNOWN = 2
+INVALID_ARGUMENT = 3
+DEADLINE_EXCEEDED = 4
+NOT_FOUND = 5
+ALREADY_EXISTS = 6
+PERMISSION_DENIED = 7
+UNAUTHENTICATED = 16
+RESOURCE_EXHAUSTED = 8
+FAILED_PRECONDITION = 9
+ABORTED = 10
+OUT_OF_RANGE = 11
+UNIMPLEMENTED = 12
+INTERNAL = 13
+UNAVAILABLE = 14
+DATA_LOSS = 15
+
+
+DESCRIPTOR.enum_types_by_name['Code'] = _CODE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
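Because `Code` is exposed through protobuf's EnumTypeWrapper, names and numbers can be converted in both directions; a small sketch:

from buildstream._protos.google.rpc import code_pb2

assert code_pb2.NOT_FOUND == 5
assert code_pb2.Code.Name(code_pb2.UNAVAILABLE) == 'UNAVAILABLE'
assert code_pb2.Code.Value('FAILED_PRECONDITION') == 9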
diff --git a/src/buildstream/_protos/google/rpc/code_pb2_grpc.py b/src/buildstream/_protos/google/rpc/code_pb2_grpc.py
new file mode 100644
index 000000000..a89435267
--- /dev/null
+++ b/src/buildstream/_protos/google/rpc/code_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/src/buildstream/_protos/google/rpc/status.proto b/src/buildstream/_protos/google/rpc/status.proto
new file mode 100644
index 000000000..0839ee966
--- /dev/null
+++ b/src/buildstream/_protos/google/rpc/status.proto
@@ -0,0 +1,92 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+import "google/protobuf/any.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/rpc/status;status";
+option java_multiple_files = true;
+option java_outer_classname = "StatusProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// The `Status` type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs. It is used by
+// [gRPC](https://github.com/grpc). The error model is designed to be:
+//
+// - Simple to use and understand for most users
+// - Flexible enough to meet unexpected needs
+//
+// # Overview
+//
+// The `Status` message contains three pieces of data: error code, error message,
+// and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
+// error message should be a developer-facing English message that helps
+// developers *understand* and *resolve* the error. If a localized user-facing
+// error message is needed, put the localized message in the error details or
+// localize it in the client. The optional error details may contain arbitrary
+// information about the error. There is a predefined set of error detail types
+// in the package `google.rpc` that can be used for common error conditions.
+//
+// # Language mapping
+//
+// The `Status` message is the logical representation of the error model, but it
+// is not necessarily the actual wire format. When the `Status` message is
+// exposed in different client libraries and different wire protocols, it can be
+// mapped differently. For example, it will likely be mapped to some exceptions
+// in Java, but more likely mapped to some error codes in C.
+//
+// # Other uses
+//
+// The error model and the `Status` message can be used in a variety of
+// environments, either with or without APIs, to provide a
+// consistent developer experience across different environments.
+//
+// Example uses of this error model include:
+//
+// - Partial errors. If a service needs to return partial errors to the client,
+// it may embed the `Status` in the normal response to indicate the partial
+// errors.
+//
+// - Workflow errors. A typical workflow has multiple steps. Each step may
+// have a `Status` message for error reporting.
+//
+// - Batch operations. If a client uses batch request and batch response, the
+// `Status` message should be used directly inside batch response, one for
+// each error sub-response.
+//
+// - Asynchronous operations. If an API call embeds asynchronous operation
+// results in its response, the status of those operations should be
+// represented directly using the `Status` message.
+//
+// - Logging. If some API errors are stored in logs, the message `Status` could
+// be used directly after any stripping needed for security/privacy reasons.
+message Status {
+ // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+ int32 code = 1;
+
+ // A developer-facing error message, which should be in English. Any
+ // user-facing error message should be localized and sent in the
+ // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+ string message = 2;
+
+ // A list of messages that carry the error details. There is a common set of
+ // message types for APIs to use.
+ repeated google.protobuf.Any details = 3;
+}
diff --git a/src/buildstream/_protos/google/rpc/status_pb2.py b/src/buildstream/_protos/google/rpc/status_pb2.py
new file mode 100644
index 000000000..6c4772311
--- /dev/null
+++ b/src/buildstream/_protos/google/rpc/status_pb2.py
@@ -0,0 +1,88 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/rpc/status.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='google/rpc/status.proto',
+ package='google.rpc',
+ syntax='proto3',
+ serialized_pb=_b('\n\x17google/rpc/status.proto\x12\ngoogle.rpc\x1a\x19google/protobuf/any.proto\"N\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12%\n\x07\x64\x65tails\x18\x03 \x03(\x0b\x32\x14.google.protobuf.AnyB^\n\x0e\x63om.google.rpcB\x0bStatusProtoP\x01Z7google.golang.org/genproto/googleapis/rpc/status;status\xa2\x02\x03RPCb\x06proto3')
+ ,
+ dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
+
+
+
+
+_STATUS = _descriptor.Descriptor(
+ name='Status',
+ full_name='google.rpc.Status',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='code', full_name='google.rpc.Status.code', index=0,
+ number=1, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='message', full_name='google.rpc.Status.message', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='details', full_name='google.rpc.Status.details', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=66,
+ serialized_end=144,
+)
+
+_STATUS.fields_by_name['details'].message_type = google_dot_protobuf_dot_any__pb2._ANY
+DESCRIPTOR.message_types_by_name['Status'] = _STATUS
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), dict(
+ DESCRIPTOR = _STATUS,
+ __module__ = 'google.rpc.status_pb2'
+ # @@protoc_insertion_point(class_scope:google.rpc.Status)
+ ))
+_sym_db.RegisterMessage(Status)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.rpcB\013StatusProtoP\001Z7google.golang.org/genproto/googleapis/rpc/status;status\242\002\003RPC'))
+# @@protoc_insertion_point(module_scope)
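A short sketch of the generated Status message in use (the message text is illustrative); it round-trips through the wire format like any other protobuf message:

from buildstream._protos.google.rpc import code_pb2, status_pb2

status = status_pb2.Status(code=code_pb2.NOT_FOUND, message='artifact not found')
payload = status.SerializeToString()

parsed = status_pb2.Status()
parsed.ParseFromString(payload)
assert parsed.code == code_pb2.NOT_FOUND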
diff --git a/src/buildstream/_protos/google/rpc/status_pb2_grpc.py b/src/buildstream/_protos/google/rpc/status_pb2_grpc.py
new file mode 100644
index 000000000..a89435267
--- /dev/null
+++ b/src/buildstream/_protos/google/rpc/status_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/src/buildstream/_scheduler/__init__.py b/src/buildstream/_scheduler/__init__.py
new file mode 100644
index 000000000..d2f458fa5
--- /dev/null
+++ b/src/buildstream/_scheduler/__init__.py
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from .queues import Queue, QueueStatus
+
+from .queues.fetchqueue import FetchQueue
+from .queues.sourcepushqueue import SourcePushQueue
+from .queues.trackqueue import TrackQueue
+from .queues.buildqueue import BuildQueue
+from .queues.artifactpushqueue import ArtifactPushQueue
+from .queues.pullqueue import PullQueue
+
+from .scheduler import Scheduler, SchedStatus
+from .jobs import ElementJob, JobStatus
diff --git a/src/buildstream/_scheduler/jobs/__init__.py b/src/buildstream/_scheduler/jobs/__init__.py
new file mode 100644
index 000000000..3e213171a
--- /dev/null
+++ b/src/buildstream/_scheduler/jobs/__init__.py
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+from .elementjob import ElementJob
+from .cachesizejob import CacheSizeJob
+from .cleanupjob import CleanupJob
+from .job import JobStatus
diff --git a/src/buildstream/_scheduler/jobs/cachesizejob.py b/src/buildstream/_scheduler/jobs/cachesizejob.py
new file mode 100644
index 000000000..5f27b7fc1
--- /dev/null
+++ b/src/buildstream/_scheduler/jobs/cachesizejob.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# Tristan Daniël Maat <tristan.maat@codethink.co.uk>
+#
+from .job import Job, JobStatus
+
+
+class CacheSizeJob(Job):
+ def __init__(self, *args, complete_cb, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._complete_cb = complete_cb
+
+ context = self._scheduler.context
+ self._casquota = context.get_casquota()
+
+ def child_process(self):
+ return self._casquota.compute_cache_size()
+
+ def parent_complete(self, status, result):
+ if status == JobStatus.OK:
+ self._casquota.set_cache_size(result)
+
+ if self._complete_cb:
+ self._complete_cb(status, result)
+
+ def child_process_data(self):
+ return {}
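The `complete_cb` given to this job receives the `(status, result)` pair forwarded by parent_complete(); a minimal sketch of a matching callback, assuming it is only used for reporting (the function name is hypothetical):

from buildstream._scheduler.jobs import JobStatus

def on_cache_size_complete(status, cache_size):
    # Runs in the main process once the child has computed the size.
    if status == JobStatus.OK:
        print("Computed cache size: {} bytes".format(cache_size))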
diff --git a/src/buildstream/_scheduler/jobs/cleanupjob.py b/src/buildstream/_scheduler/jobs/cleanupjob.py
new file mode 100644
index 000000000..4764b30b3
--- /dev/null
+++ b/src/buildstream/_scheduler/jobs/cleanupjob.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# Tristan Daniël Maat <tristan.maat@codethink.co.uk>
+#
+from .job import Job, JobStatus
+
+
+class CleanupJob(Job):
+ def __init__(self, *args, complete_cb, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._complete_cb = complete_cb
+
+ context = self._scheduler.context
+ self._casquota = context.get_casquota()
+
+ def child_process(self):
+ def progress():
+ self.send_message('update-cache-size',
+ self._casquota.get_cache_size())
+ return self._casquota.clean(progress)
+
+ def handle_message(self, message_type, message):
+ # Update the cache size in the main process as we go,
+ # this provides better feedback in the UI.
+ if message_type == 'update-cache-size':
+ self._casquota.set_cache_size(message, write_to_disk=False)
+ return True
+
+ return False
+
+ def parent_complete(self, status, result):
+ if status == JobStatus.OK:
+ self._casquota.set_cache_size(result, write_to_disk=False)
+
+ if self._complete_cb:
+ self._complete_cb(status, result)
diff --git a/src/buildstream/_scheduler/jobs/elementjob.py b/src/buildstream/_scheduler/jobs/elementjob.py
new file mode 100644
index 000000000..fb5d38e11
--- /dev/null
+++ b/src/buildstream/_scheduler/jobs/elementjob.py
@@ -0,0 +1,115 @@
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# Tristan Daniël Maat <tristan.maat@codethink.co.uk>
+#
+from ruamel import yaml
+
+from ..._message import Message, MessageType
+
+from .job import Job
+
+
+# ElementJob()
+#
+# A job to run an element's commands. When this job is spawned
+# `action_cb` will be called, and when it completes `complete_cb` will
+# be called.
+#
+# Args:
+# scheduler (Scheduler): The scheduler
+# action_name (str): The queue action name
+# max_retries (int): The maximum number of retries
+# action_cb (callable): The function to execute on the child
+# complete_cb (callable): The function to execute when the job completes
+# element (Element): The element to work on
+# kwargs: Remaining Job() constructor arguments
+#
+# Here is the calling signature of the action_cb:
+#
+# action_cb():
+#
+# This function will be called in the child task
+#
+# Args:
+# element (Element): The element passed to the Job() constructor
+#
+# Returns:
+# (object): Any abstract simple Python object, including a string, int,
+# bool, list or dict; this must be a simple serializable object.
+#
+# Here is the calling signature of the complete_cb:
+#
+# complete_cb():
+#
+# This function will be called when the child task completes
+#
+# Args:
+# job (Job): The job object which completed
+# element (Element): The element passed to the Job() constructor
+# status (JobStatus): The job completion status
+# result (object): The deserialized object returned by the `action_cb`, or None
+# if the job did not succeed
+#
+class ElementJob(Job):
+ def __init__(self, *args, element, queue, action_cb, complete_cb, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.queue = queue
+ self._element = element
+ self._action_cb = action_cb # The action callable function
+ self._complete_cb = complete_cb # The complete callable function
+
+ # Set the task wide ID for logging purposes
+ self.set_task_id(element._unique_id)
+
+ @property
+ def element(self):
+ return self._element
+
+ def child_process(self):
+
+ # Print the element's environment at the beginning of any element's log file.
+ #
+ # This should probably be omitted for non-build tasks but it's harmless here
+ elt_env = self._element.get_environment()
+ env_dump = yaml.round_trip_dump(elt_env, default_flow_style=False, allow_unicode=True)
+ self.message(MessageType.LOG,
+ "Build environment for element {}".format(self._element.name),
+ detail=env_dump)
+
+ # Run the action
+ return self._action_cb(self._element)
+
+ def parent_complete(self, status, result):
+ self._complete_cb(self, self._element, status, self._result)
+
+ def message(self, message_type, message, **kwargs):
+ args = dict(kwargs)
+ args['scheduler'] = True
+ self._scheduler.context.message(
+ Message(self._element._unique_id,
+ message_type,
+ message,
+ **args))
+
+ def child_process_data(self):
+ data = {}
+
+ workspace = self._element._get_workspace()
+ if workspace is not None:
+ data['workspace'] = workspace.to_dict()
+
+ return data
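The comment block above spells out the calling signatures of `action_cb` and `complete_cb`; a hedged sketch of a pair of callbacks matching those signatures (the names and bodies are illustrative only):

from buildstream._scheduler.jobs import JobStatus

def example_action(element):
    # Runs in the child task; must return a simple serializable object.
    return {'name': element.name}

def example_complete(job, element, status, result):
    # Runs in the main process when the child task finishes.
    if status == JobStatus.OK:
        print("Processed {}".format(result['name']))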
diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
new file mode 100644
index 000000000..dd91d1634
--- /dev/null
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -0,0 +1,682 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+# System imports
+import os
+import sys
+import signal
+import datetime
+import traceback
+import asyncio
+import multiprocessing
+
+# BuildStream toplevel imports
+from ..._exceptions import ImplError, BstError, set_last_task_error, SkipJob
+from ..._message import Message, MessageType, unconditional_messages
+from ... import _signals, utils
+
+# Return code values for the shutdown of job handling child processes
+#
+RC_OK = 0
+RC_FAIL = 1
+RC_PERM_FAIL = 2
+RC_SKIPPED = 3
+
+
+# JobStatus:
+#
+# The job completion status, passed back through the
+# complete callbacks.
+#
+class JobStatus():
+ # Job succeeded
+ OK = 0
+
+ # A temporary BstError was raised
+ FAIL = 1
+
+ # A SkipJob was raised
+ SKIPPED = 3
+
+
+# Used to distinguish between status messages and return values
+class _Envelope():
+ def __init__(self, message_type, message):
+ self.message_type = message_type
+ self.message = message
+
+
+# Process class that doesn't call waitpid on its own.
+# This prevents conflicts with the asyncio child watcher.
+class Process(multiprocessing.Process):
+ # pylint: disable=attribute-defined-outside-init
+ def start(self):
+ self._popen = self._Popen(self)
+ self._sentinel = self._popen.sentinel
+
+
+# Job()
+#
+# The Job object represents a parallel task. When Job.spawn() is called,
+# the given `Job.child_process()` will be called in parallel to the
+# calling process, and `Job.parent_complete()` will be called with the
+# action result in the calling process when the job completes.
+#
+# Args:
+# scheduler (Scheduler): The scheduler
+# action_name (str): The queue action name
+# logfile (str): A template string that points to the logfile
+# that should be used - should contain {pid}.
+# max_retries (int): The maximum number of retries
+#
+class Job():
+
+ def __init__(self, scheduler, action_name, logfile, *, max_retries=0):
+
+ #
+ # Public members
+ #
+ self.action_name = action_name # The action name for the Queue
+ self.child_data = None # Data to be sent to the main process
+
+ #
+ # Private members
+ #
+ self._scheduler = scheduler # The scheduler
+ self._queue = None # A message passing queue
+ self._process = None # The Process object
+ self._watcher = None # Child process watcher
+ self._listening = False # Whether the parent is currently listening
+ self._suspended = False # Whether this job is currently suspended
+ self._max_retries = max_retries # Maximum number of automatic retries
+ self._result = None # Return value of child action in the parent
+ self._tries = 0 # Try count, for retryable jobs
+ self._terminated = False # Whether this job has been explicitly terminated
+
+ self._logfile = logfile
+ self._task_id = None
+
+ # spawn()
+ #
+ # Spawns the job.
+ #
+ def spawn(self):
+
+ self._queue = multiprocessing.Queue()
+
+ self._tries += 1
+ self._parent_start_listening()
+
+ # Spawn the process
+ self._process = Process(target=self._child_action, args=[self._queue])
+
+ # Block signals which are handled in the main process such that
+ # the child process does not inherit the parent's state, but the main
+ # process will be notified of any signal after we launch the child.
+ #
+ with _signals.blocked([signal.SIGINT, signal.SIGTSTP, signal.SIGTERM], ignore=False):
+ self._process.start()
+
+ # Wait for the child task to complete.
+ #
+ # This is a tricky part of Python which doesn't seem to
+ # make it to the online docs:
+ #
+ # o asyncio.get_child_watcher() will return a SafeChildWatcher() instance
+ # which is the default type of watcher, and the instance belongs to the
+ # "event loop policy" in use (so there is only one in the main process).
+ #
+ # o SafeChildWatcher() will register a SIGCHLD handler with the asyncio
+ # loop, and will selectively reap any child pids which have been
+ # terminated.
+ #
+ # o At registration time, the process will immediately be checked with
+ # `os.waitpid()` and reaped, before add_child_handler() returns.
+ #
+ # The self._parent_child_completed callback passed here will normally
+ # be called after the child task has been reaped with `os.waitpid()`, in
+ # an event loop callback. Otherwise, if the job completes too fast, then
+ # the callback is called immediately.
+ #
+ self._watcher = asyncio.get_child_watcher()
+ self._watcher.add_child_handler(self._process.pid, self._parent_child_completed)
+
+ # terminate()
+ #
+ # Politely request that an ongoing job terminate soon.
+ #
+ # This will send a SIGTERM signal to the Job process.
+ #
+ def terminate(self):
+
+ # First resume the job if it's suspended
+ self.resume(silent=True)
+
+ self.message(MessageType.STATUS, "{} terminating".format(self.action_name))
+
+ # Make sure there is no garbage on the queue
+ self._parent_stop_listening()
+
+ # Terminate the process using multiprocessing API pathway
+ self._process.terminate()
+
+ self._terminated = True
+
+ # get_terminated()
+ #
+ # Check if a job has been terminated.
+ #
+ # Returns:
+ # (bool): True in the main process if Job.terminate() was called.
+ #
+ def get_terminated(self):
+ return self._terminated
+
+ # terminate_wait()
+ #
+ # Wait for terminated jobs to complete
+ #
+ # Args:
+ # timeout (float): Seconds to wait
+ #
+ # Returns:
+ # (bool): True if the process terminated cleanly, otherwise False
+ #
+ def terminate_wait(self, timeout):
+
+ # Join the child process after sending SIGTERM
+ self._process.join(timeout)
+ return self._process.exitcode is not None
+
+ # kill()
+ #
+ # Forcefully kill the process, and any children it might have.
+ #
+ def kill(self):
+ # Force kill
+ self.message(MessageType.WARN,
+ "{} did not terminate gracefully, killing".format(self.action_name))
+ utils._kill_process_tree(self._process.pid)
+
+ # suspend()
+ #
+ # Suspend this job.
+ #
+ def suspend(self):
+ if not self._suspended:
+ self.message(MessageType.STATUS,
+ "{} suspending".format(self.action_name))
+
+ try:
+ # Use SIGTSTP so that child processes may handle and propagate
+ # it to processes they spawn that become session leaders
+ os.kill(self._process.pid, signal.SIGTSTP)
+
+ # For some reason we receive exactly one suspend event for every
+ # SIGTSTP we send to the child fork(), even though the child forks
+ # call setsid(). We keep a count of these so we can ignore them
+ # in our event loop's suspend_event()
+ self._scheduler.internal_stops += 1
+ self._suspended = True
+ except ProcessLookupError:
+ # ignore, process has already exited
+ pass
+
+ # resume()
+ #
+ # Resume this suspended job.
+ #
+ def resume(self, silent=False):
+ if self._suspended:
+ if not silent and not self._scheduler.terminated:
+ self.message(MessageType.STATUS,
+ "{} resuming".format(self.action_name))
+
+ os.kill(self._process.pid, signal.SIGCONT)
+ self._suspended = False
+
+ # set_task_id()
+ #
+ # This is called by Job subclasses to set a plugin ID
+ # associated with the task at large (if any element is related
+ # to the task).
+ #
+ # The task ID helps keep messages in the frontend coherent
+ # in the case that multiple plugins log in the context of
+ # a single task (e.g. running integration commands should appear
+ # in the frontend for the element being built, not the element
+ # running the integration commands).
+ #
+ # Args:
+ # task_id (int): The plugin identifier for this task
+ #
+ def set_task_id(self, task_id):
+ self._task_id = task_id
+
+ # send_message()
+ #
+ # To be called from inside Job.child_process() implementations
+ # to send messages to the main process during processing.
+ #
+ # These messages will be processed by the class's Job.handle_message()
+ # implementation.
+ #
+ def send_message(self, message_type, message):
+ self._queue.put(_Envelope(message_type, message))
+
+ #######################################################
+ # Abstract Methods #
+ #######################################################
+
+ # handle_message()
+ #
+ # Handle a custom message. This will be called in the main process in
+ # response to any messages sent to the main process using the
+ # Job.send_message() API from inside a Job.child_process() implementation.
+ #
+ # Args:
+ # message_type (str): A string to identify the message type
+ # message (any): A simple serializable object
+ #
+ # Returns:
+ # (bool): Should return a truthy value if message_type is handled.
+ #
+ def handle_message(self, message_type, message):
+ return False
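+
+ # For illustration, a rough sketch of this round trip in a hypothetical
+ # Job subclass (not part of BuildStream):
+ #
+ # class ProgressJob(Job):
+ #
+ #     def child_process(self):
+ #         # Runs in the child; the envelope is read back by the parent
+ #         self.send_message('progress', {'percent': 50})
+ #
+ #     def handle_message(self, message_type, message):
+ #         # Runs in the main process, via _parent_process_envelope()
+ #         if message_type == 'progress':
+ #             return True
+ #         return False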
+
+ # parent_complete()
+ #
+ # This will be executed after the job finishes, and is expected to
+ # pass the result to the main thread.
+ #
+ # Args:
+ # status (JobStatus): The job exit status
+ # result (any): The result returned by child_process().
+ #
+ def parent_complete(self, status, result):
+ raise ImplError("Job '{kind}' does not implement parent_complete()"
+ .format(kind=type(self).__name__))
+
+ # child_process()
+ #
+ # This will be executed after fork(), and is intended to perform
+ # the job's task.
+ #
+ # Returns:
+ # (any): A (simple!) object to be returned to the main thread
+ # as the result.
+ #
+ def child_process(self):
+ raise ImplError("Job '{kind}' does not implement child_process()"
+ .format(kind=type(self).__name__))
+
+ # message():
+ #
+ # Logs a message, this will be logged in the task's logfile and
+ # conditionally also be sent to the frontend.
+ #
+ # Args:
+ # message_type (MessageType): The type of message to send
+ # message (str): The message
+ # kwargs: Remaining Message() constructor arguments
+ #
+ def message(self, message_type, message, **kwargs):
+ args = dict(kwargs)
+ args['scheduler'] = True
+ self._scheduler.context.message(Message(None, message_type, message, **args))
+
+ # child_process_data()
+ #
+ # Abstract method to retrieve additional data that should be
+ # returned to the parent process. Note that the job result is
+ # retrieved independently.
+ #
+ # Values can later be retrieved in Job.child_data.
+ #
+ # Returns:
+ # (dict) A dict containing values to be reported to the main process
+ #
+ def child_process_data(self):
+ return {}
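+
+ # For example, element jobs return a dict which may contain a 'workspace'
+ # entry here; Queue._update_workspaces() later reads it back from
+ # Job.child_data in the main process.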
+
+ #######################################################
+ # Local Private Methods #
+ #######################################################
+ #
+ # Methods prefixed with the word 'child' take place in the child process
+ #
+ # Methods prefixed with the word 'parent' take place in the parent process
+ #
+ # Other methods can be called in both child or parent processes
+ #
+ #######################################################
+
+ # _child_action()
+ #
+ # Perform the action in the child process, this calls the action_cb.
+ #
+ # Args:
+ # queue (multiprocessing.Queue): The message queue for IPC
+ #
+ def _child_action(self, queue):
+
+ # This prevents some SIGTSTP signals from grandchildren
+ # from being propagated up to the master process
+ os.setsid()
+
+ # First set back to the default signal handlers for the signals
+ # we handle, and then clear their blocked state.
+ #
+ signal_list = [signal.SIGTSTP, signal.SIGTERM]
+ for sig in signal_list:
+ signal.signal(sig, signal.SIG_DFL)
+ signal.pthread_sigmask(signal.SIG_UNBLOCK, signal_list)
+
+ # Assign the queue we passed across the process boundaries
+ #
+ # Set the global message handler in this child
+ # process to forward messages to the parent process
+ self._queue = queue
+ self._scheduler.context.set_message_handler(self._child_message_handler)
+
+ starttime = datetime.datetime.now()
+ stopped_time = None
+
+ def stop_time():
+ nonlocal stopped_time
+ stopped_time = datetime.datetime.now()
+
+ def resume_time():
+ nonlocal stopped_time
+ nonlocal starttime
+ starttime += (datetime.datetime.now() - stopped_time)
+
+ # Time, log and run the action function
+ #
+ with _signals.suspendable(stop_time, resume_time), \
+ self._scheduler.context.recorded_messages(self._logfile) as filename:
+
+ self.message(MessageType.START, self.action_name, logfile=filename)
+
+ try:
+ # Try the task action
+ result = self.child_process() # pylint: disable=assignment-from-no-return
+ except SkipJob as e:
+ elapsed = datetime.datetime.now() - starttime
+ self.message(MessageType.SKIPPED, str(e),
+ elapsed=elapsed, logfile=filename)
+
+ # Alert parent of skip by return code
+ self._child_shutdown(RC_SKIPPED)
+ except BstError as e:
+ elapsed = datetime.datetime.now() - starttime
+ retry_flag = e.temporary
+
+ if retry_flag and (self._tries <= self._max_retries):
+ self.message(MessageType.FAIL,
+ "Try #{} failed, retrying".format(self._tries),
+ elapsed=elapsed, logfile=filename)
+ else:
+ self.message(MessageType.FAIL, str(e),
+ elapsed=elapsed, detail=e.detail,
+ logfile=filename, sandbox=e.sandbox)
+
+ self._queue.put(_Envelope('child_data', self.child_process_data()))
+
+ # Report the exception to the parent (for internal testing purposes)
+ self._child_send_error(e)
+
+ # Set return code based on whether or not the error was temporary.
+ #
+ self._child_shutdown(RC_FAIL if retry_flag else RC_PERM_FAIL)
+
+ except Exception as e: # pylint: disable=broad-except
+
+ # If an unhandled exception (not normalized to BstError) occurs, that's a bug;
+ # send the traceback and formatted exception back to the frontend
+ # and print it to the log file.
+ #
+ elapsed = datetime.datetime.now() - starttime
+ detail = "An unhandled exception occured:\n\n{}".format(traceback.format_exc())
+
+ self.message(MessageType.BUG, self.action_name,
+ elapsed=elapsed, detail=detail,
+ logfile=filename)
+ # Unhandled exceptions should permanently fail
+ self._child_shutdown(RC_PERM_FAIL)
+
+ else:
+ # No exception occurred in the action
+ self._queue.put(_Envelope('child_data', self.child_process_data()))
+ self._child_send_result(result)
+
+ elapsed = datetime.datetime.now() - starttime
+ self.message(MessageType.SUCCESS, self.action_name, elapsed=elapsed,
+ logfile=filename)
+
+ # Shutdown needs to stay outside of the above context manager;
+ # make sure we don't try to handle SIGTERM while the process
+ # is already busy in sys.exit()
+ self._child_shutdown(RC_OK)
+
+ # _child_send_error()
+ #
+ # Sends an error to the main process through the message queue
+ #
+ # Args:
+ # e (Exception): The error to send
+ #
+ def _child_send_error(self, e):
+ domain = None
+ reason = None
+
+ if isinstance(e, BstError):
+ domain = e.domain
+ reason = e.reason
+
+ envelope = _Envelope('error', {
+ 'domain': domain,
+ 'reason': reason
+ })
+ self._queue.put(envelope)
+
+ # _child_send_result()
+ #
+ # Sends the serialized result to the main process through the message queue
+ #
+ # Args:
+ # result (object): A simple serializable object, or None
+ #
+ # Note: If None is passed here, nothing needs to be sent; the
+ # result member in the parent process will simply remain None.
+ #
+ def _child_send_result(self, result):
+ if result is not None:
+ envelope = _Envelope('result', result)
+ self._queue.put(envelope)
+
+ # _child_shutdown()
+ #
+ # Shuts down the child process by cleaning up and exiting the process
+ #
+ # Args:
+ # exit_code (int): The exit code to exit with
+ #
+ def _child_shutdown(self, exit_code):
+ self._queue.close()
+ sys.exit(exit_code)
+
+ # _child_message_handler()
+ #
+ # A Context delegate for handling messages; this replaces the
+ # frontend's main message handler in the context of a child task
+ # and logs to the local log file before sending the message back
+ # to the parent process for further propagation.
+ #
+ # Args:
+ # message (Message): The message to log
+ # context (Context): The context object delegating this message
+ #
+ def _child_message_handler(self, message, context):
+
+ message.action_name = self.action_name
+ message.task_id = self._task_id
+
+ # Send to frontend if appropriate
+ if context.silent_messages() and (message.message_type not in unconditional_messages):
+ return
+
+ if message.message_type == MessageType.LOG:
+ return
+
+ self._queue.put(_Envelope('message', message))
+
+ # _parent_shutdown()
+ #
+ # Shuts down the Job on the parent side by reading any remaining
+ # messages on the message queue and cleaning up any resources.
+ #
+ def _parent_shutdown(self):
+ # Make sure we've read everything we need and then stop listening
+ self._parent_process_queue()
+ self._parent_stop_listening()
+
+ # _parent_child_completed()
+ #
+ # Called in the main process courtesy of asyncio's ChildWatcher.add_child_handler()
+ #
+ # Args:
+ # pid (int): The PID of the child which completed
+ # returncode (int): The return code of the child process
+ #
+ def _parent_child_completed(self, pid, returncode):
+ self._parent_shutdown()
+
+ # We don't want to retry if we got OK or a permanent fail.
+ retry_flag = returncode == RC_FAIL
+
+ if retry_flag and (self._tries <= self._max_retries) and not self._scheduler.terminated:
+ self.spawn()
+ return
+
+ # Resolve the outward facing overall job completion status
+ #
+ if returncode == RC_OK:
+ status = JobStatus.OK
+ elif returncode == RC_SKIPPED:
+ status = JobStatus.SKIPPED
+ elif returncode in (RC_FAIL, RC_PERM_FAIL):
+ status = JobStatus.FAIL
+ else:
+ status = JobStatus.FAIL
+
+ self.parent_complete(status, self._result)
+ self._scheduler.job_completed(self, status)
+
+ # Force the deletion of the queue and process objects to try and clean up FDs
+ self._queue = self._process = None
+
+ # _parent_process_envelope()
+ #
+ # Processes a message Envelope deserialized from the message queue.
+ #
+ # This will have the side effect of assigning some local state
+ # on the Job in the parent process for later inspection when the
+ # child process completes.
+ #
+ # Args:
+ # envelope (Envelope): The message envelope
+ #
+ def _parent_process_envelope(self, envelope):
+ if not self._listening:
+ return
+
+ if envelope.message_type == 'message':
+ # Propagate received messages from children
+ # back through the context.
+ self._scheduler.context.message(envelope.message)
+ elif envelope.message_type == 'error':
+ # For regression tests only, save the last error domain / reason
+ # reported from a child task in the main process; this global state
+ # is currently managed in _exceptions.py
+ set_last_task_error(envelope.message['domain'],
+ envelope.message['reason'])
+ elif envelope.message_type == 'result':
+ assert self._result is None
+ self._result = envelope.message
+ elif envelope.message_type == 'child_data':
+ # If we retry a job, we assign a new value to this
+ self.child_data = envelope.message
+
+ # Try Job subclass specific messages now
+ elif not self.handle_message(envelope.message_type,
+ envelope.message):
+ assert 0, "Unhandled message type '{}': {}" \
+ .format(envelope.message_type, envelope.message)
+
+ # _parent_process_queue()
+ #
+ # Reads back message envelopes from the message queue
+ # in the parent process.
+ #
+ def _parent_process_queue(self):
+ while not self._queue.empty():
+ envelope = self._queue.get_nowait()
+ self._parent_process_envelope(envelope)
+
+ # _parent_recv()
+ #
+ # A callback to handle I/O events from the message
+ # queue file descriptor in the main process message loop
+ #
+ def _parent_recv(self, *args):
+ self._parent_process_queue()
+
+ # _parent_start_listening()
+ #
+ # Starts listening on the message queue
+ #
+ def _parent_start_listening(self):
+ # Warning: Platform specific code up ahead
+ #
+ # The multiprocessing.Queue object does not tell us how
+ # to receive I/O events in the receiving process, so we
+ # need to sneak in and get its file descriptor.
+ #
+ # The _reader member of the Queue is currently private
+ # but well known; perhaps it will become public:
+ #
+ # http://bugs.python.org/issue3831
+ #
+ if not self._listening:
+ self._scheduler.loop.add_reader(
+ self._queue._reader.fileno(), self._parent_recv)
+ self._listening = True
+
+ # _parent_stop_listening()
+ #
+ # Stops listening on the message queue
+ #
+ def _parent_stop_listening(self):
+ if self._listening:
+ self._scheduler.loop.remove_reader(self._queue._reader.fileno())
+ self._listening = False
diff --git a/src/buildstream/_scheduler/queues/__init__.py b/src/buildstream/_scheduler/queues/__init__.py
new file mode 100644
index 000000000..3b2293919
--- /dev/null
+++ b/src/buildstream/_scheduler/queues/__init__.py
@@ -0,0 +1 @@
+from .queue import Queue, QueueStatus
diff --git a/src/buildstream/_scheduler/queues/artifactpushqueue.py b/src/buildstream/_scheduler/queues/artifactpushqueue.py
new file mode 100644
index 000000000..b861d4fc7
--- /dev/null
+++ b/src/buildstream/_scheduler/queues/artifactpushqueue.py
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+# Local imports
+from . import Queue, QueueStatus
+from ..resources import ResourceType
+from ..._exceptions import SkipJob
+
+
+# A queue which pushes element artifacts
+#
+class ArtifactPushQueue(Queue):
+
+ action_name = "Push"
+ complete_name = "Pushed"
+ resources = [ResourceType.UPLOAD]
+
+ def process(self, element):
+ # returns whether an artifact was uploaded or not
+ if not element._push():
+ raise SkipJob(self.action_name)
+
+ def status(self, element):
+ if element._skip_push():
+ return QueueStatus.SKIP
+
+ return QueueStatus.READY
diff --git a/src/buildstream/_scheduler/queues/buildqueue.py b/src/buildstream/_scheduler/queues/buildqueue.py
new file mode 100644
index 000000000..aa489f381
--- /dev/null
+++ b/src/buildstream/_scheduler/queues/buildqueue.py
@@ -0,0 +1,117 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+from datetime import timedelta
+
+from . import Queue, QueueStatus
+from ..jobs import ElementJob, JobStatus
+from ..resources import ResourceType
+from ..._message import MessageType
+
+
+# A queue which assembles elements
+#
+class BuildQueue(Queue):
+
+ action_name = "Build"
+ complete_name = "Built"
+ resources = [ResourceType.PROCESS, ResourceType.CACHE]
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._tried = set()
+
+ def enqueue(self, elts):
+ to_queue = []
+
+ for element in elts:
+ if not element._cached_failure() or element in self._tried:
+ to_queue.append(element)
+ continue
+
+ # XXX: Fix this, See https://mail.gnome.org/archives/buildstream-list/2018-September/msg00029.html
+ # Bypass queue processing entirely the first time it's tried.
+ self._tried.add(element)
+ _, description, detail = element._get_build_result()
+ logfile = element._get_build_log()
+ self._message(element, MessageType.FAIL, description,
+ detail=detail, action_name=self.action_name,
+ elapsed=timedelta(seconds=0),
+ logfile=logfile)
+ job = ElementJob(self._scheduler, self.action_name,
+ logfile, element=element, queue=self,
+ action_cb=self.process,
+ complete_cb=self._job_done,
+ max_retries=self._max_retries)
+ self._done_queue.append(element)
+ self.failed_elements.append(element)
+ self._scheduler._job_complete_callback(job, False)
+
+ return super().enqueue(to_queue)
+
+ def process(self, element):
+ return element._assemble()
+
+ def status(self, element):
+ if not element._is_required():
+ # Artifact is not currently required but it may be requested later.
+ # Keep it in the queue.
+ return QueueStatus.WAIT
+
+ if element._cached_success():
+ return QueueStatus.SKIP
+
+ if not element._buildable():
+ return QueueStatus.WAIT
+
+ return QueueStatus.READY
+
+ def _check_cache_size(self, job, element, artifact_size):
+
+ # After completing a build job, add the artifact size
+ # as returned from Element._assemble() to the estimated
+ # artifact cache size
+ #
+ context = self._scheduler.context
+ artifacts = context.artifactcache
+
+ artifacts.add_artifact_size(artifact_size)
+
+ # If the estimated size outgrows the quota, ask the scheduler
+ # to queue a job to actually check the real cache size.
+ #
+ if artifacts.full():
+ self._scheduler.check_cache_size()
+
+ def done(self, job, element, result, status):
+
+ # Inform element in main process that assembly is done
+ element._assemble_done()
+
+ # This has to be done after _assemble_done, such that the
+ # element may register its cache key as required
+ #
+ # FIXME: Element._assemble() does not report both the failure state and the
+ # size of the newly cached failed artifact, so we can only adjust the
+ # artifact cache size for a successful build even though we know a
+ # failed build also grows the artifact cache size.
+ #
+ if status == JobStatus.OK:
+ self._check_cache_size(job, element, result)
diff --git a/src/buildstream/_scheduler/queues/fetchqueue.py b/src/buildstream/_scheduler/queues/fetchqueue.py
new file mode 100644
index 000000000..9edeebb1d
--- /dev/null
+++ b/src/buildstream/_scheduler/queues/fetchqueue.py
@@ -0,0 +1,80 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+# BuildStream toplevel imports
+from ... import Consistency
+
+# Local imports
+from . import Queue, QueueStatus
+from ..resources import ResourceType
+from ..jobs import JobStatus
+
+
+# A queue which fetches element sources
+#
+class FetchQueue(Queue):
+
+ action_name = "Fetch"
+ complete_name = "Fetched"
+ resources = [ResourceType.DOWNLOAD]
+
+ def __init__(self, scheduler, skip_cached=False, fetch_original=False):
+ super().__init__(scheduler)
+
+ self._skip_cached = skip_cached
+ self._fetch_original = fetch_original
+
+ def process(self, element):
+ element._fetch(fetch_original=self._fetch_original)
+
+ def status(self, element):
+ if not element._is_required():
+ # Artifact is not currently required but it may be requested later.
+ # Keep it in the queue.
+ return QueueStatus.WAIT
+
+ # Optionally skip elements that are already in the artifact cache
+ if self._skip_cached:
+ if not element._can_query_cache():
+ return QueueStatus.WAIT
+
+ if element._cached():
+ return QueueStatus.SKIP
+
+ # This will automatically skip elements which
+ # have no sources.
+
+ if not element._should_fetch(self._fetch_original):
+ return QueueStatus.SKIP
+
+ return QueueStatus.READY
+
+ def done(self, _, element, result, status):
+
+ if status == JobStatus.FAIL:
+ return
+
+ element._fetch_done()
+
+ # Successful fetch; we must be CACHED or in the sourcecache
+ if self._fetch_original:
+ assert element._get_consistency() == Consistency.CACHED
+ else:
+ assert element._source_cached()
diff --git a/src/buildstream/_scheduler/queues/pullqueue.py b/src/buildstream/_scheduler/queues/pullqueue.py
new file mode 100644
index 000000000..013ee6489
--- /dev/null
+++ b/src/buildstream/_scheduler/queues/pullqueue.py
@@ -0,0 +1,66 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+# Local imports
+from . import Queue, QueueStatus
+from ..resources import ResourceType
+from ..jobs import JobStatus
+from ..._exceptions import SkipJob
+
+
+# A queue which pulls element artifacts
+#
+class PullQueue(Queue):
+
+ action_name = "Pull"
+ complete_name = "Pulled"
+ resources = [ResourceType.DOWNLOAD, ResourceType.CACHE]
+
+ def process(self, element):
+ # returns whether an artifact was downloaded or not
+ if not element._pull():
+ raise SkipJob(self.action_name)
+
+ def status(self, element):
+ if not element._is_required():
+ # Artifact is not currently required but it may be requested later.
+ # Keep it in the queue.
+ return QueueStatus.WAIT
+
+ if not element._can_query_cache():
+ return QueueStatus.WAIT
+
+ if element._pull_pending():
+ return QueueStatus.READY
+ else:
+ return QueueStatus.SKIP
+
+ def done(self, _, element, result, status):
+
+ if status == JobStatus.FAIL:
+ return
+
+ element._pull_done()
+
+ # Build jobs will check the "approximate" size first. Since we
+ # do not get an artifact size from pull jobs, we have to
+ # actually check the cache size.
+ if status == JobStatus.OK:
+ self._scheduler.check_cache_size()
diff --git a/src/buildstream/_scheduler/queues/queue.py b/src/buildstream/_scheduler/queues/queue.py
new file mode 100644
index 000000000..1efcffc16
--- /dev/null
+++ b/src/buildstream/_scheduler/queues/queue.py
@@ -0,0 +1,328 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+# System imports
+import os
+from collections import deque
+from enum import Enum
+import traceback
+
+# Local imports
+from ..jobs import ElementJob, JobStatus
+from ..resources import ResourceType
+
+# BuildStream toplevel imports
+from ..._exceptions import BstError, set_last_task_error
+from ..._message import Message, MessageType
+
+
+# Queue status for a given element
+#
+#
+class QueueStatus(Enum):
+ # The element is waiting for dependencies.
+ WAIT = 1
+
+ # The element can skip this queue.
+ SKIP = 2
+
+ # The element is ready for processing in this queue.
+ READY = 3
+
+
+# Queue()
+#
+# Args:
+# scheduler (Scheduler): The Scheduler
+#
+class Queue():
+
+ # These should be overridden in the class data of concrete Queue implementations
+ action_name = None
+ complete_name = None
+ resources = [] # Resources this queue's jobs want
+
+ def __init__(self, scheduler):
+
+ #
+ # Public members
+ #
+ self.failed_elements = [] # List of failed elements, for the frontend
+ self.processed_elements = [] # List of processed elements, for the frontend
+ self.skipped_elements = [] # List of skipped elements, for the frontend
+
+ #
+ # Private members
+ #
+ self._scheduler = scheduler
+ self._resources = scheduler.resources # Shared resource pool
+ self._wait_queue = deque() # Ready / Waiting elements
+ self._done_queue = deque() # Processed / Skipped elements
+ self._max_retries = 0
+
+ # Assert the subclass has set up class data
+ assert self.action_name is not None
+ assert self.complete_name is not None
+
+ if ResourceType.UPLOAD in self.resources or ResourceType.DOWNLOAD in self.resources:
+ self._max_retries = scheduler.context.sched_network_retries
+
+ #####################################################
+ # Abstract Methods for Queue implementations #
+ #####################################################
+
+ # process()
+ #
+ # Abstract method for processing an element
+ #
+ # Args:
+ # element (Element): An element to process
+ #
+ # Returns:
+ # (any): An optional value to be returned
+ # for every element successfully processed
+ #
+ def process(self, element):
+ pass
+
+ # status()
+ #
+ # Abstract method for reporting the status of an element.
+ #
+ # Args:
+ # element (Element): An element to process
+ #
+ # Returns:
+ # (QueueStatus): The element status
+ #
+ def status(self, element):
+ return QueueStatus.READY
+
+ # done()
+ #
+ # Abstract method for handling a successful job completion.
+ #
+ # Args:
+ # job (Job): The job which completed processing
+ # element (Element): The element which completed processing
+ # result (any): The return value of the process() implementation
+ # status (JobStatus): The return status of the Job
+ #
+ def done(self, job, element, result, status):
+ pass
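+
+ # For illustration, a hypothetical minimal Queue implementation (not part
+ # of BuildStream) tying the three methods above together:
+ #
+ # class ExampleQueue(Queue):
+ #
+ #     action_name = "Example"
+ #     complete_name = "Examined"
+ #     resources = [ResourceType.PROCESS]
+ #
+ #     def process(self, element):
+ #         return element.name # runs in a child job
+ #
+ #     def status(self, element):
+ #         return QueueStatus.READY # always eligible
+ #
+ #     def done(self, job, element, result, status):
+ #         pass # runs in the main process after the job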
+
+ #####################################################
+ # Scheduler / Pipeline facing APIs #
+ #####################################################
+
+ # enqueue()
+ #
+ # Enqueues some elements
+ #
+ # Args:
+ # elts (list): A list of Elements
+ #
+ def enqueue(self, elts):
+ if not elts:
+ return
+
+ # Place skipped elements on the done queue right away.
+ #
+ # The remaining ready and waiting elements must remain in the
+ # same queue, and ready status must be determined at the moment
+ # the scheduler asks for the next job.
+ #
+ skip = [elt for elt in elts if self.status(elt) == QueueStatus.SKIP]
+ wait = [elt for elt in elts if elt not in skip]
+
+ self.skipped_elements.extend(skip) # Public record of skipped elements
+ self._done_queue.extend(skip) # Skipped elements are ready to proceed
+ self._wait_queue.extend(wait) # Remaining elements wait to be processed
+
+ # dequeue()
+ #
+ # A generator which dequeues the elements which
+ # are ready to exit the queue.
+ #
+ # Yields:
+ # (Element): Elements being dequeued
+ #
+ def dequeue(self):
+ while self._done_queue:
+ yield self._done_queue.popleft()
+
+ # dequeue_ready()
+ #
+ # Reports whether any elements can be promoted to other queues
+ #
+ # Returns:
+ # (bool): Whether there are elements ready
+ #
+ def dequeue_ready(self):
+ return any(self._done_queue)
+
+ # harvest_jobs()
+ #
+ # Process elements in the queue, moving elements which were enqueued
+ # into the dequeue pool, and creating as many jobs as resources
+ # can be reserved for.
+ #
+ # Returns:
+ # ([Job]): A list of jobs which can be run now
+ #
+ def harvest_jobs(self):
+ unready = []
+ ready = []
+
+ while self._wait_queue:
+ if not self._resources.reserve(self.resources, peek=True):
+ break
+
+ element = self._wait_queue.popleft()
+ status = self.status(element)
+
+ if status == QueueStatus.WAIT:
+ unready.append(element)
+ elif status == QueueStatus.SKIP:
+ self._done_queue.append(element)
+ self.skipped_elements.append(element)
+ else:
+ reserved = self._resources.reserve(self.resources)
+ assert reserved
+ ready.append(element)
+
+ self._wait_queue.extendleft(unready)
+
+ return [
+ ElementJob(self._scheduler, self.action_name,
+ self._element_log_path(element),
+ element=element, queue=self,
+ action_cb=self.process,
+ complete_cb=self._job_done,
+ max_retries=self._max_retries)
+ for element in ready
+ ]
+
+ #####################################################
+ # Private Methods #
+ #####################################################
+
+ # _update_workspaces()
+ #
+ # Updates and possibly saves the workspaces in the
+ # main data model in the main process after a job completes.
+ #
+ # Args:
+ # element (Element): The element which completed
+ # job (Job): The job which completed
+ #
+ def _update_workspaces(self, element, job):
+ workspace_dict = None
+ if job.child_data:
+ workspace_dict = job.child_data.get('workspace', None)
+
+ # Handle any workspace modifications now
+ #
+ if workspace_dict:
+ context = element._get_context()
+ workspaces = context.get_workspaces()
+ if workspaces.update_workspace(element._get_full_name(), workspace_dict):
+ try:
+ workspaces.save_config()
+ except BstError as e:
+ self._message(element, MessageType.ERROR, "Error saving workspaces", detail=str(e))
+ except Exception as e: # pylint: disable=broad-except
+ self._message(element, MessageType.BUG,
+ "Unhandled exception while saving workspaces",
+ detail=traceback.format_exc())
+
+ # _job_done()
+ #
+ # A callback reported by the Job() when a job completes
+ #
+ # This will call the Queue implementation specific Queue.done()
+ # implementation and trigger the scheduler to reschedule.
+ #
+ # See the Job object for an explanation of the call signature
+ #
+ def _job_done(self, job, element, status, result):
+
+ # Now release the resources we reserved
+ #
+ self._resources.release(self.resources)
+
+ # Update values that need to be synchronized in the main task
+ # before calling any queue implementation
+ self._update_workspaces(element, job)
+
+ # Give the result of the job to the Queue implementor,
+ # and determine if it should be considered as processed
+ # or skipped.
+ try:
+ self.done(job, element, result, status)
+ except BstError as e:
+
+ # Report error and mark as failed
+ #
+ self._message(element, MessageType.ERROR, "Post processing error", detail=str(e))
+ self.failed_elements.append(element)
+
+ # Treat this as a task error as it's related to a task
+ # even though it did not occur in the task context
+ #
+ # This just gives us stronger testing capability
+ #
+ set_last_task_error(e.domain, e.reason)
+
+ except Exception as e: # pylint: disable=broad-except
+
+ # Report unhandled exceptions and mark as failed
+ #
+ self._message(element, MessageType.BUG,
+ "Unhandled exception in post processing",
+ detail=traceback.format_exc())
+ self.failed_elements.append(element)
+ else:
+ # All elements get placed on the done queue for later processing.
+ self._done_queue.append(element)
+
+ # These lists are for bookkeeping purposes for the UI and logging.
+ if status == JobStatus.SKIPPED or job.get_terminated():
+ self.skipped_elements.append(element)
+ elif status == JobStatus.OK:
+ self.processed_elements.append(element)
+ else:
+ self.failed_elements.append(element)
+
+ # Convenience wrapper for Queue implementations to send
+ # a message for the element they are processing
+ def _message(self, element, message_type, brief, **kwargs):
+ context = element._get_context()
+ message = Message(element._unique_id, message_type, brief, **kwargs)
+ context.message(message)
+
+ def _element_log_path(self, element):
+ project = element._get_project()
+ key = element._get_display_key()[1]
+ action = self.action_name.lower()
+ logfile = "{key}-{action}".format(key=key, action=action)
+
+ return os.path.join(project.name, element.normal_name, logfile)
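+
+ # For example (hypothetical values), an element "foo/bar.bst" in project
+ # "myproject" with display key "a1b2c3" would get a logfile path along
+ # the lines of "myproject/foo-bar/a1b2c3-build".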
diff --git a/src/buildstream/_scheduler/queues/sourcepushqueue.py b/src/buildstream/_scheduler/queues/sourcepushqueue.py
new file mode 100644
index 000000000..c38460e6a
--- /dev/null
+++ b/src/buildstream/_scheduler/queues/sourcepushqueue.py
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Raoul Hidalgo Charman <raoul.hidalgocharman@codethink.co.uk>
+
+from . import Queue, QueueStatus
+from ..resources import ResourceType
+from ..._exceptions import SkipJob
+
+
+# A queue which pushes staged sources
+#
+class SourcePushQueue(Queue):
+
+ action_name = "Src-push"
+ complete_name = "Sources pushed"
+ resources = [ResourceType.UPLOAD]
+
+ def process(self, element):
+ # Returns whether a source was pushed or not
+ if not element._source_push():
+ raise SkipJob(self.action_name)
+
+ def status(self, element):
+ if element._skip_source_push():
+ return QueueStatus.SKIP
+
+ return QueueStatus.READY
diff --git a/src/buildstream/_scheduler/queues/trackqueue.py b/src/buildstream/_scheduler/queues/trackqueue.py
new file mode 100644
index 000000000..72a79a532
--- /dev/null
+++ b/src/buildstream/_scheduler/queues/trackqueue.py
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+# BuildStream toplevel imports
+from ...plugin import Plugin
+
+# Local imports
+from . import Queue, QueueStatus
+from ..resources import ResourceType
+from ..jobs import JobStatus
+
+
+# A queue which tracks sources
+#
+class TrackQueue(Queue):
+
+ action_name = "Track"
+ complete_name = "Tracked"
+ resources = [ResourceType.DOWNLOAD]
+
+ def process(self, element):
+ return element._track()
+
+ def status(self, element):
+ # We can skip elements entirely if they have no sources.
+ if not list(element.sources()):
+
+ # But we still have to mark them as tracked
+ element._tracking_done()
+ return QueueStatus.SKIP
+
+ return QueueStatus.READY
+
+ def done(self, _, element, result, status):
+
+ if status == JobStatus.FAIL:
+ return
+
+ # Set the new refs in the main process one by one as they complete,
+ # writing to bst files this time
+ for unique_id, new_ref in result:
+ source = Plugin._lookup(unique_id)
+ source._set_ref(new_ref, save=True)
+
+ element._tracking_done()
diff --git a/src/buildstream/_scheduler/resources.py b/src/buildstream/_scheduler/resources.py
new file mode 100644
index 000000000..73bf66b4a
--- /dev/null
+++ b/src/buildstream/_scheduler/resources.py
@@ -0,0 +1,166 @@
+class ResourceType():
+ CACHE = 0
+ DOWNLOAD = 1
+ PROCESS = 2
+ UPLOAD = 3
+
+
+class Resources():
+ def __init__(self, num_builders, num_fetchers, num_pushers):
+ self._max_resources = {
+ ResourceType.CACHE: 0,
+ ResourceType.DOWNLOAD: num_fetchers,
+ ResourceType.PROCESS: num_builders,
+ ResourceType.UPLOAD: num_pushers
+ }
+
+ # Resources jobs are currently using.
+ self._used_resources = {
+ ResourceType.CACHE: 0,
+ ResourceType.DOWNLOAD: 0,
+ ResourceType.PROCESS: 0,
+ ResourceType.UPLOAD: 0
+ }
+
+ # Resources jobs currently want exclusive access to. The set
+ # of jobs that have asked for exclusive access is the value -
+ # this is so that we can avoid scheduling any other jobs until
+ # *all* exclusive jobs that "register interest" have finished
+ # - which avoids starving them of scheduling time.
+ self._exclusive_resources = {
+ ResourceType.CACHE: set(),
+ ResourceType.DOWNLOAD: set(),
+ ResourceType.PROCESS: set(),
+ ResourceType.UPLOAD: set()
+ }
+
+ # reserve()
+ #
+ # Reserves a set of resources
+ #
+ # Args:
+ # resources (set): A set of ResourceTypes
+ # exclusive (set): Another set of ResourceTypes
+ # peek (bool): Whether to only peek at whether the resource is available
+ #
+ # Returns:
+ # (bool): True if the resources could be reserved
+ #
+ def reserve(self, resources, exclusive=None, *, peek=False):
+ if exclusive is None:
+ exclusive = set()
+
+ resources = set(resources)
+ exclusive = set(exclusive)
+
+ # First, we check if the job wants to access a resource that
+ # another job wants exclusive access to. If so, it cannot be
+ # scheduled.
+ #
+ # Note that if *both* jobs want this exclusively, we don't
+ # fail yet.
+ #
+ # FIXME: I *think* we can deadlock if two jobs want disjoint
+ # sets of exclusive and non-exclusive resources. This
+ # is currently not possible, but may be worth thinking
+ # about.
+ #
+ for resource in resources - exclusive:
+
+ # If our job wants this resource exclusively, we never
+ # check this, so we can get away with not (temporarily)
+ # removing it from the set.
+ if self._exclusive_resources[resource]:
+ return False
+
+ # Now we check if anything is currently using any resources
+ # this job wants exclusively. If so, the job cannot be
+ # scheduled.
+ #
+ # Since jobs that use a resource exclusively are also using
+ # it, this means only one exclusive job can ever be scheduled
+ # at a time, despite being allowed to be part of the exclusive
+ # set.
+ #
+ for resource in exclusive:
+ if self._used_resources[resource] != 0:
+ return False
+
+ # Finally, we check if we have enough of each resource
+ # available. If we don't have enough, the job cannot be
+ # scheduled.
+ for resource in resources:
+ if (self._max_resources[resource] > 0 and
+ self._used_resources[resource] >= self._max_resources[resource]):
+ return False
+
+ # Now we register the fact that our job is using the resources
+ # it asked for, and tell the scheduler that it is allowed to
+ # continue.
+ if not peek:
+ for resource in resources:
+ self._used_resources[resource] += 1
+
+ return True
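+
+ # For illustration, a rough sketch of the peek/reserve/release cycle from
+ # a caller's point of view (hypothetical caller code, not BuildStream's):
+ #
+ # wanted = [ResourceType.DOWNLOAD, ResourceType.CACHE]
+ # resources.reserve(wanted, peek=True) # returns True/False, claims nothing
+ # resources.reserve(wanted) # actually claims the resources
+ # resources.release(wanted) # must be balanced with the successful reserve()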
+
+ # release()
+ #
+ # Release resources previously reserved with Resources.reserve()
+ #
+ # Args:
+ # resources (set): A set of resources to release
+ #
+ def release(self, resources):
+ for resource in resources:
+ assert self._used_resources[resource] > 0, "Scheduler resource imbalance"
+ self._used_resources[resource] -= 1
+
+ # register_exclusive_interest()
+ #
+ # Inform the resources pool that `source` has an interest in
+ # reserving this resource exclusively.
+ #
+ # The source parameter is used to identify the caller; it must
+ # remain unique for as long as the interest is registered.
+ #
+ # This function may be called multiple times, and subsequent
+ # calls will simply have no effect until clear_exclusive_interest()
+ # is used to clear the interest.
+ #
+ # This must be called in advance of reserve()
+ #
+ # Args:
+ # resources (set): Set of resources to reserve exclusively
+ # source (any): Source identifier, to be used again when unregistering
+ # the interest.
+ #
+ def register_exclusive_interest(self, resources, source):
+
+ # The very first thing we do is to register any exclusive
+ # resources this job may want. Even if the job is not yet
+ # allowed to run (because another job is holding the resource
+ # it wants), we can still set this - it just means that any
+ # job *currently* using these resources has to finish first,
+ # and no new jobs wanting these can be launched (except other
+ # exclusive-access jobs).
+ #
+ for resource in resources:
+ self._exclusive_resources[resource].add(source)
+
+ # unregister_exclusive_interest()
+ #
+ # Clear the exclusive interest in these resources.
+ #
+ # This should be called by the given source which registered
+ # an exclusive interest.
+ #
+ # Args:
+ # resources (set): Set of resources to clear the exclusive interest in
+ # source (any): The same source identifier which was used when
+ # registering the interest.
+ #
+ def unregister_exclusive_interest(self, resources, source):
+
+ for resource in resources:
+ self._exclusive_resources[resource].discard(source)
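+
+ # For illustration, the exclusive-access lifecycle described above might look
+ # roughly like this from a caller's perspective (hypothetical source name):
+ #
+ # resources.register_exclusive_interest([ResourceType.CACHE], 'my-job')
+ # resources.reserve([ResourceType.CACHE], [ResourceType.CACHE]) # False until current CACHE users finish
+ # resources.release([ResourceType.CACHE]) # once the exclusive work is done
+ # resources.unregister_exclusive_interest([ResourceType.CACHE], 'my-job')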
diff --git a/src/buildstream/_scheduler/scheduler.py b/src/buildstream/_scheduler/scheduler.py
new file mode 100644
index 000000000..50ad7f07a
--- /dev/null
+++ b/src/buildstream/_scheduler/scheduler.py
@@ -0,0 +1,602 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+# System imports
+import os
+import asyncio
+from itertools import chain
+import signal
+import datetime
+from contextlib import contextmanager
+
+# Local imports
+from .resources import Resources, ResourceType
+from .jobs import JobStatus, CacheSizeJob, CleanupJob
+from .._profile import Topics, PROFILER
+
+
+# A decent return code for Scheduler.run()
+class SchedStatus():
+ SUCCESS = 0
+ ERROR = -1
+ TERMINATED = 1
+
+
+# Some action names for the internal jobs we launch
+#
+_ACTION_NAME_CLEANUP = 'clean'
+_ACTION_NAME_CACHE_SIZE = 'size'
+
+
+# Scheduler()
+#
+ # The scheduler operates on a list of queues, each of which is meant to accomplish
+# a specific task. Elements enter the first queue when Scheduler.run() is called
+# and into the next queue when complete. Scheduler.run() returns when all of the
+# elements have been traversed or when an error occurs.
+#
+ # Using the scheduler is a matter of (see the sketch at the end of this comment):
+# a.) Deriving the Queue class and implementing its abstract methods
+# b.) Instantiating a Scheduler with one or more queues
+# c.) Calling Scheduler.run(elements) with a list of elements
+# d.) Fetching results from your queues
+#
+# Args:
+# context: The Context in the parent scheduling process
+# start_time: The time at which the session started
+# interrupt_callback: A callback to handle ^C
+ # ticker_callback: A callback called once per second
+ # job_start_callback: A callback called when each job starts
+ # job_complete_callback: A callback called when each job completes
+#
+class Scheduler():
+
+ def __init__(self, context,
+ start_time,
+ interrupt_callback=None,
+ ticker_callback=None,
+ job_start_callback=None,
+ job_complete_callback=None):
+
+ #
+ # Public members
+ #
+ self.queues = None # Exposed for the frontend to print summaries
+ self.context = context # The Context object shared with Queues
+ self.terminated = False # Whether the scheduler was asked to terminate or has terminated
+ self.suspended = False # Whether the scheduler is currently suspended
+
+ # These are shared with the Job, but should probably be removed or made private in some way.
+ self.loop = None # Shared for Job access to observe the message queue
+ self.internal_stops = 0 # Number of SIGTSTP signals we've introduced; this is shared with job.py
+
+ #
+ # Private members
+ #
+ self._active_jobs = [] # Jobs currently being run in the scheduler
+ self._starttime = start_time # Initial application start time
+ self._suspendtime = None # Session time compensation for suspended state
+ self._queue_jobs = True # Whether we should continue to queue jobs
+
+ # State of cache management related jobs
+ self._cache_size_scheduled = False # Whether we have a cache size job scheduled
+ self._cache_size_running = None # A running CacheSizeJob, or None
+ self._cleanup_scheduled = False # Whether we have a cleanup job scheduled
+ self._cleanup_running = None # A running CleanupJob, or None
+
+ # Callbacks to report back to the Scheduler owner
+ self._interrupt_callback = interrupt_callback
+ self._ticker_callback = ticker_callback
+ self._job_start_callback = job_start_callback
+ self._job_complete_callback = job_complete_callback
+
+ # Whether our exclusive jobs, like 'cleanup' are currently already
+ # waiting or active.
+ #
+ # This is just a bit quicker than scanning the wait queue and active
+ # queue and comparing job action names.
+ #
+ self._exclusive_waiting = set()
+ self._exclusive_active = set()
+
+ self.resources = Resources(context.sched_builders,
+ context.sched_fetchers,
+ context.sched_pushers)
+
+ # run()
+ #
+ # Args:
+ # queues (list): A list of Queue objects
+ #
+ # Returns:
+ # (timedelta): The amount of time since the start of the session,
+ # discounting any time spent while jobs were suspended
+ # (SchedStatus): How the scheduling terminated
+ #
+ # Elements in the 'plan' will be processed by each
+ # queue in order. Processing will complete when all
+ # elements have been processed by each queue or when
+ # an error arises
+ #
+ def run(self, queues):
+
+ # Hold on to the queues to process
+ self.queues = queues
+
+ # Ensure that we have a fresh new event loop, in case we want
+ # to run another test in this thread.
+ self.loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(self.loop)
+
+ # Add timeouts
+ if self._ticker_callback:
+ self.loop.call_later(1, self._tick)
+
+ # Handle unix signals while running
+ self._connect_signals()
+
+ # Check if we need to start with some cache maintenance
+ self._check_cache_management()
+
+ # Start the profiler
+ with PROFILER.profile(Topics.SCHEDULER, "_".join(queue.action_name for queue in self.queues)):
+ # Run the queues
+ self._sched()
+ self.loop.run_forever()
+ self.loop.close()
+
+ # Stop handling unix signals
+ self._disconnect_signals()
+
+ failed = any(any(queue.failed_elements) for queue in self.queues)
+ self.loop = None
+
+ if failed:
+ status = SchedStatus.ERROR
+ elif self.terminated:
+ status = SchedStatus.TERMINATED
+ else:
+ status = SchedStatus.SUCCESS
+
+ return self.elapsed_time(), status
+
+ # terminate_jobs()
+ #
+ # Forcefully terminates all ongoing jobs.
+ #
+ # For this to be effective, one needs to return to
+ # the scheduler loop first and allow the scheduler
+ # to complete gracefully.
+ #
+ # NOTE: This will block SIGINT so that graceful process
+ # termination is not interrupted, and SIGINT will
+ # remain blocked after Scheduler.run() returns.
+ #
+ def terminate_jobs(self):
+
+ # Set this right away, the frontend will check this
+ # attribute to decide whether or not to print status info
+ # etc and the following code block will trigger some callbacks.
+ self.terminated = True
+ self.loop.call_soon(self._terminate_jobs_real)
+
+ # Block this until we're finished terminating jobs,
+ # this will remain blocked forever.
+ signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGINT])
+
+ # jobs_suspended()
+ #
+ # A context manager for running with jobs suspended
+ #
+ @contextmanager
+ def jobs_suspended(self):
+ self._disconnect_signals()
+ self._suspend_jobs()
+
+ yield
+
+ self._resume_jobs()
+ self._connect_signals()
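+
+ # For example, a frontend prompt would typically be wrapped like this
+ # (hypothetical caller code):
+ #
+ # with scheduler.jobs_suspended():
+ #     choice = input("Continue, quit or retry? ")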
+
+ # stop_queueing()
+ #
+ # Stop queueing additional jobs, causes Scheduler.run()
+ # to return once all currently processing jobs are finished.
+ #
+ def stop_queueing(self):
+ self._queue_jobs = False
+
+ # elapsed_time()
+ #
+ # Fetches the current session elapsed time
+ #
+ # Returns:
+ # (timedelta): The amount of time since the start of the session,
+ # discounting any time spent while jobs were suspended.
+ #
+ def elapsed_time(self):
+ timenow = datetime.datetime.now()
+ starttime = self._starttime
+ if not starttime:
+ starttime = timenow
+ return timenow - starttime
+
+ # job_completed():
+ #
+ # Called when a Job completes
+ #
+ # Args:
+ # queue (Queue): The Queue holding a complete job
+ # job (Job): The completed Job
+ # status (JobStatus): The status of the completed job
+ #
+ def job_completed(self, job, status):
+
+ # Remove from the active jobs list
+ self._active_jobs.remove(job)
+
+ # Scheduler owner facing callback
+ self._job_complete_callback(job, status)
+
+ # Now check for more jobs
+ self._sched()
+
+ # check_cache_size():
+ #
+ # Queues a cache size calculation job, after the cache
+ # size is calculated, a cleanup job will be run automatically
+ # if needed.
+ #
+ def check_cache_size(self):
+
+ # Here we assume we are called in response to a job
+ # completion callback, or before entering the scheduler.
+ #
+ # As such there is no need to call `_sched()` from here,
+ # and we prefer to run it once at the last moment.
+ #
+ self._cache_size_scheduled = True
+
+ #######################################################
+ # Local Private Methods #
+ #######################################################
+
+ # _check_cache_management()
+ #
+ # Run an initial check if we need to lock the cache
+ # resource and check the size and possibly launch
+ # a cleanup.
+ #
+ # Sessions which do not add to the cache are not affected.
+ #
+ def _check_cache_management(self):
+
+ # Only trigger the check for a scheduler run which has
+ # queues which require the CACHE resource.
+ if not any(q for q in self.queues
+ if ResourceType.CACHE in q.resources):
+ return
+
+ # If the estimated size outgrows the quota, queue a job to
+ # actually check the real cache size initially; this one
+ # should have exclusive access to the cache to ensure nothing
+ # starts while we are checking the cache.
+ #
+ artifacts = self.context.artifactcache
+ if artifacts.full():
+ self._sched_cache_size_job(exclusive=True)
+
+ # _spawn_job()
+ #
+ # Spawns a job
+ #
+ # Args:
+ # job (Job): The job to spawn
+ #
+ def _spawn_job(self, job):
+ self._active_jobs.append(job)
+ if self._job_start_callback:
+ self._job_start_callback(job)
+ job.spawn()
+
+ # Callback for the cache size job
+ def _cache_size_job_complete(self, status, cache_size):
+
+ # Deallocate cache size job resources
+ self._cache_size_running = None
+ self.resources.release([ResourceType.CACHE, ResourceType.PROCESS])
+
+ # Unregister the exclusive interest if there was any
+ self.resources.unregister_exclusive_interest(
+ [ResourceType.CACHE], 'cache-size'
+ )
+
+ # Schedule a cleanup job if we've hit the threshold
+ if status != JobStatus.OK:
+ return
+
+ context = self.context
+ artifacts = context.artifactcache
+
+ if artifacts.full():
+ self._cleanup_scheduled = True
+
+ # Callback for the cleanup job
+ def _cleanup_job_complete(self, status, cache_size):
+
+ # Deallocate cleanup job resources
+ self._cleanup_running = None
+ self.resources.release([ResourceType.CACHE, ResourceType.PROCESS])
+
+ # Unregister the exclusive interest when we're done with it
+ if not self._cleanup_scheduled:
+ self.resources.unregister_exclusive_interest(
+ [ResourceType.CACHE], 'cache-cleanup'
+ )
+
+ # _sched_cleanup_job()
+ #
+ # Runs a cleanup job if one is scheduled to run now and
+ # sufficient resources are available.
+ #
+ def _sched_cleanup_job(self):
+
+ if self._cleanup_scheduled and self._cleanup_running is None:
+
+ # Ensure we have an exclusive interest in the resources
+ self.resources.register_exclusive_interest(
+ [ResourceType.CACHE], 'cache-cleanup'
+ )
+
+ if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS],
+ [ResourceType.CACHE]):
+
+ # Update state and launch
+ self._cleanup_scheduled = False
+ self._cleanup_running = \
+ CleanupJob(self, _ACTION_NAME_CLEANUP, 'cleanup/cleanup',
+ complete_cb=self._cleanup_job_complete)
+ self._spawn_job(self._cleanup_running)
+
+ # _sched_cache_size_job()
+ #
+ # Runs a cache size job if one is scheduled to run now and
+ # sufficient resources are available.
+ #
+ # Args:
+ # exclusive (bool): Run a cache size job immediately and
+ # hold the ResourceType.CACHE resource
+ # exclusively (used at startup).
+ #
+ def _sched_cache_size_job(self, *, exclusive=False):
+
+ # The exclusive argument is not intended (or safe) for arbitrary use.
+ if exclusive:
+ assert not self._cache_size_scheduled
+ assert not self._cache_size_running
+ assert not self._active_jobs
+ self._cache_size_scheduled = True
+
+ if self._cache_size_scheduled and not self._cache_size_running:
+
+ # Handle the exclusive launch
+ exclusive_resources = set()
+ if exclusive:
+ exclusive_resources.add(ResourceType.CACHE)
+ self.resources.register_exclusive_interest(
+ exclusive_resources, 'cache-size'
+ )
+
+ # Reserve the resources (with the possible exclusive cache resource)
+ if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS],
+ exclusive_resources):
+
+ # Update state and launch
+ self._cache_size_scheduled = False
+ self._cache_size_running = \
+ CacheSizeJob(self, _ACTION_NAME_CACHE_SIZE,
+ 'cache_size/cache_size',
+ complete_cb=self._cache_size_job_complete)
+ self._spawn_job(self._cache_size_running)
+
+ # _sched_queue_jobs()
+ #
+ # Ask the queues what jobs they want to schedule and schedule
+ # them. This is done here so we can ask for new jobs when jobs
+ # from previous queues become available.
+ #
+ # This will process the Queues, pull elements through the Queues
+ # and process anything that is ready.
+ #
+ def _sched_queue_jobs(self):
+ ready = []
+ process_queues = True
+
+ while self._queue_jobs and process_queues:
+
+ # Pull elements forward through queues
+ elements = []
+ for queue in self.queues:
+ queue.enqueue(elements)
+ elements = list(queue.dequeue())
+
+            # Kick off whatever jobs can be processed at this time
+            #
+            # We start by harvesting from the last queue first, because
+            # we want to give priority to queues later in the
+            # scheduling process in the case that multiple queues
+            # share the same token type.
+            #
+            # This avoids starvation situations where we don't move on
+            # to fetch tasks for elements which failed to pull, and
+            # thus need all the pulls to complete before ever starting
+            # a build.
+ ready.extend(chain.from_iterable(
+ q.harvest_jobs() for q in reversed(self.queues)
+ ))
+
+ # harvest_jobs() may have decided to skip some jobs, making
+ # them eligible for promotion to the next queue as a side effect.
+ #
+ # If that happens, do another round.
+ process_queues = any(q.dequeue_ready() for q in self.queues)
+
+ # Spawn the jobs
+ #
+ for job in ready:
+ self._spawn_job(job)
+
+ # _sched()
+ #
+ # Run any jobs which are ready to run, or quit the main loop
+ # when nothing is running or is ready to run.
+ #
+    # This is the main driving function of the scheduler; it is called
+    # initially when we enter Scheduler.run(), and again whenever any
+    # job completes, after any business logic has occurred and before
+    # going back to sleep.
+ #
+ def _sched(self):
+
+ if not self.terminated:
+
+ #
+ # Try the cache management jobs
+ #
+ self._sched_cleanup_job()
+ self._sched_cache_size_job()
+
+ #
+ # Run as many jobs as the queues can handle for the
+ # available resources
+ #
+ self._sched_queue_jobs()
+
+ #
+ # If nothing is ticking then bail out
+ #
+ if not self._active_jobs:
+ self.loop.stop()
+
+ # _suspend_jobs()
+ #
+ # Suspend all ongoing jobs.
+ #
+ def _suspend_jobs(self):
+ if not self.suspended:
+ self._suspendtime = datetime.datetime.now()
+ self.suspended = True
+ for job in self._active_jobs:
+ job.suspend()
+
+ # _resume_jobs()
+ #
+ # Resume suspended jobs.
+ #
+ def _resume_jobs(self):
+ if self.suspended:
+ for job in self._active_jobs:
+ job.resume()
+ self.suspended = False
+ self._starttime += (datetime.datetime.now() - self._suspendtime)
+ self._suspendtime = None
+
+ # _interrupt_event():
+ #
+ # A loop registered event callback for keyboard interrupts
+ #
+ def _interrupt_event(self):
+
+ # FIXME: This should not be needed, but for some reason we receive an
+ # additional SIGINT event when the user hits ^C a second time
+ # to inform us that they really intend to terminate; even though
+ # we have disconnected our handlers at this time.
+ #
+ if self.terminated:
+ return
+
+        # Leave this to the frontend to decide; if no
+        # interrupt callback was specified, then just terminate.
+ if self._interrupt_callback:
+ self._interrupt_callback()
+ else:
+ # Default without a frontend is just terminate
+ self.terminate_jobs()
+
+ # _terminate_event():
+ #
+ # A loop registered event callback for SIGTERM
+ #
+ def _terminate_event(self):
+ self.terminate_jobs()
+
+ # _suspend_event():
+ #
+ # A loop registered event callback for SIGTSTP
+ #
+ def _suspend_event(self):
+
+ # Ignore the feedback signals from Job.suspend()
+ if self.internal_stops:
+ self.internal_stops -= 1
+ return
+
+ # No need to care if jobs were suspended or not, we _only_ handle this
+ # while we know jobs are not suspended.
+ self._suspend_jobs()
+ os.kill(os.getpid(), signal.SIGSTOP)
+ self._resume_jobs()
+
+ # _connect_signals():
+ #
+ # Connects our signal handler event callbacks to the mainloop
+ #
+ def _connect_signals(self):
+ self.loop.add_signal_handler(signal.SIGINT, self._interrupt_event)
+ self.loop.add_signal_handler(signal.SIGTERM, self._terminate_event)
+ self.loop.add_signal_handler(signal.SIGTSTP, self._suspend_event)
+
+ def _disconnect_signals(self):
+ self.loop.remove_signal_handler(signal.SIGINT)
+ self.loop.remove_signal_handler(signal.SIGTSTP)
+ self.loop.remove_signal_handler(signal.SIGTERM)
+
+ def _terminate_jobs_real(self):
+        # 20 seconds is a long time; termination can take a while and
+        # sometimes still fails. This needs to be looked into more deeply.
+ wait_start = datetime.datetime.now()
+ wait_limit = 20.0
+
+ # First tell all jobs to terminate
+ for job in self._active_jobs:
+ job.terminate()
+
+ # Now wait for them to really terminate
+ for job in self._active_jobs:
+ elapsed = datetime.datetime.now() - wait_start
+ timeout = max(wait_limit - elapsed.total_seconds(), 0.0)
+ if not job.terminate_wait(timeout):
+ job.kill()
+
+ # Regular timeout for driving status in the UI
+ def _tick(self):
+ elapsed = self.elapsed_time()
+ self._ticker_callback(elapsed)
+ self.loop.call_later(1, self._tick)
diff --git a/src/buildstream/_signals.py b/src/buildstream/_signals.py
new file mode 100644
index 000000000..41b100f93
--- /dev/null
+++ b/src/buildstream/_signals.py
@@ -0,0 +1,203 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+import os
+import signal
+import sys
+import threading
+import traceback
+from contextlib import contextmanager, ExitStack
+from collections import deque
+
+
+# Global per process state for handling of sigterm/sigtstp/sigcont,
+# note that it is expected that this only ever be used by processes
+# the scheduler forks off, not the main process
+terminator_stack = deque()
+suspendable_stack = deque()
+
+
+# Per process SIGTERM handler
+def terminator_handler(signal_, frame):
+ while terminator_stack:
+ terminator_ = terminator_stack.pop()
+ try:
+ terminator_()
+ except: # noqa pylint: disable=bare-except
+ # Ensure we print something if there's an exception raised when
+ # processing the handlers. Note that the default exception
+ # handler won't be called because we os._exit next, so we must
+ # catch all possible exceptions with the unqualified 'except'
+ # clause.
+ traceback.print_exc(file=sys.stderr)
+ print('Error encountered in BuildStream while processing custom SIGTERM handler:',
+ terminator_,
+ file=sys.stderr)
+
+    # Use a special exit here to terminate immediately, as is recommended
+    # for precisely this situation where child forks are terminated.
+ os._exit(-1)
+
+
+# terminator()
+#
+# A context manager for interruptible tasks; this guarantees
+# that while the code block is running, the supplied function
+# will be called upon process termination.
+#
+# Note that after handlers are called, the termination will be handled by
+# terminating immediately with os._exit(). This means that SystemExit will not
+# be raised and 'finally' clauses will not be executed.
+#
+# Args:
+# terminate_func (callable): A function to call when aborting
+# the nested code block.
+#
+@contextmanager
+def terminator(terminate_func):
+ global terminator_stack # pylint: disable=global-statement
+
+ # Signal handling only works in the main thread
+ if threading.current_thread() != threading.main_thread():
+ yield
+ return
+
+ outermost = bool(not terminator_stack)
+
+ terminator_stack.append(terminate_func)
+ if outermost:
+ original_handler = signal.signal(signal.SIGTERM, terminator_handler)
+
+ try:
+ yield
+ finally:
+ if outermost:
+ signal.signal(signal.SIGTERM, original_handler)
+ terminator_stack.pop()
+
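+# A minimal usage sketch (illustrative only, not part of this module),
+# assuming a hypothetical long running download performed in a child
+# process forked by the scheduler:
+#
+#     def fetch(url, dest):
+#         def abort():
+#             os.unlink(dest)       # drop the partial download on SIGTERM
+#
+#         with terminator(abort):
+#             download(url, dest)   # hypothetical long running work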
+
+# Just a simple object for holding on to two callbacks
+class Suspender():
+ def __init__(self, suspend_callback, resume_callback):
+ self.suspend = suspend_callback
+ self.resume = resume_callback
+
+
+# Per process SIGTSTP handler
+def suspend_handler(sig, frame):
+
+ # Suspend callbacks from innermost frame first
+ for suspender in reversed(suspendable_stack):
+ suspender.suspend()
+
+    # Use SIGSTOP directly on ourselves now; don't introduce more SIGTSTP
+    #
+    # Here the process sleeps until SIGCONT, which we simply
+    # don't handle. We know we'll pick up execution right here
+    # when we wake up.
+ os.kill(os.getpid(), signal.SIGSTOP)
+
+ # Resume callbacks from outermost frame inwards
+ for suspender in suspendable_stack:
+ suspender.resume()
+
+
+# suspendable()
+#
+# A context manager for handling process suspending and resuming
+#
+# Args:
+#   suspend_callback (callable): A function to call at process suspend time.
+#   resume_callback (callable): A function to call at process resume time.
+#
+# This must be used in code blocks which spawn processes that become
+# their own session leader. In these cases, SIGSTOP and SIGCONT need
+# to be propagated to the child process group.
+#
+# This context manager can also be used recursively, so multiple
+# things can happen at suspend/resume time (such as tracking timers
+# and ensuring durations do not count suspended time).
+#
+@contextmanager
+def suspendable(suspend_callback, resume_callback):
+ global suspendable_stack # pylint: disable=global-statement
+
+ outermost = bool(not suspendable_stack)
+ suspender = Suspender(suspend_callback, resume_callback)
+ suspendable_stack.append(suspender)
+
+ if outermost:
+ original_stop = signal.signal(signal.SIGTSTP, suspend_handler)
+
+ try:
+ yield
+ finally:
+ if outermost:
+ signal.signal(signal.SIGTSTP, original_stop)
+
+ suspendable_stack.pop()
+
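+# A minimal usage sketch (illustrative only, not part of this module),
+# assuming a hypothetical timer object whose pause()/resume() methods
+# keep suspended time out of the measured duration:
+#
+#     with suspendable(timer.pause, timer.resume):
+#         run_task()                # hypothetical unit of work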
+
+# blocked()
+#
+# A context manager for running a code block with blocked signals
+#
+# Args:
+#    signal_list (list): A list of unix signals to block
+#    ignore (bool): Whether to entirely ignore the signals which were
+#                   received and left pending while the process had blocked them
+#
+@contextmanager
+def blocked(signal_list, ignore=True):
+
+ with ExitStack() as stack:
+
+ # Optionally add the ignored() context manager to this context
+ if ignore:
+ stack.enter_context(ignored(signal_list))
+
+ # Set and save the sigprocmask
+ blocked_signals = signal.pthread_sigmask(signal.SIG_BLOCK, signal_list)
+
+ try:
+ yield
+ finally:
+        # If the signals were set to be ignored, restoring the mask here
+        # will deliver any pending signals, which are then silently
+        # discarded because their handlers are still set to SIG_IGN
+ signal.pthread_sigmask(signal.SIG_SETMASK, blocked_signals)
+
+
+# ignored()
+#
+# A context manager for running a code block with ignored signals
+#
+# Args:
+#    signal_list (list): A list of unix signals to ignore
+#
+@contextmanager
+def ignored(signal_list):
+
+ orig_handlers = {}
+ for sig in signal_list:
+ orig_handlers[sig] = signal.signal(sig, signal.SIG_IGN)
+
+ try:
+ yield
+ finally:
+ for sig in signal_list:
+ signal.signal(sig, orig_handlers[sig])
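+
+
+# A minimal usage sketch (illustrative only, not part of this module):
+# block SIGTERM around a short critical section, discarding any SIGTERM
+# received while the block was active:
+#
+#     with blocked([signal.SIGTERM], ignore=True):
+#         write_state_files()       # hypothetical critical section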
diff --git a/src/buildstream/_site.py b/src/buildstream/_site.py
new file mode 100644
index 000000000..8940fa34a
--- /dev/null
+++ b/src/buildstream/_site.py
@@ -0,0 +1,67 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+import os
+import shutil
+import subprocess
+
+#
+# Private module declaring some info about where BuildStream
+# is installed so we can look up package-relative resources easily
+#
+
+# The package root, wherever we are running the package from
+root = os.path.dirname(os.path.abspath(__file__))
+
+# The Element plugin directory
+element_plugins = os.path.join(root, 'plugins', 'elements')
+
+# The Source plugin directory
+source_plugins = os.path.join(root, 'plugins', 'sources')
+
+# Default user configuration
+default_user_config = os.path.join(root, 'data', 'userconfig.yaml')
+
+# Default project configuration
+default_project_config = os.path.join(root, 'data', 'projectconfig.yaml')
+
+# Script template to call module building scripts
+build_all_template = os.path.join(root, 'data', 'build-all.sh.in')
+
+# Module building script template
+build_module_template = os.path.join(root, 'data', 'build-module.sh.in')
+
+
+def get_bwrap_version():
+ # Get the current bwrap version
+ #
+ # returns None if no bwrap was found
+    # otherwise returns a tuple of 3 ints: (major, minor, patch)
+ bwrap_path = shutil.which('bwrap')
+
+ if not bwrap_path:
+ return None
+
+ cmd = [bwrap_path, "--version"]
+ try:
+ version = str(subprocess.check_output(cmd).split()[1], "utf-8")
+ except subprocess.CalledProcessError:
+ return None
+
+ return tuple(int(x) for x in version.split("."))
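+
+
+# A minimal usage sketch (illustrative only, not part of this module):
+# gate a hypothetical sandbox feature on a minimum bwrap version; tuples
+# compare lexicographically, so (major, minor, patch) works here:
+#
+#     version = get_bwrap_version()
+#     if version is not None and version >= (0, 1, 8):
+#         enable_feature()          # hypothetical feature toggle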
diff --git a/src/buildstream/_sourcecache.py b/src/buildstream/_sourcecache.py
new file mode 100644
index 000000000..1d3342a75
--- /dev/null
+++ b/src/buildstream/_sourcecache.py
@@ -0,0 +1,249 @@
+#
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Raoul Hidalgo Charman <raoul.hidalgocharman@codethink.co.uk>
+#
+import os
+
+from ._cas import CASRemoteSpec
+from .storage._casbaseddirectory import CasBasedDirectory
+from ._basecache import BaseCache
+from ._exceptions import CASError, CASCacheError, SourceCacheError
+from . import utils
+
+
+# Holds configuration for a remote used for the source cache.
+#
+# Args:
+# url (str): Location of the remote source cache
+# push (bool): Whether we should attempt to push sources to this cache,
+# in addition to pulling from it.
+#     instance-name (str): Name, if any, of the server instance
+#
+class SourceCacheSpec(CASRemoteSpec):
+ pass
+
+
+# Class that keeps config of remotes and deals with caching of sources.
+#
+# Args:
+#    context (Context): The BuildStream context
+#
+class SourceCache(BaseCache):
+
+ spec_class = SourceCacheSpec
+ spec_name = "source_cache_specs"
+ spec_error = SourceCacheError
+ config_node_name = "source-caches"
+
+ def __init__(self, context):
+ super().__init__(context)
+
+ self._required_sources = set()
+
+ self.casquota.add_remove_callbacks(self.unrequired_sources, self.cas.remove)
+ self.casquota.add_list_refs_callback(self.list_sources)
+
+ # mark_required_sources()
+ #
+ # Mark sources that are required by the current run.
+ #
+ # Sources that are in this list will not be removed during the current
+ # pipeline.
+ #
+ # Args:
+ # sources (iterable): An iterable over sources that are required
+ #
+ def mark_required_sources(self, sources):
+ sources = list(sources) # in case it's a generator
+
+ self._required_sources.update(sources)
+
+ # update mtimes just in case
+ for source in sources:
+ ref = source._get_source_name()
+ try:
+ self.cas.update_mtime(ref)
+ except CASCacheError:
+ pass
+
+ # required_sources()
+ #
+ # Yields the keys of all sources marked as required by the current build
+ # plan
+ #
+ # Returns:
+ # iterable (str): iterable over the required source refs
+ #
+ def required_sources(self):
+ for source in self._required_sources:
+ yield source._get_source_name()
+
+ # unrequired_sources()
+ #
+ # Yields the refs of all sources not required by the current build plan
+ #
+ # Returns:
+ # iter (str): iterable over unrequired source keys
+ #
+ def unrequired_sources(self):
+ required_source_names = set(map(
+ lambda x: x._get_source_name(), self._required_sources))
+ for (mtime, source) in self._list_refs_mtimes(
+ os.path.join(self.cas.casdir, 'refs', 'heads'),
+ glob_expr="@sources/*"):
+ if source not in required_source_names:
+ yield (mtime, source)
+
+ # list_sources()
+ #
+ # Get list of all sources in the `cas/refs/heads/@sources/` folder
+ #
+ # Returns:
+ # ([str]): iterable over all source refs
+ #
+ def list_sources(self):
+ return [ref for _, ref in self._list_refs_mtimes(
+ os.path.join(self.cas.casdir, 'refs', 'heads'),
+ glob_expr="@sources/*")]
+
+ # contains()
+ #
+ # Given a source, gets the ref name and checks whether the local CAS
+ # contains it.
+ #
+ # Args:
+ # source (Source): Source to check
+ #
+ # Returns:
+ # (bool): whether the CAS contains this source or not
+ #
+ def contains(self, source):
+ ref = source._get_source_name()
+ return self.cas.contains(ref)
+
+ # commit()
+ #
+ # Given a source along with previous sources, it stages and commits these
+ # to the local CAS. This is done due to some types of sources being
+ # dependent on previous sources, such as the patch source.
+ #
+ # Args:
+    #    source (Source): The last source to commit
+    #    previous_sources (list): The previous sources that it depends on
+    #
+ def commit(self, source, previous_sources):
+ ref = source._get_source_name()
+
+ # Use tmpdir for now
+ vdir = CasBasedDirectory(self.cas)
+ for previous_source in previous_sources:
+ vdir.import_files(self.export(previous_source))
+
+ with utils._tempdir(dir=self.context.tmpdir, prefix='staging-temp') as tmpdir:
+ if not vdir.is_empty():
+ vdir.export_files(tmpdir)
+ source._stage(tmpdir)
+ vdir.import_files(tmpdir, can_link=True)
+
+ self.cas.set_ref(ref, vdir._get_digest())
+
+ # export()
+ #
+ # Exports a source in the CAS to a virtual directory
+ #
+ # Args:
+ # source (Source): source we want to export
+ #
+ # Returns:
+    #    (CasBasedDirectory): The virtual directory containing the exported source
+ def export(self, source):
+ ref = source._get_source_name()
+
+ try:
+ digest = self.cas.resolve_ref(ref)
+ except CASCacheError as e:
+ raise SourceCacheError("Error exporting source: {}".format(e))
+
+ return CasBasedDirectory(self.cas, digest=digest)
+
+ # pull()
+ #
+    # Attempts to pull sources from configured remote source caches.
+    #
+    # Args:
+    #    source (Source): The source we want to fetch
+ #
+ # Returns:
+ # (bool): True if pull successful, False if not
+ def pull(self, source):
+ ref = source._get_source_name()
+
+ project = source._get_project()
+
+ display_key = source._get_brief_display_key()
+
+ for remote in self._remotes[project]:
+ try:
+ source.status("Pulling source {} <- {}".format(display_key, remote.spec.url))
+
+ if self.cas.pull(ref, remote):
+ source.info("Pulled source {} <- {}".format(display_key, remote.spec.url))
+ # no need to pull from additional remotes
+ return True
+ else:
+ source.info("Remote ({}) does not have source {} cached".format(
+ remote.spec.url, display_key))
+ except CASError as e:
+ raise SourceCacheError("Failed to pull source {}: {}".format(
+ display_key, e)) from e
+ return False
+
+ # push()
+ #
+ # Push a source to configured remote source caches
+ #
+ # Args:
+ # source (Source): source to push
+ #
+ # Returns:
+    #    (bool): whether it pushed to a remote source cache
+ #
+ def push(self, source):
+ ref = source._get_source_name()
+ project = source._get_project()
+
+ # find configured push remotes for this source
+ if self._has_push_remotes:
+ push_remotes = [r for r in self._remotes[project] if r.spec.push]
+ else:
+ push_remotes = []
+
+ pushed = False
+
+ display_key = source._get_brief_display_key()
+ for remote in push_remotes:
+ remote.init()
+ source.status("Pushing source {} -> {}".format(display_key, remote.spec.url))
+ if self.cas.push([ref], remote):
+ source.info("Pushed source {} -> {}".format(display_key, remote.spec.url))
+ pushed = True
+ else:
+ source.info("Remote ({}) already has source {} cached"
+ .format(remote.spec.url, display_key))
+
+ return pushed
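+
+
+# A minimal usage sketch (illustrative only, not part of this module),
+# assuming 'sourcecache', 'source' and 'previous_sources' are provided
+# by the surrounding pipeline code:
+#
+#     if not sourcecache.contains(source):
+#         if not sourcecache.pull(source):
+#             sourcecache.commit(source, previous_sources)
+#
+#     if sourcecache.has_push_remotes():
+#         sourcecache.push(source)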
diff --git a/src/buildstream/_sourcefactory.py b/src/buildstream/_sourcefactory.py
new file mode 100644
index 000000000..1d959a140
--- /dev/null
+++ b/src/buildstream/_sourcefactory.py
@@ -0,0 +1,64 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+from . import _site
+from ._plugincontext import PluginContext
+from .source import Source
+
+
+# A SourceFactory creates Source instances
+# in the context of a given factory
+#
+# Args:
+# plugin_base (PluginBase): The main PluginBase object to work with
+# plugin_origins (list): Data used to search for external Source plugins
+#
+class SourceFactory(PluginContext):
+
+ def __init__(self, plugin_base, *,
+ format_versions={},
+ plugin_origins=None):
+
+ super().__init__(plugin_base, Source, [_site.source_plugins],
+ format_versions=format_versions,
+ plugin_origins=plugin_origins)
+
+ # create():
+ #
+ # Create a Source object, the pipeline uses this to create Source
+ # objects on demand for a given pipeline.
+ #
+ # Args:
+ # context (object): The Context object for processing
+ # project (object): The project object
+ # meta (object): The loaded MetaSource
+ #
+ # Returns:
+ # A newly created Source object of the appropriate kind
+ #
+ # Raises:
+ # PluginError (if the kind lookup failed)
+ # LoadError (if the source itself took issue with the config)
+ #
+ def create(self, context, project, meta):
+ source_type, _ = self.lookup(meta.kind)
+ source = source_type(context, project, meta)
+ version = self._format_versions.get(meta.kind, 0)
+ self._assert_plugin_format(source, version)
+ return source
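+
+
+# A minimal usage sketch (illustrative only, not part of this module),
+# assuming 'plugin_base', 'context', 'project' and 'meta' are provided
+# by the core as usual:
+#
+#     factory = SourceFactory(plugin_base)
+#     source = factory.create(context, project, meta)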
diff --git a/src/buildstream/_stream.py b/src/buildstream/_stream.py
new file mode 100644
index 000000000..2343c553c
--- /dev/null
+++ b/src/buildstream/_stream.py
@@ -0,0 +1,1512 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+import itertools
+import functools
+import os
+import sys
+import stat
+import shlex
+import shutil
+import tarfile
+import tempfile
+from contextlib import contextmanager, suppress
+from fnmatch import fnmatch
+
+from ._artifactelement import verify_artifact_ref
+from ._exceptions import StreamError, ImplError, BstError, ArtifactElementError, ArtifactError
+from ._message import Message, MessageType
+from ._scheduler import Scheduler, SchedStatus, TrackQueue, FetchQueue, \
+ SourcePushQueue, BuildQueue, PullQueue, ArtifactPushQueue
+from ._pipeline import Pipeline, PipelineSelection
+from ._profile import Topics, PROFILER
+from .types import _KeyStrength
+from . import utils, _yaml, _site
+from . import Scope, Consistency
+
+
+# Stream()
+#
+# This is the main, toplevel calling interface in BuildStream core.
+#
+# Args:
+# context (Context): The Context object
+# project (Project): The Project object
+# session_start (datetime): The time when the session started
+# session_start_callback (callable): A callback to invoke when the session starts
+# interrupt_callback (callable): A callback to invoke when we get interrupted
+# ticker_callback (callable): Invoked every second while running the scheduler
+# job_start_callback (callable): Called when a job starts
+# job_complete_callback (callable): Called when a job completes
+#
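+# A minimal usage sketch (illustrative only, not part of this module),
+# assuming 'context', 'project' and 'start_time' are prepared by the
+# frontend and 'app.bst' names a hypothetical target element:
+#
+#     stream = Stream(context, project, start_time)
+#     stream.build(['app.bst'])
+#     stream.cleanup()
+#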
+class Stream():
+
+ def __init__(self, context, project, session_start, *,
+ session_start_callback=None,
+ interrupt_callback=None,
+ ticker_callback=None,
+ job_start_callback=None,
+ job_complete_callback=None):
+
+ #
+ # Public members
+ #
+ self.targets = [] # Resolved target elements
+ self.session_elements = [] # List of elements being processed this session
+ self.total_elements = [] # Total list of elements based on targets
+ self.queues = [] # Queue objects
+
+ #
+ # Private members
+ #
+ self._artifacts = context.artifactcache
+ self._sourcecache = context.sourcecache
+ self._context = context
+ self._project = project
+ self._pipeline = Pipeline(context, project, self._artifacts)
+ self._scheduler = Scheduler(context, session_start,
+ interrupt_callback=interrupt_callback,
+ ticker_callback=ticker_callback,
+ job_start_callback=job_start_callback,
+ job_complete_callback=job_complete_callback)
+ self._first_non_track_queue = None
+ self._session_start_callback = session_start_callback
+
+ # cleanup()
+ #
+ # Cleans up application state
+ #
+ def cleanup(self):
+ if self._project:
+ self._project.cleanup()
+
+ # load_selection()
+ #
+    # An all-purpose method for loading a selection of elements; this
+ # is primarily useful for the frontend to implement `bst show`
+ # and `bst shell`.
+ #
+ # Args:
+ # targets (list of str): Targets to pull
+ # selection (PipelineSelection): The selection mode for the specified targets
+ # except_targets (list of str): Specified targets to except from fetching
+ # use_artifact_config (bool): If artifact remote configs should be loaded
+ #
+ # Returns:
+ # (list of Element): The selected elements
+ def load_selection(self, targets, *,
+ selection=PipelineSelection.NONE,
+ except_targets=(),
+ use_artifact_config=False,
+ load_refs=False):
+ with PROFILER.profile(Topics.LOAD_SELECTION, "_".join(t.replace(os.sep, "-") for t in targets)):
+ target_objects, _ = self._load(targets, (),
+ selection=selection,
+ except_targets=except_targets,
+ fetch_subprojects=False,
+ use_artifact_config=use_artifact_config,
+ load_refs=load_refs)
+
+ return target_objects
+
+ # shell()
+ #
+ # Run a shell
+ #
+ # Args:
+ # element (Element): An Element object to run the shell for
+ # scope (Scope): The scope for the shell (Scope.BUILD or Scope.RUN)
+ # prompt (str): The prompt to display in the shell
+ # directory (str): A directory where an existing prestaged sysroot is expected, or None
+ # mounts (list of HostMount): Additional directories to mount into the sandbox
+ # isolate (bool): Whether to isolate the environment like we do in builds
+ # command (list): An argv to launch in the sandbox, or None
+    #    usebuildtree (str): Whether to use a buildtree as the source, as given by the CLI option
+ #
+ # Returns:
+ # (int): The exit code of the launched shell
+ #
+ def shell(self, element, scope, prompt, *,
+ directory=None,
+ mounts=None,
+ isolate=False,
+ command=None,
+ usebuildtree=None):
+
+ # Assert we have everything we need built, unless the directory is specified
+ # in which case we just blindly trust the directory, using the element
+ # definitions to control the execution environment only.
+ if directory is None:
+ missing_deps = [
+ dep._get_full_name()
+ for dep in self._pipeline.dependencies([element], scope)
+ if not dep._cached()
+ ]
+ if missing_deps:
+ raise StreamError("Elements need to be built or downloaded before staging a shell environment",
+ detail="\n".join(missing_deps))
+
+ buildtree = False
+ # Check if we require a pull queue attempt, with given artifact state and context
+ if usebuildtree:
+ if not element._cached_buildtree():
+ require_buildtree = self._buildtree_pull_required([element])
+ # Attempt a pull queue for the given element if remote and context allow it
+ if require_buildtree:
+ self._message(MessageType.INFO, "Attempting to fetch missing artifact buildtree")
+ self._add_queue(PullQueue(self._scheduler))
+ self._enqueue_plan(require_buildtree)
+ self._run()
+ # Now check if the buildtree was successfully fetched
+ if element._cached_buildtree():
+ buildtree = True
+
+ if not buildtree:
+ if element._buildtree_exists():
+ message = "Buildtree is not cached locally or in available remotes"
+ else:
+ message = "Artifact was created without buildtree"
+
+ if usebuildtree == "always":
+ raise StreamError(message)
+ else:
+ self._message(MessageType.INFO, message + ", shell will be loaded without it")
+ else:
+ buildtree = True
+
+ return element._shell(scope, directory, mounts=mounts, isolate=isolate, prompt=prompt, command=command,
+ usebuildtree=buildtree)
+
+ # build()
+ #
+ # Builds (assembles) elements in the pipeline.
+ #
+ # Args:
+ # targets (list of str): Targets to build
+ # track_targets (list of str): Specified targets for tracking
+ # track_except (list of str): Specified targets to except from tracking
+ # track_cross_junctions (bool): Whether tracking should cross junction boundaries
+ # ignore_junction_targets (bool): Whether junction targets should be filtered out
+ # build_all (bool): Whether to build all elements, or only those
+ # which are required to build the target.
+ # remote (str): The URL of a specific remote server to push to, or None
+ #
+ # If `remote` specified as None, then regular configuration will be used
+ # to determine where to push artifacts to.
+ #
+ def build(self, targets, *,
+ track_targets=None,
+ track_except=None,
+ track_cross_junctions=False,
+ ignore_junction_targets=False,
+ build_all=False,
+ remote=None):
+
+ if build_all:
+ selection = PipelineSelection.ALL
+ else:
+ selection = PipelineSelection.PLAN
+
+ use_config = True
+ if remote:
+ use_config = False
+
+ elements, track_elements = \
+ self._load(targets, track_targets,
+ selection=selection, track_selection=PipelineSelection.ALL,
+ track_except_targets=track_except,
+ track_cross_junctions=track_cross_junctions,
+ ignore_junction_targets=ignore_junction_targets,
+ use_artifact_config=use_config,
+ artifact_remote_url=remote,
+ use_source_config=True,
+ fetch_subprojects=True,
+ dynamic_plan=True)
+
+ # Remove the tracking elements from the main targets
+ elements = self._pipeline.subtract_elements(elements, track_elements)
+
+ # Assert that the elements we're not going to track are consistent
+ self._pipeline.assert_consistent(elements)
+
+ if all(project.remote_execution_specs for project in self._context.get_projects()):
+ # Remote execution is configured for all projects.
+ # Require artifact files only for target elements and their runtime dependencies.
+ self._context.set_artifact_files_optional()
+ for element in self.targets:
+ element._set_artifact_files_required()
+
+ # Now construct the queues
+ #
+ track_queue = None
+ if track_elements:
+ track_queue = TrackQueue(self._scheduler)
+ self._add_queue(track_queue, track=True)
+
+ if self._artifacts.has_fetch_remotes():
+ self._add_queue(PullQueue(self._scheduler))
+
+ self._add_queue(FetchQueue(self._scheduler, skip_cached=True))
+
+ self._add_queue(BuildQueue(self._scheduler))
+
+ if self._artifacts.has_push_remotes():
+ self._add_queue(ArtifactPushQueue(self._scheduler))
+
+ if self._sourcecache.has_push_remotes():
+ self._add_queue(SourcePushQueue(self._scheduler))
+
+ # Enqueue elements
+ #
+ if track_elements:
+ self._enqueue_plan(track_elements, queue=track_queue)
+ self._enqueue_plan(elements)
+ self._run()
+
+ # fetch()
+ #
+ # Fetches sources on the pipeline.
+ #
+ # Args:
+ # targets (list of str): Targets to fetch
+ # selection (PipelineSelection): The selection mode for the specified targets
+ # except_targets (list of str): Specified targets to except from fetching
+ # track_targets (bool): Whether to track selected targets in addition to fetching
+ # track_cross_junctions (bool): Whether tracking should cross junction boundaries
+ # remote (str|None): The URL of a specific remote server to pull from.
+ #
+ def fetch(self, targets, *,
+ selection=PipelineSelection.PLAN,
+ except_targets=None,
+ track_targets=False,
+ track_cross_junctions=False,
+ remote=None):
+
+ if track_targets:
+ track_targets = targets
+ track_selection = selection
+ track_except_targets = except_targets
+ else:
+ track_targets = ()
+ track_selection = PipelineSelection.NONE
+ track_except_targets = ()
+
+ use_source_config = True
+ if remote:
+ use_source_config = False
+
+ elements, track_elements = \
+ self._load(targets, track_targets,
+ selection=selection, track_selection=track_selection,
+ except_targets=except_targets,
+ track_except_targets=track_except_targets,
+ track_cross_junctions=track_cross_junctions,
+ fetch_subprojects=True,
+ use_source_config=use_source_config,
+ source_remote_url=remote)
+
+ # Delegated to a shared fetch method
+ self._fetch(elements, track_elements=track_elements)
+
+ # track()
+ #
+ # Tracks all the sources of the selected elements.
+ #
+ # Args:
+ # targets (list of str): Targets to track
+ # selection (PipelineSelection): The selection mode for the specified targets
+ # except_targets (list of str): Specified targets to except from tracking
+ # cross_junctions (bool): Whether tracking should cross junction boundaries
+ #
+ # If no error is encountered while tracking, then the project files
+ # are rewritten inline.
+ #
+ def track(self, targets, *,
+ selection=PipelineSelection.REDIRECT,
+ except_targets=None,
+ cross_junctions=False):
+
+ # We pass no target to build. Only to track. Passing build targets
+ # would fully load project configuration which might not be
+ # possible before tracking is done.
+ _, elements = \
+ self._load([], targets,
+ selection=selection, track_selection=selection,
+ except_targets=except_targets,
+ track_except_targets=except_targets,
+ track_cross_junctions=cross_junctions,
+ fetch_subprojects=True)
+
+ track_queue = TrackQueue(self._scheduler)
+ self._add_queue(track_queue, track=True)
+ self._enqueue_plan(elements, queue=track_queue)
+ self._run()
+
+ # pull()
+ #
+ # Pulls artifacts from remote artifact server(s)
+ #
+ # Args:
+ # targets (list of str): Targets to pull
+ # selection (PipelineSelection): The selection mode for the specified targets
+ # ignore_junction_targets (bool): Whether junction targets should be filtered out
+ # remote (str): The URL of a specific remote server to pull from, or None
+ #
+ # If `remote` specified as None, then regular configuration will be used
+ # to determine where to pull artifacts from.
+ #
+ def pull(self, targets, *,
+ selection=PipelineSelection.NONE,
+ ignore_junction_targets=False,
+ remote=None):
+
+ use_config = True
+ if remote:
+ use_config = False
+
+ elements, _ = self._load(targets, (),
+ selection=selection,
+ ignore_junction_targets=ignore_junction_targets,
+ use_artifact_config=use_config,
+ artifact_remote_url=remote,
+ fetch_subprojects=True)
+
+ if not self._artifacts.has_fetch_remotes():
+ raise StreamError("No artifact caches available for pulling artifacts")
+
+ self._pipeline.assert_consistent(elements)
+ self._add_queue(PullQueue(self._scheduler))
+ self._enqueue_plan(elements)
+ self._run()
+
+ # push()
+ #
+    # Pushes artifacts to remote artifact server(s)
+ #
+ # Args:
+ # targets (list of str): Targets to push
+ # selection (PipelineSelection): The selection mode for the specified targets
+ # ignore_junction_targets (bool): Whether junction targets should be filtered out
+ # remote (str): The URL of a specific remote server to push to, or None
+ #
+ # If `remote` specified as None, then regular configuration will be used
+ # to determine where to push artifacts to.
+ #
+ # If any of the given targets are missing their expected buildtree artifact,
+ # a pull queue will be created if user context and available remotes allow for
+ # attempting to fetch them.
+ #
+ def push(self, targets, *,
+ selection=PipelineSelection.NONE,
+ ignore_junction_targets=False,
+ remote=None):
+
+ use_config = True
+ if remote:
+ use_config = False
+
+ elements, _ = self._load(targets, (),
+ selection=selection,
+ ignore_junction_targets=ignore_junction_targets,
+ use_artifact_config=use_config,
+ artifact_remote_url=remote,
+ fetch_subprojects=True)
+
+ if not self._artifacts.has_push_remotes():
+ raise StreamError("No artifact caches available for pushing artifacts")
+
+ self._pipeline.assert_consistent(elements)
+
+ # Check if we require a pull queue, with given artifact state and context
+ require_buildtrees = self._buildtree_pull_required(elements)
+ if require_buildtrees:
+ self._message(MessageType.INFO, "Attempting to fetch missing artifact buildtrees")
+ self._add_queue(PullQueue(self._scheduler))
+ self._enqueue_plan(require_buildtrees)
+ else:
+ # FIXME: This hack should be removed as a result of refactoring
+ # Element._update_state()
+ #
+ # This workaround marks all dependencies of all selected elements as
+ # "pulled" before trying to push.
+ #
+ # Instead of lying to the elements and telling them they have already
+ # been pulled, we should have something more consistent with how other
+ # state bits are handled; and explicitly tell the elements that they
+ # need to be pulled with something like Element._schedule_pull().
+ #
+ for element in elements:
+ element._pull_done()
+
+ push_queue = ArtifactPushQueue(self._scheduler)
+ self._add_queue(push_queue)
+ self._enqueue_plan(elements, queue=push_queue)
+ self._run()
+
+ # checkout()
+ #
+ # Checkout target artifact to the specified location
+ #
+ # Args:
+ # target (str): Target to checkout
+ # location (str): Location to checkout the artifact to
+ # force (bool): Whether files can be overwritten if necessary
+ # scope (str): The scope of dependencies to checkout
+ # integrate (bool): Whether to run integration commands
+ # hardlinks (bool): Whether checking out files hardlinked to
+ # their artifacts is acceptable
+ # tar (bool): If true, a tarball from the artifact contents will
+ # be created, otherwise the file tree of the artifact
+ # will be placed at the given location. If true and
+ # location is '-', the tarball will be dumped on the
+ # standard output.
+ #
+ def checkout(self, target, *,
+ location=None,
+ force=False,
+ scope=Scope.RUN,
+ integrate=True,
+ hardlinks=False,
+ tar=False):
+
+ # We only have one target in a checkout command
+ elements, _ = self._load((target,), (), fetch_subprojects=True)
+ target = elements[0]
+
+ self._check_location_writable(location, force=force, tar=tar)
+
+ # Stage deps into a temporary sandbox first
+ try:
+ with target._prepare_sandbox(scope=scope, directory=None,
+ integrate=integrate) as sandbox:
+
+ # Copy or move the sandbox to the target directory
+ sandbox_vroot = sandbox.get_virtual_directory()
+
+ if not tar:
+ with target.timed_activity("Checking out files in '{}'"
+ .format(location)):
+ try:
+ if hardlinks:
+ self._checkout_hardlinks(sandbox_vroot, location)
+ else:
+ sandbox_vroot.export_files(location)
+ except OSError as e:
+ raise StreamError("Failed to checkout files: '{}'"
+ .format(e)) from e
+ else:
+ if location == '-':
+ with target.timed_activity("Creating tarball"):
+ # Save the stdout FD to restore later
+ saved_fd = os.dup(sys.stdout.fileno())
+ try:
+ with os.fdopen(sys.stdout.fileno(), 'wb') as fo:
+ with tarfile.open(fileobj=fo, mode="w|") as tf:
+ sandbox_vroot.export_to_tar(tf, '.')
+ finally:
+ # No matter what, restore stdout for further use
+ os.dup2(saved_fd, sys.stdout.fileno())
+ os.close(saved_fd)
+ else:
+ with target.timed_activity("Creating tarball '{}'"
+ .format(location)):
+ with tarfile.open(location, "w:") as tf:
+ sandbox_vroot.export_to_tar(tf, '.')
+
+ except BstError as e:
+ raise StreamError("Error while staging dependencies into a sandbox"
+ ": '{}'".format(e), detail=e.detail, reason=e.reason) from e
+
+ # artifact_log()
+ #
+ # Show the full log of an artifact
+ #
+ # Args:
+    #     targets (list of str): Targets to view the logs of
+    #
+    # Returns:
+    #     logsdirs (list): A list of CasBasedDirectory objects containing artifact logs
+ #
+ def artifact_log(self, targets):
+ # Return list of Element and/or ArtifactElement objects
+ target_objects = self.load_selection(targets, selection=PipelineSelection.NONE, load_refs=True)
+
+ logsdirs = []
+ for obj in target_objects:
+ ref = obj.get_artifact_name()
+ if not obj._cached():
+ self._message(MessageType.WARN, "{} is not cached".format(ref))
+ continue
+ elif not obj._cached_logs():
+ self._message(MessageType.WARN, "{} is cached without log files".format(ref))
+ continue
+
+ logsdirs.append(self._artifacts.get_artifact_logs(ref))
+
+ return logsdirs
+
+ # artifact_delete()
+ #
+ # Remove artifacts from the local cache
+ #
+ # Args:
+    #     targets (list of str): Targets to remove
+ # no_prune (bool): Whether to prune the unreachable refs, default False
+ #
+ def artifact_delete(self, targets, no_prune):
+ # Return list of Element and/or ArtifactElement objects
+ target_objects = self.load_selection(targets, selection=PipelineSelection.NONE, load_refs=True)
+
+ # Some of the targets may refer to the same key, so first obtain a
+ # set of the refs to be removed.
+ remove_refs = set()
+ for obj in target_objects:
+ for key_strength in [_KeyStrength.STRONG, _KeyStrength.WEAK]:
+ key = obj._get_cache_key(strength=key_strength)
+ remove_refs.add(obj.get_artifact_name(key=key))
+
+ ref_removed = False
+ for ref in remove_refs:
+ try:
+ self._artifacts.remove(ref, defer_prune=True)
+ except ArtifactError as e:
+ self._message(MessageType.WARN, str(e))
+ continue
+
+ self._message(MessageType.INFO, "Removed: {}".format(ref))
+ ref_removed = True
+
+ # Prune the artifact cache
+ if ref_removed and not no_prune:
+ with self._context.timed_activity("Pruning artifact cache"):
+ self._artifacts.prune()
+
+ if not ref_removed:
+ self._message(MessageType.INFO, "No artifacts were removed")
+
+ # source_checkout()
+ #
+ # Checkout sources of the target element to the specified location
+ #
+ # Args:
+ # target (str): The target element whose sources to checkout
+ # location (str): Location to checkout the sources to
+ # deps (str): The dependencies to checkout
+ # fetch (bool): Whether to fetch missing sources
+ # except_targets (list): List of targets to except from staging
+ #
+ def source_checkout(self, target, *,
+ location=None,
+ force=False,
+ deps='none',
+ fetch=False,
+ except_targets=(),
+ tar=False,
+ include_build_scripts=False):
+
+ self._check_location_writable(location, force=force, tar=tar)
+
+ elements, _ = self._load((target,), (),
+ selection=deps,
+ except_targets=except_targets,
+ fetch_subprojects=True)
+
+ # Assert all sources are cached in the source dir
+ if fetch:
+ self._fetch(elements, fetch_original=True)
+ self._pipeline.assert_sources_cached(elements)
+
+ # Stage all sources determined by scope
+ try:
+ self._source_checkout(elements, location, force, deps,
+ fetch, tar, include_build_scripts)
+ except BstError as e:
+ raise StreamError("Error while writing sources"
+ ": '{}'".format(e), detail=e.detail, reason=e.reason) from e
+
+ # workspace_open
+ #
+ # Open a project workspace
+ #
+ # Args:
+ # targets (list): List of target elements to open workspaces for
+ # no_checkout (bool): Whether to skip checking out the source
+ # track_first (bool): Whether to track and fetch first
+ # force (bool): Whether to ignore contents in an existing directory
+    #    custom_dir (str): Custom location to create a workspace, or a falsy value to use the default location.
+ #
+ def workspace_open(self, targets, *,
+ no_checkout,
+ track_first,
+ force,
+ custom_dir):
+ # This function is a little funny but it is trying to be as atomic as possible.
+
+ if track_first:
+ track_targets = targets
+ else:
+ track_targets = ()
+
+ elements, track_elements = self._load(targets, track_targets,
+ selection=PipelineSelection.REDIRECT,
+ track_selection=PipelineSelection.REDIRECT)
+
+ workspaces = self._context.get_workspaces()
+
+ # If we're going to checkout, we need at least a fetch,
+ # if we were asked to track first, we're going to fetch anyway.
+ #
+ if not no_checkout or track_first:
+ track_elements = []
+ if track_first:
+ track_elements = elements
+ self._fetch(elements, track_elements=track_elements, fetch_original=True)
+
+ expanded_directories = []
+ # To try to be more atomic, loop through the elements and raise any errors we can early
+ for target in elements:
+
+ if not list(target.sources()):
+ build_depends = [x.name for x in target.dependencies(Scope.BUILD, recurse=False)]
+ if not build_depends:
+ raise StreamError("The element {} has no sources".format(target.name))
+ detail = "Try opening a workspace on one of its dependencies instead:\n"
+ detail += " \n".join(build_depends)
+ raise StreamError("The element {} has no sources".format(target.name), detail=detail)
+
+ # Check for workspace config
+ workspace = workspaces.get_workspace(target._get_full_name())
+ if workspace and not force:
+ raise StreamError("Element '{}' already has workspace defined at: {}"
+ .format(target.name, workspace.get_absolute_path()))
+
+ target_consistency = target._get_consistency()
+            if not no_checkout and target_consistency < Consistency.CACHED and \
+                    not target._source_cached():
+                raise StreamError("Could not stage uncached source for {}. "
+                                  "Use `--track` to track and fetch the latest "
+                                  "version of the source.".format(target.name))
+
+ if not custom_dir:
+ directory = os.path.abspath(os.path.join(self._context.workspacedir, target.name))
+ if directory[-4:] == '.bst':
+ directory = directory[:-4]
+ expanded_directories.append(directory)
+
+ if custom_dir:
+ if len(elements) != 1:
+ raise StreamError("Exactly one element can be given if --directory is used",
+ reason='directory-with-multiple-elements')
+ directory = os.path.abspath(custom_dir)
+ expanded_directories = [directory, ]
+ else:
+            # If this fails it is a bug in whatever calls this, usually cli.py,
+            # and so cannot be tested for via the run-bst test mechanism.
+ assert len(elements) == len(expanded_directories)
+
+ for target, directory in zip(elements, expanded_directories):
+ if os.path.exists(directory):
+ if not os.path.isdir(directory):
+ raise StreamError("For element '{}', Directory path is not a directory: {}"
+ .format(target.name, directory), reason='bad-directory')
+
+ if not (no_checkout or force) and os.listdir(directory):
+ raise StreamError("For element '{}', Directory path is not empty: {}"
+ .format(target.name, directory), reason='bad-directory')
+
+        # So far this function has tried to catch as many issues as possible without making any changes.
+        # Now it does the bits that cannot be made atomic.
+ targetGenerator = zip(elements, expanded_directories)
+ for target, directory in targetGenerator:
+ self._message(MessageType.INFO, "Creating workspace for element {}"
+ .format(target.name))
+
+ workspace = workspaces.get_workspace(target._get_full_name())
+ if workspace:
+ workspaces.delete_workspace(target._get_full_name())
+ workspaces.save_config()
+ shutil.rmtree(directory)
+ try:
+ os.makedirs(directory, exist_ok=True)
+ except OSError as e:
+ todo_elements = " ".join([str(target.name) for target, directory_dict in targetGenerator])
+ if todo_elements:
+ # This output should make creating the remaining workspaces as easy as possible.
+ todo_elements = "\nDid not try to create workspaces for " + todo_elements
+ raise StreamError("Failed to create workspace directory: {}".format(e) + todo_elements) from e
+
+ workspaces.create_workspace(target, directory, checkout=not no_checkout)
+ self._message(MessageType.INFO, "Created a workspace for element: {}"
+ .format(target._get_full_name()))
+
+ # workspace_close
+ #
+ # Close a project workspace
+ #
+ # Args:
+ # element_name (str): The element name to close the workspace for
+ # remove_dir (bool): Whether to remove the associated directory
+ #
+ def workspace_close(self, element_name, *, remove_dir):
+ workspaces = self._context.get_workspaces()
+ workspace = workspaces.get_workspace(element_name)
+
+ # Remove workspace directory if prompted
+ if remove_dir:
+ with self._context.timed_activity("Removing workspace directory {}"
+ .format(workspace.get_absolute_path())):
+ try:
+ shutil.rmtree(workspace.get_absolute_path())
+ except OSError as e:
+ raise StreamError("Could not remove '{}': {}"
+ .format(workspace.get_absolute_path(), e)) from e
+
+ # Delete the workspace and save the configuration
+ workspaces.delete_workspace(element_name)
+ workspaces.save_config()
+ self._message(MessageType.INFO, "Closed workspace for {}".format(element_name))
+
+ # workspace_reset
+ #
+ # Reset a workspace to its original state, discarding any user
+ # changes.
+ #
+ # Args:
+ # targets (list of str): The target elements to reset the workspace for
+ # soft (bool): Only reset workspace state
+ # track_first (bool): Whether to also track the sources first
+ #
+ def workspace_reset(self, targets, *, soft, track_first):
+
+ if track_first:
+ track_targets = targets
+ else:
+ track_targets = ()
+
+ elements, track_elements = self._load(targets, track_targets,
+ selection=PipelineSelection.REDIRECT,
+ track_selection=PipelineSelection.REDIRECT)
+
+ nonexisting = []
+ for element in elements:
+ if not self.workspace_exists(element.name):
+ nonexisting.append(element.name)
+ if nonexisting:
+ raise StreamError("Workspace does not exist", detail="\n".join(nonexisting))
+
+ # Do the tracking first
+ if track_first:
+ self._fetch(elements, track_elements=track_elements, fetch_original=True)
+
+ workspaces = self._context.get_workspaces()
+
+ for element in elements:
+ workspace = workspaces.get_workspace(element._get_full_name())
+ workspace_path = workspace.get_absolute_path()
+ if soft:
+ workspace.prepared = False
+ self._message(MessageType.INFO, "Reset workspace state for {} at: {}"
+ .format(element.name, workspace_path))
+ continue
+
+ with element.timed_activity("Removing workspace directory {}"
+ .format(workspace_path)):
+ try:
+ shutil.rmtree(workspace_path)
+ except OSError as e:
+ raise StreamError("Could not remove '{}': {}"
+ .format(workspace_path, e)) from e
+
+ workspaces.delete_workspace(element._get_full_name())
+ workspaces.create_workspace(element, workspace_path, checkout=True)
+
+ self._message(MessageType.INFO,
+ "Reset workspace for {} at: {}".format(element.name,
+ workspace_path))
+
+ workspaces.save_config()
+
+ # workspace_exists
+ #
+ # Check if a workspace exists
+ #
+ # Args:
+    #    element_name (str): The element name to check for a workspace, or None
+ #
+ # Returns:
+ # (bool): True if the workspace exists
+ #
+ # If None is specified for `element_name`, then this will return
+ # True if there are any existing workspaces.
+ #
+ def workspace_exists(self, element_name=None):
+ workspaces = self._context.get_workspaces()
+ if element_name:
+ workspace = workspaces.get_workspace(element_name)
+ if workspace:
+ return True
+ elif any(workspaces.list()):
+ return True
+
+ return False
+
+ # workspace_is_required()
+ #
+ # Checks whether the workspace belonging to element_name is required to
+ # load the project
+ #
+ # Args:
+ # element_name (str): The element whose workspace may be required
+ #
+ # Returns:
+ # (bool): True if the workspace is required
+ def workspace_is_required(self, element_name):
+ invoked_elm = self._project.invoked_from_workspace_element()
+ return invoked_elm == element_name
+
+ # workspace_list
+ #
+ # Serializes the workspaces and dumps them in YAML to stdout.
+ #
+ def workspace_list(self):
+ workspaces = []
+ for element_name, workspace_ in self._context.get_workspaces().list():
+ workspace_detail = {
+ 'element': element_name,
+ 'directory': workspace_.get_absolute_path(),
+ }
+ workspaces.append(workspace_detail)
+
+ _yaml.dump({
+ 'workspaces': workspaces
+ })
+
+ # redirect_element_names()
+ #
+ # Takes a list of element names and returns a list where elements have been
+ # redirected to their source elements if the element file exists, and just
+ # the name, if not.
+ #
+ # Args:
+ # elements (list of str): The element names to redirect
+ #
+ # Returns:
+ # (list of str): The element names after redirecting
+ #
+ def redirect_element_names(self, elements):
+ element_dir = self._project.element_path
+ load_elements = []
+ output_elements = set()
+
+ for e in elements:
+ element_path = os.path.join(element_dir, e)
+ if os.path.exists(element_path):
+ load_elements.append(e)
+ else:
+ output_elements.add(e)
+ if load_elements:
+ loaded_elements, _ = self._load(load_elements, (),
+ selection=PipelineSelection.REDIRECT,
+ track_selection=PipelineSelection.REDIRECT)
+
+ for e in loaded_elements:
+ output_elements.add(e.name)
+
+ return list(output_elements)
+
+ #############################################################
+ # Scheduler API forwarding #
+ #############################################################
+
+ # running
+ #
+ # Whether the scheduler is running
+ #
+ @property
+ def running(self):
+ return self._scheduler.loop is not None
+
+ # suspended
+ #
+ # Whether the scheduler is currently suspended
+ #
+ @property
+ def suspended(self):
+ return self._scheduler.suspended
+
+ # terminated
+ #
+ # Whether the scheduler is currently terminated
+ #
+ @property
+ def terminated(self):
+ return self._scheduler.terminated
+
+ # elapsed_time
+ #
+ # Elapsed time since the session start
+ #
+ @property
+ def elapsed_time(self):
+ return self._scheduler.elapsed_time()
+
+ # terminate()
+ #
+ # Terminate jobs
+ #
+ def terminate(self):
+ self._scheduler.terminate_jobs()
+
+ # quit()
+ #
+ # Quit the session, this will continue with any ongoing
+ # jobs, use Stream.terminate() instead for cancellation
+ # of ongoing jobs
+ #
+ def quit(self):
+ self._scheduler.stop_queueing()
+
+ # suspend()
+ #
+ # Context manager to suspend ongoing jobs
+ #
+ @contextmanager
+ def suspend(self):
+ with self._scheduler.jobs_suspended():
+ yield
+
+ #############################################################
+ # Private Methods #
+ #############################################################
+
+ # _load()
+ #
+ # A convenience method for loading element lists
+ #
+    # If `targets` is not empty, the used project configuration will be
+    # fully loaded. If `targets` is empty, tracking will still be
+    # resolved for elements in `track_targets`, but no build pipeline
+    # will be resolved. This behavior is important for track() to
+    # not trigger full loading of project configuration.
+ #
+ # Args:
+ # targets (list of str): Main targets to load
+ # track_targets (list of str): Tracking targets
+ # selection (PipelineSelection): The selection mode for the specified targets
+ # track_selection (PipelineSelection): The selection mode for the specified tracking targets
+ # except_targets (list of str): Specified targets to except from fetching
+    #    track_except_targets (list of str): Specified targets to except from tracking
+ # track_cross_junctions (bool): Whether tracking should cross junction boundaries
+ # ignore_junction_targets (bool): Whether junction targets should be filtered out
+ # use_artifact_config (bool): Whether to initialize artifacts with the config
+ # use_source_config (bool): Whether to initialize remote source caches with the config
+ # artifact_remote_url (str): A remote url for initializing the artifacts
+ # source_remote_url (str): A remote url for initializing source caches
+ # fetch_subprojects (bool): Whether to fetch subprojects while loading
+ #
+ # Returns:
+ # (list of Element): The primary element selection
+ # (list of Element): The tracking element selection
+ #
+ def _load(self, targets, track_targets, *,
+ selection=PipelineSelection.NONE,
+ track_selection=PipelineSelection.NONE,
+ except_targets=(),
+ track_except_targets=(),
+ track_cross_junctions=False,
+ ignore_junction_targets=False,
+ use_artifact_config=False,
+ use_source_config=False,
+ artifact_remote_url=None,
+ source_remote_url=None,
+ fetch_subprojects=False,
+ dynamic_plan=False,
+ load_refs=False):
+
+ # Classify element and artifact strings
+ target_elements, target_artifacts = self._classify_artifacts(targets)
+
+ if target_artifacts and not load_refs:
+ detail = '\n'.join(target_artifacts)
+ raise ArtifactElementError("Cannot perform this operation with artifact refs:", detail=detail)
+
+ # Load rewritable if we have any tracking selection to make
+ rewritable = False
+ if track_targets:
+ rewritable = True
+
+ # Load all target elements
+ elements, except_elements, track_elements, track_except_elements = \
+ self._pipeline.load([target_elements, except_targets, track_targets, track_except_targets],
+ rewritable=rewritable,
+ fetch_subprojects=fetch_subprojects)
+
+ # Obtain the ArtifactElement objects
+ artifacts = [self._project.create_artifact_element(ref) for ref in target_artifacts]
+
+ # Optionally filter out junction elements
+ if ignore_junction_targets:
+ elements = [e for e in elements if e.get_kind() != 'junction']
+
+ # Hold on to the targets
+ self.targets = elements + artifacts
+
+        # Here we raise an error if the track_elements targets are
+        # not dependencies of the primary targets; this is not
+        # supported.
+ #
+ # This can happen with `bst build --track`
+ #
+ if targets and not self._pipeline.targets_include(elements, track_elements):
+ raise StreamError("Specified tracking targets that are not "
+ "within the scope of primary targets")
+
+ # First take care of marking tracking elements, this must be
+ # done before resolving element states.
+ #
+ assert track_selection != PipelineSelection.PLAN
+
+        # Tracked elements are split by their owning projects in order to
+        # filter cross-junction tracking dependencies on their
+        # respective projects.
+ track_projects = {}
+ for element in track_elements:
+ project = element._get_project()
+ if project not in track_projects:
+ track_projects[project] = [element]
+ else:
+ track_projects[project].append(element)
+
+ track_selected = []
+
+ for project, project_elements in track_projects.items():
+ selected = self._pipeline.get_selection(project_elements, track_selection)
+ selected = self._pipeline.track_cross_junction_filter(project,
+ selected,
+ track_cross_junctions)
+ track_selected.extend(selected)
+
+ track_selected = self._pipeline.except_elements(track_elements,
+ track_selected,
+ track_except_elements)
+
+ for element in track_selected:
+ element._schedule_tracking()
+
+ if not targets:
+ self._pipeline.resolve_elements(track_selected)
+ return [], track_selected
+
+ # ArtifactCache.setup_remotes expects all projects to be fully loaded
+ for project in self._context.get_projects():
+ project.ensure_fully_loaded()
+
+ # Connect to remote caches, this needs to be done before resolving element state
+ self._artifacts.setup_remotes(use_config=use_artifact_config, remote_url=artifact_remote_url)
+ self._sourcecache.setup_remotes(use_config=use_source_config, remote_url=source_remote_url)
+
+ # Now move on to loading primary selection.
+ #
+ self._pipeline.resolve_elements(self.targets)
+ selected = self._pipeline.get_selection(self.targets, selection, silent=False)
+ selected = self._pipeline.except_elements(self.targets,
+ selected,
+ except_elements)
+
+ # Set the "required" artifacts that should not be removed
+ # while this pipeline is active
+ #
+ # It must include all the artifacts which are required by the
+ # final product. Note that this is a superset of the build plan.
+ #
+ # use partial as we send this to both Artifact and Source caches
+ required_elements = functools.partial(self._pipeline.dependencies, elements, Scope.ALL)
+ self._artifacts.mark_required_elements(required_elements())
+
+ self._sourcecache.mark_required_sources(
+ itertools.chain.from_iterable(
+ [element.sources() for element in required_elements()]))
+
+ if selection == PipelineSelection.PLAN and dynamic_plan:
+ # We use a dynamic build plan, only request artifacts of top-level targets,
+ # others are requested dynamically as needed.
+ # This avoids pulling, fetching, or building unneeded build-only dependencies.
+ for element in elements:
+ element._set_required()
+ else:
+ for element in selected:
+ element._set_required()
+
+ return selected, track_selected
+
+ # _message()
+ #
+ # Local message propagator
+ #
+ def _message(self, message_type, message, **kwargs):
+ args = dict(kwargs)
+ self._context.message(
+ Message(None, message_type, message, **args))
+
+ # _add_queue()
+ #
+ # Adds a queue to the stream
+ #
+ # Args:
+ # queue (Queue): Queue to add to the pipeline
+ # track (bool): Whether this is the tracking queue
+ #
+ def _add_queue(self, queue, *, track=False):
+ self.queues.append(queue)
+
+ if not (track or self._first_non_track_queue):
+ self._first_non_track_queue = queue
+
+ # _enqueue_plan()
+ #
+ # Enqueues planned elements to the specified queue.
+ #
+ # Args:
+ # plan (list of Element): The list of elements to be enqueued
+ # queue (Queue): The target queue, defaults to the first non-track queue
+ #
+ def _enqueue_plan(self, plan, *, queue=None):
+ queue = queue or self._first_non_track_queue
+
+ queue.enqueue(plan)
+ self.session_elements += plan
+
+ # _run()
+ #
+ # Common function for running the scheduler
+ #
+ def _run(self):
+
+ # Inform the frontend of the full list of elements
+ # and the list of elements which will be processed in this run
+ #
+ self.total_elements = list(self._pipeline.dependencies(self.targets, Scope.ALL))
+
+ if self._session_start_callback is not None:
+ self._session_start_callback()
+
+ _, status = self._scheduler.run(self.queues)
+
+ if status == SchedStatus.ERROR:
+ raise StreamError()
+ elif status == SchedStatus.TERMINATED:
+ raise StreamError(terminated=True)
+
+ # _fetch()
+ #
+    # Performs the fetch job. The body of this function lives here
+    # because it is shared between a few internals.
+ #
+ # Args:
+ # elements (list of Element): Elements to fetch
+ # track_elements (list of Element): Elements to track
+    #    fetch_original (bool): Whether to fetch the original, unstaged sources
+ #
+ def _fetch(self, elements, *, track_elements=None, fetch_original=False):
+
+ if track_elements is None:
+ track_elements = []
+
+ # Subtract the track elements from the fetch elements, they will be added separately
+ fetch_plan = self._pipeline.subtract_elements(elements, track_elements)
+
+ # Assert consistency for the fetch elements
+ self._pipeline.assert_consistent(fetch_plan)
+
+        # Filter out elements with cached sources, but only from the fetch plan;
+        # let the track plan resolve new refs.
+ cached = [elt for elt in fetch_plan
+ if not elt._should_fetch(fetch_original)]
+ fetch_plan = self._pipeline.subtract_elements(fetch_plan, cached)
+
+ # Construct queues, enqueue and run
+ #
+ track_queue = None
+ if track_elements:
+ track_queue = TrackQueue(self._scheduler)
+ self._add_queue(track_queue, track=True)
+ self._add_queue(FetchQueue(self._scheduler, fetch_original=fetch_original))
+
+ if track_elements:
+ self._enqueue_plan(track_elements, queue=track_queue)
+
+ self._enqueue_plan(fetch_plan)
+ self._run()
+
+ # _check_location_writable()
+ #
+ # Check if given location is writable.
+ #
+ # Args:
+ # location (str): Destination path
+ # force (bool): Allow files to be overwritten
+ # tar (bool): Whether destination is a tarball
+ #
+ # Raises:
+ # (StreamError): If the destination is not writable
+ #
+ def _check_location_writable(self, location, force=False, tar=False):
+ if not tar:
+ try:
+ os.makedirs(location, exist_ok=True)
+ except OSError as e:
+ raise StreamError("Failed to create destination directory: '{}'"
+ .format(e)) from e
+ if not os.access(location, os.W_OK):
+ raise StreamError("Destination directory '{}' not writable"
+ .format(location))
+ if not force and os.listdir(location):
+ raise StreamError("Destination directory '{}' not empty"
+ .format(location))
+ elif os.path.exists(location) and location != '-':
+ if not os.access(location, os.W_OK):
+ raise StreamError("Output file '{}' not writable"
+ .format(location))
+ if not force and os.path.exists(location):
+ raise StreamError("Output file '{}' already exists"
+ .format(location))
+
+ # Helper function for checkout()
+ #
+ def _checkout_hardlinks(self, sandbox_vroot, directory):
+ try:
+ utils.safe_remove(directory)
+ except OSError as e:
+ raise StreamError("Failed to remove checkout directory: {}".format(e)) from e
+
+ sandbox_vroot.export_files(directory, can_link=True, can_destroy=True)
+
+ # Helper function for source_checkout()
+ def _source_checkout(self, elements,
+ location=None,
+ force=False,
+ deps='none',
+ fetch=False,
+ tar=False,
+ include_build_scripts=False):
+ location = os.path.abspath(location)
+ location_parent = os.path.abspath(os.path.join(location, ".."))
+
+        # Stage all our sources in a temporary directory. This
+        # directory can then either be used to construct a tarball or be
+        # moved to the final desired location.
+ temp_source_dir = tempfile.TemporaryDirectory(dir=location_parent)
+ try:
+ self._write_element_sources(temp_source_dir.name, elements)
+ if include_build_scripts:
+ self._write_build_scripts(temp_source_dir.name, elements)
+ if tar:
+ self._create_tarball(temp_source_dir.name, location)
+ else:
+ self._move_directory(temp_source_dir.name, location, force)
+ except OSError as e:
+ raise StreamError("Failed to checkout sources to {}: {}"
+ .format(location, e)) from e
+ finally:
+ with suppress(FileNotFoundError):
+ temp_source_dir.cleanup()
+
+    # Move a directory src to dest. This will work across devices and
+    # may optionally overwrite existing files.
+ def _move_directory(self, src, dest, force=False):
+ def is_empty_dir(path):
+            return os.path.isdir(path) and not os.listdir(path)
+
+ try:
+ os.rename(src, dest)
+ return
+ except OSError:
+ pass
+
+ if force or is_empty_dir(dest):
+ try:
+ utils.link_files(src, dest)
+ except utils.UtilError as e:
+ raise StreamError("Failed to move directory: {}".format(e)) from e
+
+ # Write the element build script to the given directory
+ def _write_element_script(self, directory, element):
+ try:
+ element._write_script(directory)
+ except ImplError:
+ return False
+ return True
+
+ # Write all source elements to the given directory
+ def _write_element_sources(self, directory, elements):
+ for element in elements:
+ element_source_dir = self._get_element_dirname(directory, element)
+ if list(element.sources()):
+ os.makedirs(element_source_dir)
+ element._stage_sources_at(element_source_dir, mount_workspaces=False)
+
+ # Create a tarball from the content of directory
+ def _create_tarball(self, directory, tar_name):
+ try:
+ with utils.save_file_atomic(tar_name, mode='wb') as f:
+ # This TarFile does not need to be explicitly closed
+            # as the underlying file object will be closed by the
+            # save_file_atomic context manager
+ tarball = tarfile.open(fileobj=f, mode='w')
+ for item in os.listdir(str(directory)):
+ file_to_add = os.path.join(directory, item)
+ tarball.add(file_to_add, arcname=item)
+ except OSError as e:
+ raise StreamError("Failed to create tar archive: {}".format(e)) from e
+
+ # Write all the build_scripts for elements in the directory location
+ def _write_build_scripts(self, location, elements):
+ for element in elements:
+ self._write_element_script(location, element)
+ self._write_master_build_script(location, elements)
+
+ # Write a master build script to the sandbox
+ def _write_master_build_script(self, directory, elements):
+
+ module_string = ""
+ for element in elements:
+ module_string += shlex.quote(element.normal_name) + " "
+
+ script_path = os.path.join(directory, "build.sh")
+
+ with open(_site.build_all_template, "r") as f:
+ script_template = f.read()
+
+ with utils.save_file_atomic(script_path, "w") as script:
+ script.write(script_template.format(modules=module_string))
+
+ os.chmod(script_path, stat.S_IEXEC | stat.S_IREAD)
+
+ # Collect the sources in the given sandbox into a tarfile
+ def _collect_sources(self, directory, tar_name, element_name, compression):
+ with self._context.timed_activity("Creating tarball {}".format(tar_name)):
+ if compression == "none":
+ permissions = "w:"
+ else:
+ permissions = "w:" + compression
+
+ with tarfile.open(tar_name, permissions) as tar:
+ tar.add(directory, arcname=element_name)
+
+ # _get_element_dirname()
+ #
+ # Get path to directory for an element based on its normal name.
+ #
+ # For cross-junction elements, the path will be prefixed with the name
+ # of the junction element.
+ #
+ # Args:
+ # directory (str): path to base directory
+ # element (Element): the element
+ #
+ # Returns:
+ # (str): Path to directory for this element
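+    #
+    # Illustrative example (the element and junction names are assumptions):
+    #
+    #   self._get_element_dirname("/tmp/sources", element)
+    #   # -> "/tmp/sources/deps/app", for an element "app.bst" reached
+    #   #    through a junction "deps.bst"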
+ #
+ def _get_element_dirname(self, directory, element):
+ parts = [element.normal_name]
+ while element._get_project() != self._project:
+ element = element._get_project().junction
+ parts.append(element.normal_name)
+
+ return os.path.join(directory, *reversed(parts))
+
+ # _buildtree_pull_required()
+ #
+    # Check which of the given elements need their buildtree artifact pulled, given the current config
+ #
+ # Args:
+ # elements (list): elements to check if buildtrees are required
+ #
+ # Returns:
+ # (list): elements requiring buildtrees
+ #
+ def _buildtree_pull_required(self, elements):
+ required_list = []
+
+        # If the context is set to not pull buildtrees, or there are no fetch remotes, return an empty list
+ if not self._context.pull_buildtrees or not self._artifacts.has_fetch_remotes():
+ return required_list
+
+ for element in elements:
+ # Check if element is partially cached without its buildtree, as the element
+ # artifact may not be cached at all
+ if element._cached() and not element._cached_buildtree() and element._buildtree_exists():
+ required_list.append(element)
+
+ return required_list
+
+ # _classify_artifacts()
+ #
+ # Split up a list of targets into element names and artifact refs
+ #
+ # Args:
+ # targets (list): A list of targets
+ #
+ # Returns:
+ # (list): element names present in the targets
+ # (list): artifact refs present in the targets
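+    #
+    # Illustrative example (the target strings are assumptions):
+    #
+    #   self._classify_artifacts(["hello.bst", "core/*.bst", "project/hello/<cache key>"])
+    #   # -> (["hello.bst", <elements matching the glob>, ...],
+    #   #     ["project/hello/<cache key>"])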
+ #
+ def _classify_artifacts(self, targets):
+ element_targets = []
+ artifact_refs = []
+ element_globs = []
+ artifact_globs = []
+
+ for target in targets:
+ if target.endswith('.bst'):
+ if any(c in "*?[" for c in target):
+ element_globs.append(target)
+ else:
+ element_targets.append(target)
+ else:
+ if any(c in "*?[" for c in target):
+ artifact_globs.append(target)
+ else:
+ try:
+ verify_artifact_ref(target)
+ except ArtifactElementError:
+ element_targets.append(target)
+ continue
+ artifact_refs.append(target)
+
+ if element_globs:
+ for dirpath, _, filenames in os.walk(self._project.element_path):
+ for filename in filenames:
+ element_path = os.path.join(dirpath, filename)
+ length = len(self._project.element_path) + 1
+ element_path = element_path[length:] # Strip out the element_path
+
+ if any(fnmatch(element_path, glob) for glob in element_globs):
+ element_targets.append(element_path)
+
+ if artifact_globs:
+ for glob in artifact_globs:
+ artifact_refs.extend(self._artifacts.list_artifacts(glob=glob))
+ if not artifact_refs:
+ self._message(MessageType.WARN, "No artifacts found for globs: {}".format(', '.join(artifact_globs)))
+
+ return element_targets, artifact_refs
diff --git a/src/buildstream/_variables.py b/src/buildstream/_variables.py
new file mode 100644
index 000000000..74314cf1f
--- /dev/null
+++ b/src/buildstream/_variables.py
@@ -0,0 +1,251 @@
+#
+# Copyright (C) 2016 Codethink Limited
+# Copyright (C) 2019 Bloomberg L.P.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Daniel Silverstone <daniel.silverstone@codethink.co.uk>
+
+import re
+import sys
+
+from ._exceptions import LoadError, LoadErrorReason
+from . import _yaml
+
+# Variables are allowed to have dashes here
+#
+PARSE_EXPANSION = re.compile(r"\%\{([a-zA-Z][a-zA-Z0-9_-]*)\}")
+
+
+# Throughout this code you will see variables named things like `expstr`.
+# These hold data structures called "expansion strings" and are the parsed
+# form of the strings which are the input to this subsystem. Strings
+# such as "Hello %{name}, how are you?" are parsed into the form:
+# (3, ["Hello ", "name", ", how are you?"])
+# i.e. a tuple of an integer and a list, where the integer is the cached
+# length of the list, and the list consists of one or more strings.
+# Strings in even indices of the list (0, 2, 4, etc) are constants which
+# are copied into the output of the expansion algorithm. Strings in the
+# odd indices (1, 3, 5, etc) are the names of further expansions to make.
+# In the example above, first "Hello " is copied, then "name" is expanded
+# and so must be another named expansion string passed in to the constructor
+# of the Variables class, and whatever is yielded from the expansion of "name"
+# is added to the concatenation for the result. Finally ", how are you?" is
+# copied in and the whole lot concatenated for return.
+#
+# To see how strings are parsed, see `_parse_expstr()` after the class, and
+# to see how expansion strings are expanded, see `_expand_expstr()` after that.
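+#
+# For example, the string from the paragraph above parses as follows:
+#
+#   _parse_expstr("Hello %{name}, how are you?")
+#   # -> (3, ["Hello ", "name", ", how are you?"])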
+
+
+# The Variables helper object will resolve the variable references in
+# the given dictionary, expecting that any dictionary values which contain
+# variable references can be resolved from the same dictionary.
+#
+# Each Element creates its own Variables instance to track the configured
+# variable settings for the element.
+#
+# Args:
+# node (dict): A node loaded and composited with yaml tools
+#
+# Raises:
+# LoadError, if unresolved variables, or cycles in resolution, occur.
+#
+class Variables():
+
+ def __init__(self, node):
+
+ self.original = node
+ self._expstr_map = self._resolve(node)
+ self.flat = self._flatten()
+
+ # subst():
+ #
+ # Substitutes any variables in 'string' and returns the result.
+ #
+ # Args:
+ # (string): The string to substitute
+ #
+ # Returns:
+ # (string): The new string with any substitutions made
+ #
+ # Raises:
+ # LoadError, if the string contains unresolved variable references.
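+    #
+    # A minimal usage sketch (assumes the node declares a 'prefix' variable):
+    #
+    #   variables = Variables(node)
+    #   variables.subst("install into %{prefix}/bin")
+    #   # -> e.g. "install into /usr/bin"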
+ #
+ def subst(self, string):
+ expstr = _parse_expstr(string)
+
+ try:
+ return _expand_expstr(self._expstr_map, expstr)
+ except KeyError:
+ unmatched = []
+
+ # Look for any unmatched variable names in the expansion string
+ for var in expstr[1][1::2]:
+ if var not in self._expstr_map:
+ unmatched.append(var)
+
+ if unmatched:
+ message = "Unresolved variable{}: {}".format(
+ "s" if len(unmatched) > 1 else "",
+ ", ".join(unmatched)
+ )
+
+ raise LoadError(LoadErrorReason.UNRESOLVED_VARIABLE, message)
+ # Otherwise, re-raise the KeyError since it clearly came from some
+ # other unknowable cause.
+ raise
+
+ # Variable resolving code
+ #
+ # Here we resolve all of our inputs into a dictionary, ready for use
+ # in subst()
+ def _resolve(self, node):
+ # Special case, if notparallel is specified in the variables for this
+ # element, then override max-jobs to be 1.
+ # Initialize it as a string as all variables are processed as strings.
+ #
+ if _yaml.node_get(node, bool, 'notparallel', default_value=False):
+ _yaml.node_set(node, 'max-jobs', str(1))
+
+ ret = {}
+ for key, value in _yaml.node_items(node):
+ value = _yaml.node_get(node, str, key)
+ ret[sys.intern(key)] = _parse_expstr(value)
+ return ret
+
+ def _check_for_missing(self):
+ # First the check for anything unresolvable
+ summary = []
+ for key, expstr in self._expstr_map.items():
+ for var in expstr[1][1::2]:
+ if var not in self._expstr_map:
+ line = " unresolved variable '{unmatched}' in declaration of '{variable}' at: {provenance}"
+ provenance = _yaml.node_get_provenance(self.original, key)
+ summary.append(line.format(unmatched=var, variable=key, provenance=provenance))
+ if summary:
+ raise LoadError(LoadErrorReason.UNRESOLVED_VARIABLE,
+ "Failed to resolve one or more variable:\n{}\n".format("\n".join(summary)))
+
+ def _check_for_cycles(self):
+ # And now the cycle checks
+ def cycle_check(expstr, visited, cleared):
+ for var in expstr[1][1::2]:
+ if var in cleared:
+ continue
+ if var in visited:
+ raise LoadError(LoadErrorReason.RECURSIVE_VARIABLE,
+ "{}: ".format(_yaml.node_get_provenance(self.original, var)) +
+ ("Variable '{}' expands to contain a reference to itself. " +
+ "Perhaps '{}' contains '%{{{}}}").format(var, visited[-1], var))
+ visited.append(var)
+ cycle_check(self._expstr_map[var], visited, cleared)
+ visited.pop()
+ cleared.add(var)
+
+ cleared = set()
+ for key, expstr in self._expstr_map.items():
+ if key not in cleared:
+ cycle_check(expstr, [key], cleared)
+
+ # _flatten():
+ #
+ # Turn our dictionary of expansion strings into a flattened dict
+ # so that we can run expansions faster in the future
+ #
+ # Raises:
+ # LoadError, if the string contains unresolved variable references or
+ # if cycles are detected in the variable references
+ #
+ def _flatten(self):
+ flat = {}
+ try:
+ for key, expstr in self._expstr_map.items():
+ if expstr[0] > 1:
+ expstr = (1, [sys.intern(_expand_expstr(self._expstr_map, expstr))])
+ self._expstr_map[key] = expstr
+ flat[key] = expstr[1][0]
+ except KeyError:
+ self._check_for_missing()
+ raise
+ except RecursionError:
+ self._check_for_cycles()
+ raise
+ return flat
+
+
+# Cache for the parsed expansion strings. While this is nominally
+# something which might "waste" memory, in reality each of these
+# will live as long as the element which uses it, which is the
+# vast majority of the memory usage across the execution of BuildStream.
+PARSE_CACHE = {
+    # Prime the cache with the empty string, since it would otherwise
+    # cause issues with the parser, and the complications needed to handle it cause slowdown
+ "": (1, [""]),
+}
+
+
+# Helper to parse a string into an expansion string tuple, caching
+# the results so that future parse requests don't need to think about
+# the string
+def _parse_expstr(instr):
+ try:
+ return PARSE_CACHE[instr]
+ except KeyError:
+ # This use of the regex turns a string like "foo %{bar} baz" into
+ # a list ["foo ", "bar", " baz"]
+ splits = PARSE_EXPANSION.split(instr)
+ # If an expansion ends the string, we get an empty string on the end
+ # which we can optimise away, making the expansion routines not need
+ # a test for this.
+ if splits[-1] == '':
+ splits = splits[:-1]
+ # Cache an interned copy of this. We intern it to try and reduce the
+ # memory impact of the cache. It seems odd to cache the list length
+ # but this is measurably cheaper than calculating it each time during
+ # string expansion.
+ PARSE_CACHE[instr] = (len(splits), [sys.intern(s) for s in splits])
+ return PARSE_CACHE[instr]
+
+
+# Helper to expand a given top level expansion string tuple in the context
+# of the given dictionary of expansion strings.
+#
+# Note: Will raise KeyError if any expansion is missing
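+#
+# Illustrative example (the variable name and value are assumptions):
+#
+#   content = {"name": (1, ["world"])}
+#   _expand_expstr(content, _parse_expstr("Hello %{name}!"))
+#   # -> "Hello world!"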
+def _expand_expstr(content, topvalue):
+ # Short-circuit constant strings
+ if topvalue[0] == 1:
+ return topvalue[1][0]
+
+ # Short-circuit strings which are entirely an expansion of another variable
+ # e.g. "%{another}"
+ if topvalue[0] == 2 and topvalue[1][0] == "":
+ return _expand_expstr(content, content[topvalue[1][1]])
+
+ # Otherwise process fully...
+ def internal_expand(value):
+ (expansion_len, expansion_bits) = value
+ idx = 0
+ while idx < expansion_len:
+ # First yield any constant string content
+ yield expansion_bits[idx]
+ idx += 1
+ # Now, if there is an expansion variable left to expand, yield
+ # the expansion of that variable too
+ if idx < expansion_len:
+ yield from internal_expand(content[expansion_bits[idx]])
+ idx += 1
+
+ return "".join(internal_expand(topvalue))
diff --git a/src/buildstream/_version.py b/src/buildstream/_version.py
new file mode 100644
index 000000000..03f946cb8
--- /dev/null
+++ b/src/buildstream/_version.py
@@ -0,0 +1,522 @@
+# pylint: skip-file
+
+# This file helps to compute a version number in source trees obtained from
+# git-archive tarball (such as those provided by GitHub's download-from-tag
+# feature). Distribution tarballs (built by setup.py sdist) and build
+# directories (produced by setup.py build) will contain a much shorter file
+# that just contains the computed version number.
+
+# This file is released into the public domain. Generated by
+# versioneer-0.18 (https://github.com/warner/python-versioneer)
+
+"""Git implementation of _version.py."""
+
+import errno
+import os
+import re
+import subprocess
+import sys
+
+
+def get_keywords():
+ """Get the keywords needed to look up the version information."""
+ # these strings will be replaced by git during git-archive.
+ # setup.py/versioneer.py will grep for the variable names, so they must
+ # each be defined on a line of their own. _version.py will just call
+ # get_keywords().
+ git_refnames = "$Format:%d$"
+ git_full = "$Format:%H$"
+ git_date = "$Format:%ci$"
+ keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
+ return keywords
+
+
+class VersioneerConfig:
+ """Container for Versioneer configuration parameters."""
+
+
+def get_config():
+ """Create, populate and return the VersioneerConfig() object."""
+ # these strings are filled in when 'setup.py versioneer' creates
+ # _version.py
+ cfg = VersioneerConfig()
+ cfg.VCS = "git"
+ cfg.style = "pep440"
+ cfg.tag_prefix = ""
+ cfg.tag_regex = "*.*.*"
+ cfg.parentdir_prefix = "BuildStream-"
+ cfg.versionfile_source = "buildstream/_version.py"
+ cfg.verbose = False
+ return cfg
+
+
+class NotThisMethod(Exception):
+ """Exception raised if a method is not valid for the current scenario."""
+
+
+LONG_VERSION_PY = {}
+HANDLERS = {}
+
+
+def register_vcs_handler(vcs, method): # decorator
+ """Decorator to mark a method as the handler for a particular VCS."""
+ def decorate(f):
+ """Store f in HANDLERS[vcs][method]."""
+ if vcs not in HANDLERS:
+ HANDLERS[vcs] = {}
+ HANDLERS[vcs][method] = f
+ return f
+ return decorate
+
+
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
+ env=None):
+ """Call the given command(s)."""
+ assert isinstance(commands, list)
+ p = None
+ for c in commands:
+ try:
+ dispcmd = str([c] + args)
+ # remember shell=False, so use git.cmd on windows, not just git
+ p = subprocess.Popen([c] + args, cwd=cwd, env=env,
+ stdout=subprocess.PIPE,
+ stderr=(subprocess.PIPE if hide_stderr
+ else None))
+ break
+ except EnvironmentError:
+ e = sys.exc_info()[1]
+ if e.errno == errno.ENOENT:
+ continue
+ if verbose:
+ print("unable to run %s" % dispcmd)
+ print(e)
+ return None, None
+ else:
+ if verbose:
+ print("unable to find command, tried %s" % (commands,))
+ return None, None
+ stdout = p.communicate()[0].strip()
+ if sys.version_info[0] >= 3:
+ stdout = stdout.decode()
+ if p.returncode != 0:
+ if verbose:
+ print("unable to run %s (error)" % dispcmd)
+ print("stdout was %s" % stdout)
+ return None, p.returncode
+ return stdout, p.returncode
+
+
+def versions_from_parentdir(parentdir_prefix, root, verbose):
+ """Try to determine the version from the parent directory name.
+
+ Source tarballs conventionally unpack into a directory that includes both
+ the project name and a version string. We will also support searching up
+ two directory levels for an appropriately named parent directory
+ """
+ rootdirs = []
+
+ for i in range(3):
+ dirname = os.path.basename(root)
+ if dirname.startswith(parentdir_prefix):
+ return {"version": dirname[len(parentdir_prefix):],
+ "full-revisionid": None,
+ "dirty": False, "error": None, "date": None}
+ else:
+ rootdirs.append(root)
+ root = os.path.dirname(root) # up a level
+
+ if verbose:
+ print("Tried directories %s but none started with prefix %s" %
+ (str(rootdirs), parentdir_prefix))
+ raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
+
+
+@register_vcs_handler("git", "get_keywords")
+def git_get_keywords(versionfile_abs):
+ """Extract version information from the given file."""
+ # the code embedded in _version.py can just fetch the value of these
+ # keywords. When used from setup.py, we don't want to import _version.py,
+ # so we do it with a regexp instead. This function is not used from
+ # _version.py.
+ keywords = {}
+ try:
+ f = open(versionfile_abs, "r")
+ for line in f.readlines():
+ if line.strip().startswith("git_refnames ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["refnames"] = mo.group(1)
+ if line.strip().startswith("git_full ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["full"] = mo.group(1)
+ if line.strip().startswith("git_date ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["date"] = mo.group(1)
+ f.close()
+ except EnvironmentError:
+ pass
+ return keywords
+
+
+@register_vcs_handler("git", "keywords")
+def git_versions_from_keywords(keywords, tag_prefix, verbose):
+ """Get version information from git keywords."""
+ if not keywords:
+ raise NotThisMethod("no keywords at all, weird")
+ date = keywords.get("date")
+ if date is not None:
+ # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
+ # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
+ # -like" string, which we must then edit to make compliant), because
+ # it's been around since git-1.5.3, and it's too difficult to
+ # discover which version we're using, or to work around using an
+ # older one.
+ date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
+ refnames = keywords["refnames"].strip()
+ if refnames.startswith("$Format"):
+ if verbose:
+ print("keywords are unexpanded, not using")
+ raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
+ refs = set([r.strip() for r in refnames.strip("()").split(",")])
+ # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
+ # just "foo-1.0". If we see a "tag: " prefix, prefer those.
+ TAG = "tag: "
+ tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+ if not tags:
+ # Either we're using git < 1.8.3, or there really are no tags. We use
+ # a heuristic: assume all version tags have a digit. The old git %d
+ # expansion behaves like git log --decorate=short and strips out the
+ # refs/heads/ and refs/tags/ prefixes that would let us distinguish
+ # between branches and tags. By ignoring refnames without digits, we
+ # filter out many common branch names like "release" and
+ # "stabilization", as well as "HEAD" and "master".
+ tags = set([r for r in refs if re.search(r'\d', r)])
+ if verbose:
+ print("discarding '%s', no digits" % ",".join(refs - tags))
+ if verbose:
+ print("likely tags: %s" % ",".join(sorted(tags)))
+ for ref in sorted(tags):
+ # sorting will prefer e.g. "2.0" over "2.0rc1"
+ if ref.startswith(tag_prefix):
+ r = ref[len(tag_prefix):]
+ if verbose:
+ print("picking %s" % r)
+ return {"version": r,
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": None,
+ "date": date}
+ # no suitable tags, so version is "0+unknown", but full hex is still there
+ if verbose:
+ print("no suitable tags, using unknown + full revision id")
+ return {"version": "0+unknown",
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": "no suitable tags", "date": None}
+
+
+@register_vcs_handler("git", "pieces_from_vcs")
+def git_pieces_from_vcs(tag_prefix, tag_regex, root, verbose, run_command=run_command):
+ """Get version from 'git describe' in the root of the source tree.
+
+ This only gets called if the git-archive 'subst' keywords were *not*
+ expanded, and _version.py hasn't already been rewritten with a short
+ version string, meaning we're inside a checked out source tree.
+ """
+ GITS = ["git"]
+ if sys.platform == "win32":
+ GITS = ["git.cmd", "git.exe"]
+
+ out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
+ hide_stderr=True)
+ if rc != 0:
+ if verbose:
+ print("Directory %s not under git control" % root)
+ raise NotThisMethod("'git rev-parse --git-dir' returned error")
+
+ # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
+ # if there isn't one, this yields HEX[-dirty] (no NUM)
+ describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
+ "--always", "--long",
+ "--match", "%s%s" % (tag_prefix, tag_regex)],
+ cwd=root)
+ # --long was added in git-1.5.5
+ if describe_out is None:
+ raise NotThisMethod("'git describe' failed")
+ describe_out = describe_out.strip()
+ full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+ if full_out is None:
+ raise NotThisMethod("'git rev-parse' failed")
+ full_out = full_out.strip()
+
+ pieces = {}
+ pieces["long"] = full_out
+ pieces["short"] = full_out[:7] # maybe improved later
+ pieces["error"] = None
+
+ # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
+ # TAG might have hyphens.
+ git_describe = describe_out
+
+ # look for -dirty suffix
+ dirty = git_describe.endswith("-dirty")
+ pieces["dirty"] = dirty
+ if dirty:
+ git_describe = git_describe[:git_describe.rindex("-dirty")]
+
+ # now we have TAG-NUM-gHEX or HEX
+
+ if "-" in git_describe:
+ # TAG-NUM-gHEX
+ mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+ if not mo:
+ # unparseable. Maybe git-describe is misbehaving?
+ pieces["error"] = ("unable to parse git-describe output: '%s'"
+ % describe_out)
+ return pieces
+
+ # tag
+ full_tag = mo.group(1)
+ if not full_tag.startswith(tag_prefix):
+ if verbose:
+ fmt = "tag '%s' doesn't start with prefix '%s'"
+ print(fmt % (full_tag, tag_prefix))
+ pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
+ % (full_tag, tag_prefix))
+ return pieces
+ pieces["closest-tag"] = full_tag[len(tag_prefix):]
+
+ # distance: number of commits since tag
+ pieces["distance"] = int(mo.group(2))
+
+ # commit: short hex revision ID
+ pieces["short"] = mo.group(3)
+
+ else:
+ # HEX: no tags
+ pieces["closest-tag"] = None
+ count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
+ cwd=root)
+ pieces["distance"] = int(count_out) # total number of commits
+
+ # commit date: see ISO-8601 comment in git_versions_from_keywords()
+ date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
+ cwd=root)[0].strip()
+ pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
+
+ return pieces
+
+
+def plus_or_dot(pieces):
+ """Return a + if we don't already have one, else return a ."""
+ if "+" in pieces.get("closest-tag", ""):
+ return "."
+ return "+"
+
+
+def render_pep440(pieces):
+ """Build up version string, with post-release "local version identifier".
+
+ Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+ get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+
+ Exceptions:
+ 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += plus_or_dot(pieces)
+ rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ else:
+ # exception #1
+ rendered = "0+untagged.%d.g%s" % (pieces["distance"],
+ pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ return rendered
+
+
+def render_pep440_pre(pieces):
+ """TAG[.post.devDISTANCE] -- No -dirty.
+
+ Exceptions:
+ 1: no tags. 0.post.devDISTANCE
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += ".post.dev%d" % pieces["distance"]
+ else:
+ # exception #1
+ rendered = "0.post.dev%d" % pieces["distance"]
+ return rendered
+
+
+def render_pep440_post(pieces):
+ """TAG[.postDISTANCE[.dev0]+gHEX] .
+
+ The ".dev0" means dirty. Note that .dev0 sorts backwards
+ (a dirty tree will appear "older" than the corresponding clean one),
+ but you shouldn't be releasing software with -dirty anyways.
+
+ Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += plus_or_dot(pieces)
+ rendered += "g%s" % pieces["short"]
+ else:
+ # exception #1
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += "+g%s" % pieces["short"]
+ return rendered
+
+
+def render_pep440_old(pieces):
+ """TAG[.postDISTANCE[.dev0]] .
+
+ The ".dev0" means dirty.
+
+    Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ else:
+ # exception #1
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ return rendered
+
+
+def render_git_describe(pieces):
+ """TAG[-DISTANCE-gHEX][-dirty].
+
+ Like 'git describe --tags --dirty --always'.
+
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render_git_describe_long(pieces):
+ """TAG-DISTANCE-gHEX[-dirty].
+
+    Like 'git describe --tags --dirty --always --long'.
+ The distance/hash is unconditional.
+
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render(pieces, style):
+ """Render the given version pieces into the requested style."""
+ if pieces["error"]:
+ return {"version": "unknown",
+ "full-revisionid": pieces.get("long"),
+ "dirty": None,
+ "error": pieces["error"],
+ "date": None}
+
+ if not style or style == "default":
+ style = "pep440" # the default
+
+ if style == "pep440":
+ rendered = render_pep440(pieces)
+ elif style == "pep440-pre":
+ rendered = render_pep440_pre(pieces)
+ elif style == "pep440-post":
+ rendered = render_pep440_post(pieces)
+ elif style == "pep440-old":
+ rendered = render_pep440_old(pieces)
+ elif style == "git-describe":
+ rendered = render_git_describe(pieces)
+ elif style == "git-describe-long":
+ rendered = render_git_describe_long(pieces)
+ else:
+ raise ValueError("unknown style '%s'" % style)
+
+ return {"version": rendered, "full-revisionid": pieces["long"],
+ "dirty": pieces["dirty"], "error": None,
+ "date": pieces.get("date")}
+
+
+def get_versions():
+ """Get version information or return default if unable to do so."""
+ # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
+ # __file__, we can work backwards from there to the root. Some
+ # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
+ # case we can only use expanded keywords.
+
+ cfg = get_config()
+ verbose = cfg.verbose
+
+ try:
+ return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
+ verbose)
+ except NotThisMethod:
+ pass
+
+ try:
+ root = os.path.realpath(__file__)
+ # versionfile_source is the relative path from the top of the source
+ # tree (where the .git directory might live) to this file. Invert
+ # this to find the root from __file__.
+ for i in cfg.versionfile_source.split('/'):
+ root = os.path.dirname(root)
+ except NameError:
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to find root of source tree",
+ "date": None}
+
+ try:
+ pieces = git_pieces_from_vcs(cfg.tag_prefix, cfg.tag_regex, root, verbose)
+ return render(pieces, cfg.style)
+ except NotThisMethod:
+ pass
+
+ try:
+ if cfg.parentdir_prefix:
+ return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
+ except NotThisMethod:
+ pass
+
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to compute version", "date": None}
diff --git a/src/buildstream/_versions.py b/src/buildstream/_versions.py
new file mode 100644
index 000000000..c439f59fb
--- /dev/null
+++ b/src/buildstream/_versions.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+
+# The base BuildStream format version
+#
+# This version is bumped whenever enhancements are made
+# to the `project.conf` format or the core element format.
+#
+BST_FORMAT_VERSION = 24
+
+
+# The base BuildStream artifact version
+#
+# The artifact version changes whenever the cache key
+# calculation algorithm changes in an incompatible way
+# or if buildstream was changed in a way which can cause
+# the same cache key to produce something that is no longer
+# the same.
+BST_CORE_ARTIFACT_VERSION = 8
diff --git a/src/buildstream/_workspaces.py b/src/buildstream/_workspaces.py
new file mode 100644
index 000000000..9fbfb7e63
--- /dev/null
+++ b/src/buildstream/_workspaces.py
@@ -0,0 +1,650 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+import os
+from . import utils
+from . import _yaml
+
+from ._exceptions import LoadError, LoadErrorReason
+
+
+BST_WORKSPACE_FORMAT_VERSION = 3
+BST_WORKSPACE_PROJECT_FORMAT_VERSION = 1
+WORKSPACE_PROJECT_FILE = ".bstproject.yaml"
+
+
+# WorkspaceProject()
+#
+# An object to contain various helper functions and data required for
+# referring from a workspace back to buildstream.
+#
+# Args:
+# directory (str): The directory that the workspace exists in.
+#
+class WorkspaceProject():
+ def __init__(self, directory):
+ self._projects = []
+ self._directory = directory
+
+ # get_default_project_path()
+ #
+ # Retrieves the default path to a project.
+ #
+ # Returns:
+ # (str): The path to a project
+ #
+ def get_default_project_path(self):
+ return self._projects[0]['project-path']
+
+ # get_default_element()
+ #
+ # Retrieves the name of the element that owns this workspace.
+ #
+ # Returns:
+ # (str): The name of an element
+ #
+ def get_default_element(self):
+ return self._projects[0]['element-name']
+
+ # to_dict()
+ #
+ # Turn the members data into a dict for serialization purposes
+ #
+ # Returns:
+ # (dict): A dict representation of the WorkspaceProject
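+    #
+    # The resulting layout looks roughly like this (the path and element name are assumptions):
+    #
+    #   {
+    #       'projects': [{'project-path': '/path/to/project', 'element-name': 'hello.bst'}],
+    #       'format-version': 1,
+    #   }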
+ #
+ def to_dict(self):
+ ret = {
+ 'projects': self._projects,
+ 'format-version': BST_WORKSPACE_PROJECT_FORMAT_VERSION,
+ }
+ return ret
+
+ # from_dict()
+ #
+ # Loads a new WorkspaceProject from a simple dictionary
+ #
+ # Args:
+ # directory (str): The directory that the workspace exists in
+ # dictionary (dict): The dict to generate a WorkspaceProject from
+ #
+ # Returns:
+ # (WorkspaceProject): A newly instantiated WorkspaceProject
+ #
+ @classmethod
+ def from_dict(cls, directory, dictionary):
+ # Only know how to handle one format-version at the moment.
+ format_version = int(dictionary['format-version'])
+ assert format_version == BST_WORKSPACE_PROJECT_FORMAT_VERSION, \
+ "Format version {} not found in {}".format(BST_WORKSPACE_PROJECT_FORMAT_VERSION, dictionary)
+
+ workspace_project = cls(directory)
+ for item in dictionary['projects']:
+ workspace_project.add_project(item['project-path'], item['element-name'])
+
+ return workspace_project
+
+ # load()
+ #
+ # Loads the WorkspaceProject for a given directory.
+ #
+ # Args:
+ # directory (str): The directory
+ # Returns:
+ # (WorkspaceProject): The created WorkspaceProject, if in a workspace, or
+ # (NoneType): None, if the directory is not inside a workspace.
+ #
+ @classmethod
+ def load(cls, directory):
+ workspace_file = os.path.join(directory, WORKSPACE_PROJECT_FILE)
+ if os.path.exists(workspace_file):
+ data_dict = _yaml.node_sanitize(_yaml.roundtrip_load(workspace_file), dict_type=dict)
+ return cls.from_dict(directory, data_dict)
+ else:
+ return None
+
+ # write()
+ #
+ # Writes the WorkspaceProject to disk
+ #
+ def write(self):
+ os.makedirs(self._directory, exist_ok=True)
+ _yaml.dump(self.to_dict(), self.get_filename())
+
+ # get_filename()
+ #
+ # Returns the full path to the workspace local project file
+ #
+ def get_filename(self):
+ return os.path.join(self._directory, WORKSPACE_PROJECT_FILE)
+
+ # add_project()
+ #
+ # Adds an entry containing the project's path and element's name.
+ #
+ # Args:
+ # project_path (str): The path to the project that opened the workspace.
+ # element_name (str): The name of the element that the workspace belongs to.
+ #
+ def add_project(self, project_path, element_name):
+ assert (project_path and element_name)
+ self._projects.append({'project-path': project_path, 'element-name': element_name})
+
+
+# WorkspaceProjectCache()
+#
+# A class to manage workspace project data for multiple workspaces.
+#
+class WorkspaceProjectCache():
+ def __init__(self):
+ self._projects = {} # Mapping of a workspace directory to its WorkspaceProject
+
+ # get()
+ #
+ # Returns a WorkspaceProject for a given directory, retrieving from the cache if
+ # present.
+ #
+ # Args:
+ # directory (str): The directory to search for a WorkspaceProject.
+ #
+ # Returns:
+ # (WorkspaceProject): The WorkspaceProject that was found for that directory.
+ # or (NoneType): None, if no WorkspaceProject can be found.
+ #
+ def get(self, directory):
+ try:
+ workspace_project = self._projects[directory]
+ except KeyError:
+ workspace_project = WorkspaceProject.load(directory)
+ if workspace_project:
+ self._projects[directory] = workspace_project
+
+ return workspace_project
+
+ # add()
+ #
+ # Adds the project path and element name to the WorkspaceProject that exists
+ # for that directory
+ #
+ # Args:
+ # directory (str): The directory to search for a WorkspaceProject.
+ # project_path (str): The path to the project that refers to this workspace
+    #    element_name (str): The element in the project that refers to this workspace
+ #
+ # Returns:
+ # (WorkspaceProject): The WorkspaceProject that was found for that directory.
+ #
+ def add(self, directory, project_path, element_name):
+ workspace_project = self.get(directory)
+ if not workspace_project:
+ workspace_project = WorkspaceProject(directory)
+ self._projects[directory] = workspace_project
+
+ workspace_project.add_project(project_path, element_name)
+ return workspace_project
+
+ # remove()
+ #
+ # Removes the project path and element name from the WorkspaceProject that exists
+ # for that directory.
+ #
+ # NOTE: This currently just deletes the file, but with support for multiple
+ # projects opening the same workspace, this will involve decreasing the count
+ # and deleting the file if there are no more projects.
+ #
+ # Args:
+ # directory (str): The directory to search for a WorkspaceProject.
+ #
+ def remove(self, directory):
+ workspace_project = self.get(directory)
+ if not workspace_project:
+ raise LoadError(LoadErrorReason.MISSING_FILE,
+ "Failed to find a {} file to remove".format(WORKSPACE_PROJECT_FILE))
+ path = workspace_project.get_filename()
+ try:
+ os.unlink(path)
+ except FileNotFoundError:
+ pass
+
+
+# Workspace()
+#
+# An object to contain various helper functions and data required for
+# workspaces.
+#
+# last_successful, path and running_files are intended to be public
+# properties, but may be best accessed using this class's helper
+# methods.
+#
+# Args:
+# toplevel_project (Project): Top project. Will be used for resolving relative workspace paths.
+# path (str): The path that should host this workspace
+# last_successful (str): The key of the last successful build of this workspace
+# running_files (dict): A dict mapping dependency elements to files
+# changed between failed builds. Should be
+# made obsolete with failed build artifacts.
+#
+class Workspace():
+ def __init__(self, toplevel_project, *, last_successful=None, path=None, prepared=False, running_files=None):
+ self.prepared = prepared
+ self.last_successful = last_successful
+ self._path = path
+ self.running_files = running_files if running_files is not None else {}
+
+ self._toplevel_project = toplevel_project
+ self._key = None
+
+ # to_dict()
+ #
+    # Convert this workspace's members into a dict for serialization purposes
+ #
+ # Returns:
+ # (dict) A dict representation of the workspace
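+    #
+    # The resulting layout looks roughly like this (the path is an assumption):
+    #
+    #   {
+    #       'prepared': False,
+    #       'path': '../workspace_hello',
+    #       'running_files': {},
+    #       'last_successful': '<cache key>',  # only present when set
+    #   }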
+ #
+ def to_dict(self):
+ ret = {
+ 'prepared': self.prepared,
+ 'path': self._path,
+ 'running_files': self.running_files
+ }
+ if self.last_successful is not None:
+ ret["last_successful"] = self.last_successful
+ return ret
+
+ # from_dict():
+ #
+ # Loads a new workspace from a simple dictionary, the dictionary
+ # is expected to be generated from Workspace.to_dict(), or manually
+ # when loading from a YAML file.
+ #
+ # Args:
+ # toplevel_project (Project): Top project. Will be used for resolving relative workspace paths.
+ # dictionary: A simple dictionary object
+ #
+ # Returns:
+ # (Workspace): A newly instantiated Workspace
+ #
+ @classmethod
+ def from_dict(cls, toplevel_project, dictionary):
+
+ # Just pass the dictionary as kwargs
+ return cls(toplevel_project, **dictionary)
+
+ # differs()
+ #
+ # Checks if two workspaces are different in any way.
+ #
+ # Args:
+ # other (Workspace): Another workspace instance
+ #
+ # Returns:
+ # True if the workspace differs from 'other', otherwise False
+ #
+ def differs(self, other):
+ return self.to_dict() != other.to_dict()
+
+ # invalidate_key()
+ #
+ # Invalidate the workspace key, forcing a recalculation next time
+ # it is accessed.
+ #
+ def invalidate_key(self):
+ self._key = None
+
+ # stage()
+ #
+ # Stage the workspace to the given directory.
+ #
+ # Args:
+ # directory (str) - The directory into which to stage this workspace
+ #
+ def stage(self, directory):
+ fullpath = self.get_absolute_path()
+ if os.path.isdir(fullpath):
+ utils.copy_files(fullpath, directory)
+ else:
+ destfile = os.path.join(directory, os.path.basename(self.get_absolute_path()))
+ utils.safe_copy(fullpath, destfile)
+
+ # add_running_files()
+ #
+ # Append a list of files to the running_files for the given
+ # dependency. Duplicate files will be ignored.
+ #
+ # Args:
+ # dep_name (str) - The dependency name whose files to append to
+ # files (str) - A list of files to append
+ #
+ def add_running_files(self, dep_name, files):
+ if dep_name in self.running_files:
+ # ruamel.py cannot serialize sets in python3.4
+ to_add = set(files) - set(self.running_files[dep_name])
+ self.running_files[dep_name].extend(to_add)
+ else:
+ self.running_files[dep_name] = list(files)
+
+ # clear_running_files()
+ #
+ # Clear all running files associated with this workspace.
+ #
+ def clear_running_files(self):
+ self.running_files = {}
+
+ # get_key()
+ #
+ # Get a unique key for this workspace.
+ #
+ # Args:
+ # recalculate (bool) - Whether to recalculate the key
+ #
+ # Returns:
+    #    (list): A unique key for this workspace, as a list of (relative path, mtime) tuples
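+    #
+    # Illustrative shape of the key (the filenames and timestamps are assumptions):
+    #
+    #   [("src/main.c", 1556121600123456789), ("Makefile", 1556121601987654321)]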
+ #
+ def get_key(self, recalculate=False):
+ def unique_key(filename):
+ try:
+ stat = os.lstat(filename)
+ except OSError as e:
+ raise LoadError(LoadErrorReason.MISSING_FILE,
+ "Failed to stat file in workspace: {}".format(e))
+
+ # Use the mtime of any file with sub second precision
+ return stat.st_mtime_ns
+
+ if recalculate or self._key is None:
+ fullpath = self.get_absolute_path()
+
+ excluded_files = (WORKSPACE_PROJECT_FILE,)
+
+            # Get a list of tuples of the project relative paths and fullpaths
+ if os.path.isdir(fullpath):
+ filelist = utils.list_relative_paths(fullpath)
+ filelist = [
+ (relpath, os.path.join(fullpath, relpath)) for relpath in filelist
+ if relpath not in excluded_files
+ ]
+ else:
+ filelist = [(self.get_absolute_path(), fullpath)]
+
+ self._key = [(relpath, unique_key(fullpath)) for relpath, fullpath in filelist]
+
+ return self._key
+
+ # get_absolute_path():
+ #
+ # Returns: The absolute path of the element's workspace.
+ #
+ def get_absolute_path(self):
+ return os.path.join(self._toplevel_project.directory, self._path)
+
+
+# Workspaces()
+#
+# A class to manage Workspaces for multiple elements.
+#
+# Args:
+# toplevel_project (Project): Top project used to resolve paths.
+# workspace_project_cache (WorkspaceProjectCache): The cache of WorkspaceProjects
+#
+class Workspaces():
+ def __init__(self, toplevel_project, workspace_project_cache):
+ self._toplevel_project = toplevel_project
+ self._bst_directory = os.path.join(toplevel_project.directory, ".bst")
+ self._workspaces = self._load_config()
+ self._workspace_project_cache = workspace_project_cache
+
+ # list()
+ #
+ # Generator function to enumerate workspaces.
+ #
+ # Yields:
+ # A tuple in the following format: (str, Workspace), where the
+ # first element is the name of the workspaced element.
+ def list(self):
+ for element in self._workspaces.keys():
+ yield (element, self._workspaces[element])
+
+ # create_workspace()
+ #
+    # Create a workspace in the given path for the given element, and
+    # optionally check out the target's sources into it.
+ #
+ # Args:
+ # target (Element) - The element to create a workspace for
+ # path (str) - The path in which the workspace should be kept
+ # checkout (bool): Whether to check-out the element's sources into the directory
+ #
+ def create_workspace(self, target, path, *, checkout):
+ element_name = target._get_full_name()
+ project_dir = self._toplevel_project.directory
+ if path.startswith(project_dir):
+ workspace_path = os.path.relpath(path, project_dir)
+ else:
+ workspace_path = path
+
+ self._workspaces[element_name] = Workspace(self._toplevel_project, path=workspace_path)
+
+ if checkout:
+ with target.timed_activity("Staging sources to {}".format(path)):
+ target._open_workspace()
+
+ workspace_project = self._workspace_project_cache.add(path, project_dir, element_name)
+ project_file_path = workspace_project.get_filename()
+
+ if os.path.exists(project_file_path):
+ target.warn("{} was staged from this element's sources".format(WORKSPACE_PROJECT_FILE))
+ workspace_project.write()
+
+ self.save_config()
+
+ # get_workspace()
+ #
+    # Get the workspace associated with the given element, if any
+ #
+ # Args:
+ # element_name (str) - The element name whose workspace to return
+ #
+ # Returns:
+ # (None|Workspace)
+ #
+ def get_workspace(self, element_name):
+ if element_name not in self._workspaces:
+ return None
+ return self._workspaces[element_name]
+
+ # update_workspace()
+ #
+ # Update the datamodel with a new Workspace instance
+ #
+ # Args:
+ # element_name (str): The name of the element to update a workspace for
+    #    workspace_dict (dict): A serialized workspace dictionary
+ #
+ # Returns:
+ # (bool): Whether the workspace has changed as a result
+ #
+ def update_workspace(self, element_name, workspace_dict):
+ assert element_name in self._workspaces
+
+ workspace = Workspace.from_dict(self._toplevel_project, workspace_dict)
+ if self._workspaces[element_name].differs(workspace):
+ self._workspaces[element_name] = workspace
+ return True
+
+ return False
+
+ # delete_workspace()
+ #
+    # Remove the workspace for the given element. Note that this
+    # does *not* remove the workspace from the stored yaml
+    # configuration; call save_config() afterwards.
+ #
+ # Args:
+ # element_name (str) - The element name whose workspace to delete
+ #
+ def delete_workspace(self, element_name):
+ workspace = self.get_workspace(element_name)
+ del self._workspaces[element_name]
+
+ # Remove from the cache if it exists
+ try:
+ self._workspace_project_cache.remove(workspace.get_absolute_path())
+ except LoadError as e:
+ # We might be closing a workspace with a deleted directory
+ if e.reason == LoadErrorReason.MISSING_FILE:
+ pass
+ else:
+ raise
+
+ # save_config()
+ #
+    # Dump the current set of workspaces to the project configuration
+    # file. This makes any changes performed with delete_workspace() or
+    # create_workspace() permanent.
+ #
+ def save_config(self):
+ assert utils._is_main_process()
+
+ config = {
+ 'format-version': BST_WORKSPACE_FORMAT_VERSION,
+ 'workspaces': {
+ element: workspace.to_dict()
+ for element, workspace in self._workspaces.items()
+ }
+ }
+ os.makedirs(self._bst_directory, exist_ok=True)
+ _yaml.dump(config, self._get_filename())
+
+ # _load_config()
+ #
+ # Loads and parses the workspace configuration
+ #
+ # Returns:
+ # (dict) The extracted workspaces
+ #
+ # Raises: LoadError if there was a problem with the workspace config
+ #
+ def _load_config(self):
+ workspace_file = self._get_filename()
+ try:
+ node = _yaml.load(workspace_file)
+ except LoadError as e:
+ if e.reason == LoadErrorReason.MISSING_FILE:
+ # Return an empty dict if there was no workspace file
+ return {}
+
+ raise
+
+ return self._parse_workspace_config(node)
+
+    # _parse_workspace_config()
+ #
+ # If workspace config is in old-style format, i.e. it is using
+ # source-specific workspaces, try to convert it to element-specific
+ # workspaces.
+ #
+ # Args:
+    #    workspaces (dict): current workspace config, usually the output of _load_config()
+ #
+ # Returns:
+ # (dict) The extracted workspaces
+ #
+ # Raises: LoadError if there was a problem with the workspace config
+ #
+ def _parse_workspace_config(self, workspaces):
+ try:
+ version = _yaml.node_get(workspaces, int, 'format-version', default_value=0)
+ except ValueError:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "Format version is not an integer in workspace configuration")
+
+ if version == 0:
+ # Pre-versioning format can be of two forms
+ for element, config in _yaml.node_items(workspaces):
+ if _yaml.is_node(config):
+ # Get a dict
+ config = _yaml.node_sanitize(config, dict_type=dict)
+
+ if isinstance(config, str):
+ pass
+
+ elif isinstance(config, dict):
+ sources = list(config.items())
+ if len(sources) > 1:
+ detail = "There are multiple workspaces open for '{}'.\n" + \
+ "This is not supported anymore.\n" + \
+ "Please remove this element from '{}'."
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ detail.format(element, self._get_filename()))
+
+ _yaml.node_set(workspaces, element, sources[0][1])
+
+ else:
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "Workspace config is in unexpected format.")
+
+ res = {
+ element: Workspace(self._toplevel_project, path=config)
+ for element, config in _yaml.node_items(workspaces)
+ }
+
+ elif 1 <= version <= BST_WORKSPACE_FORMAT_VERSION:
+ workspaces = _yaml.node_get(workspaces, dict, "workspaces",
+ default_value=_yaml.new_empty_node())
+ res = {element: self._load_workspace(node)
+ for element, node in _yaml.node_items(workspaces)}
+
+ else:
+            raise LoadError(LoadErrorReason.INVALID_DATA,
+                            "Workspace configuration format version {} not supported. "
+                            "Your version of buildstream may be too old. Max supported version: {}"
+                            .format(version, BST_WORKSPACE_FORMAT_VERSION))
+
+ return res
+
+ # _load_workspace():
+ #
+ # Loads a new workspace from a YAML node
+ #
+ # Args:
+ # node: A YAML dict
+ #
+ # Returns:
+ # (Workspace): A newly instantiated Workspace
+ #
+ def _load_workspace(self, node):
+ dictionary = {
+ 'prepared': _yaml.node_get(node, bool, 'prepared', default_value=False),
+ 'path': _yaml.node_get(node, str, 'path'),
+ 'last_successful': _yaml.node_get(node, str, 'last_successful', default_value=None),
+ 'running_files': _yaml.node_sanitize(
+ _yaml.node_get(node, dict, 'running_files', default_value=None),
+ dict_type=dict),
+ }
+ return Workspace.from_dict(self._toplevel_project, dictionary)
+
+ # _get_filename():
+ #
+ # Get the workspaces.yml file path.
+ #
+ # Returns:
+ # (str): The path to workspaces.yml file.
+ def _get_filename(self):
+ return os.path.join(self._bst_directory, "workspaces.yml")
diff --git a/src/buildstream/_yaml.py b/src/buildstream/_yaml.py
new file mode 100644
index 000000000..cdab4269e
--- /dev/null
+++ b/src/buildstream/_yaml.py
@@ -0,0 +1,1432 @@
+#
+# Copyright (C) 2018 Codethink Limited
+# Copyright (C) 2019 Bloomberg LLP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Daniel Silverstone <daniel.silverstone@codethink.co.uk>
+# James Ennis <james.ennis@codethink.co.uk>
+
+import sys
+import string
+from contextlib import ExitStack
+from collections import OrderedDict, namedtuple
+from collections.abc import Mapping, Sequence
+from copy import deepcopy
+from itertools import count
+
+from ruamel import yaml
+from ._exceptions import LoadError, LoadErrorReason
+
+
+# Without this, pylint complains about all the `type(foo) is blah` checks
+# because it feels isinstance() is more idiomatic. Sadly, it is much slower to
+# do `isinstance(foo, blah)` for reasons I am unable to fathom. As such, we
+# blanket disable the check for this module.
+#
+# pylint: disable=unidiomatic-typecheck
+
+
+# Node()
+#
+# Container for YAML loaded data and its provenance
+#
+# All nodes returned (and all internal lists/strings) have this type (rather
+# than a plain tuple, to distinguish them in things like node_sanitize)
+#
+# Members:
+# value (str/list/dict): The loaded value.
+# file_index (int): Index within _FILE_LIST (a list of loaded file paths).
+# Negative indices indicate synthetic nodes so that
+# they can be referenced.
+# line (int): The line number within the file where the value appears.
+# col (int): The column number within the file where the value appears.
+#
+# For efficiency, each field should be accessed by its integer index:
+# value = Node[0]
+# file_index = Node[1]
+# line = Node[2]
+# column = Node[3]
+#
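+# For example (a sketch; 'node' is assumed to be a loaded mapping Node):
+#
+#   mapping = node[0]     # the underlying dict of child nodes
+#   line = node[2] + 1    # 1-based line number for human-readable output
+#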
+class Node(namedtuple('Node', ['value', 'file_index', 'line', 'column'])):
+ def __contains__(self, what):
+ # Delegate to the inner value, though this will likely not work
+ # very well if the node is a list or string, it's unlikely that
+ # code which has access to such nodes would do this.
+ return what in self[0]
+
+
+# File name handling
+_FILE_LIST = []
+
+
+# A purely synthetic node will have None for the file number, a line number of
+# zero, and a negative column number which comes from inverting the next value
+# out of this counter. Synthetic nodes created with a reference node will
+# have a file number from the reference node, some unknown line number, and
+# a negative column number from this counter.
+_SYNTHETIC_COUNTER = count(start=-1, step=-1)
+
+
+# Returned from node_get_provenance
+class ProvenanceInformation:
+
+ __slots__ = (
+ "filename",
+ "shortname",
+ "displayname",
+ "line",
+ "col",
+ "toplevel",
+ "node",
+ "project",
+ "is_synthetic",
+ )
+
+ def __init__(self, nodeish):
+ self.node = nodeish
+ if (nodeish is None) or (nodeish[1] is None):
+ self.filename = ""
+ self.shortname = ""
+ self.displayname = ""
+ self.line = 1
+ self.col = 0
+ self.toplevel = None
+ self.project = None
+ else:
+ fileinfo = _FILE_LIST[nodeish[1]]
+ self.filename = fileinfo[0]
+ self.shortname = fileinfo[1]
+ self.displayname = fileinfo[2]
+ # We add 1 here to convert from computerish to humanish
+ self.line = nodeish[2] + 1
+ self.col = nodeish[3]
+ self.toplevel = fileinfo[3]
+ self.project = fileinfo[4]
+ self.is_synthetic = (self.filename == '') or (self.col < 0)
+
+ # Convert a Provenance to a string for error reporting
+ def __str__(self):
+ if self.is_synthetic:
+ return "{} [synthetic node]".format(self.displayname)
+ else:
+ return "{} [line {:d} column {:d}]".format(self.displayname, self.line, self.col)
+
+
+# These exceptions are intended to be caught entirely within
+# the BuildStream framework, hence they do not reside in the
+# public exceptions.py
+class CompositeError(Exception):
+ def __init__(self, path, message):
+ super(CompositeError, self).__init__(message)
+ self.path = path
+ self.message = message
+
+
+class YAMLLoadError(Exception):
+ pass
+
+
+# Representer for YAML events comprising input to the BuildStream format.
+#
+# All streams MUST represent a single document which must be a Mapping.
+# Anything else is considered an error.
+#
+# Mappings must only have string keys, values are always represented as
+# strings if they are scalar, or else as simple dictionaries and lists.
+#
+class Representer:
+ __slots__ = (
+ "_file_index",
+ "state",
+ "output",
+ "keys",
+ )
+
+ # Initialise a new representer
+ #
+ # The file index is used to store into the Node instances so that the
+ # provenance of the YAML can be tracked.
+ #
+ # Args:
+ # file_index (int): The index of this YAML file
+ def __init__(self, file_index):
+ self._file_index = file_index
+ self.state = "init"
+ self.output = []
+ self.keys = []
+
+ # Handle a YAML parse event
+ #
+ # Args:
+ # event (YAML Event): The event to be handled
+ #
+ # Raises:
+ # YAMLLoadError: Something went wrong.
+ def handle_event(self, event):
+ if getattr(event, "anchor", None) is not None:
+ raise YAMLLoadError("Anchors are disallowed in BuildStream at line {} column {}"
+ .format(event.start_mark.line, event.start_mark.column))
+
+ if event.__class__.__name__ == "ScalarEvent":
+ if event.tag is not None:
+ if not event.tag.startswith("tag:yaml.org,2002:"):
+ raise YAMLLoadError(
+ "Non-core tag expressed in input. " +
+ "This is disallowed in BuildStream. At line {} column {}"
+ .format(event.start_mark.line, event.start_mark.column))
+
+ handler = "_handle_{}_{}".format(self.state, event.__class__.__name__)
+ handler = getattr(self, handler, None)
+ if handler is None:
+ raise YAMLLoadError(
+ "Invalid input detected. No handler for {} in state {} at line {} column {}"
+ .format(event, self.state, event.start_mark.line, event.start_mark.column))
+
+ self.state = handler(event) # pylint: disable=not-callable
+
+ # Get the output of the YAML parse
+ #
+ # Returns:
+ # (Node or None): Return the Node instance of the top level mapping or
+ # None if there wasn't one.
+ def get_output(self):
+ try:
+ return self.output[0]
+ except IndexError:
+ return None
+
+ def _handle_init_StreamStartEvent(self, ev):
+ return "stream"
+
+ def _handle_stream_DocumentStartEvent(self, ev):
+ return "doc"
+
+ def _handle_doc_MappingStartEvent(self, ev):
+ newmap = Node({}, self._file_index, ev.start_mark.line, ev.start_mark.column)
+ self.output.append(newmap)
+ return "wait_key"
+
+ def _handle_wait_key_ScalarEvent(self, ev):
+ self.keys.append(ev.value)
+ return "wait_value"
+
+ def _handle_wait_value_ScalarEvent(self, ev):
+ key = self.keys.pop()
+ self.output[-1][0][key] = \
+ Node(ev.value, self._file_index, ev.start_mark.line, ev.start_mark.column)
+ return "wait_key"
+
+ def _handle_wait_value_MappingStartEvent(self, ev):
+ new_state = self._handle_doc_MappingStartEvent(ev)
+ key = self.keys.pop()
+ self.output[-2][0][key] = self.output[-1]
+ return new_state
+
+ def _handle_wait_key_MappingEndEvent(self, ev):
+ # We've finished a mapping, so pop it off the output stack
+ # unless it's the last one in which case we leave it
+ if len(self.output) > 1:
+ self.output.pop()
+ if type(self.output[-1][0]) is list:
+ return "wait_list_item"
+ else:
+ return "wait_key"
+ else:
+ return "doc"
+
+ def _handle_wait_value_SequenceStartEvent(self, ev):
+ self.output.append(Node([], self._file_index, ev.start_mark.line, ev.start_mark.column))
+ self.output[-2][0][self.keys[-1]] = self.output[-1]
+ return "wait_list_item"
+
+ def _handle_wait_list_item_SequenceStartEvent(self, ev):
+ self.keys.append(len(self.output[-1][0]))
+ self.output.append(Node([], self._file_index, ev.start_mark.line, ev.start_mark.column))
+ self.output[-2][0].append(self.output[-1])
+ return "wait_list_item"
+
+ def _handle_wait_list_item_SequenceEndEvent(self, ev):
+ # When ending a sequence, we need to pop a key because we retain the
+ # key until the end so that if we need to mutate the underlying entry
+ # we can.
+ key = self.keys.pop()
+ self.output.pop()
+ if type(key) is int:
+ return "wait_list_item"
+ else:
+ return "wait_key"
+
+ def _handle_wait_list_item_ScalarEvent(self, ev):
+ self.output[-1][0].append(
+ Node(ev.value, self._file_index, ev.start_mark.line, ev.start_mark.column))
+ return "wait_list_item"
+
+ def _handle_wait_list_item_MappingStartEvent(self, ev):
+ new_state = self._handle_doc_MappingStartEvent(ev)
+ self.output[-2][0].append(self.output[-1])
+ return new_state
+
+ def _handle_doc_DocumentEndEvent(self, ev):
+ if len(self.output) != 1:
+ raise YAMLLoadError("Zero, or more than one document found in YAML stream")
+ return "stream"
+
+ def _handle_stream_StreamEndEvent(self, ev):
+ return "init"
+
+
+# Loads a dictionary from some YAML
+#
+# Args:
+# filename (str): The YAML file to load
+# shortname (str): The filename in shorthand for error reporting (or None)
+# copy_tree (bool): Whether to make a copy, preserving the original toplevels
+# for later serialization
+# project (Project): The (optional) project to associate the parsed YAML with
+#
+# Returns (dict): A loaded copy of the YAML file with provenance information
+#
+# Raises: LoadError
+#
+def load(filename, shortname=None, copy_tree=False, *, project=None):
+ if not shortname:
+ shortname = filename
+
+ if (project is not None) and (project.junction is not None):
+ displayname = "{}:{}".format(project.junction.name, shortname)
+ else:
+ displayname = shortname
+
+ file_number = len(_FILE_LIST)
+ _FILE_LIST.append((filename, shortname, displayname, None, project))
+
+ try:
+ with open(filename) as f:
+ contents = f.read()
+
+ data = load_data(contents,
+ file_index=file_number,
+ file_name=filename,
+ copy_tree=copy_tree)
+
+ return data
+ except FileNotFoundError as e:
+ raise LoadError(LoadErrorReason.MISSING_FILE,
+ "Could not find file at {}".format(filename)) from e
+ except IsADirectoryError as e:
+ raise LoadError(LoadErrorReason.LOADING_DIRECTORY,
+ "{} is a directory. bst command expects a .bst file."
+ .format(filename)) from e
+ except LoadError as e:
+ raise LoadError(e.reason, "{}: {}".format(displayname, e)) from e
+
+
+# Like load(), but doesn't require the data to be in a file
+#
+def load_data(data, file_index=None, file_name=None, copy_tree=False):
+
+ try:
+ rep = Representer(file_index)
+ for event in yaml.parse(data, Loader=yaml.CBaseLoader):
+ rep.handle_event(event)
+ contents = rep.get_output()
+ except YAMLLoadError as e:
+ raise LoadError(LoadErrorReason.INVALID_YAML,
+ "Malformed YAML:\n\n{}\n\n".format(e)) from e
+ except Exception as e:
+ raise LoadError(LoadErrorReason.INVALID_YAML,
+ "Severely malformed YAML:\n\n{}\n\n".format(e)) from e
+
+ if not isinstance(contents, tuple) or not isinstance(contents[0], dict):
+ # Special case allowance for None, when the loaded file has only comments in it.
+ if contents is None:
+ contents = Node({}, file_index, 0, 0)
+ else:
+ raise LoadError(LoadErrorReason.INVALID_YAML,
+ "YAML file has content of type '{}' instead of expected type 'dict': {}"
+ .format(type(contents[0]).__name__, file_name))
+
+ # Store this away because we'll use it later for "top level" provenance
+ if file_index is not None:
+ _FILE_LIST[file_index] = (
+ _FILE_LIST[file_index][0], # Filename
+ _FILE_LIST[file_index][1], # Shortname
+ _FILE_LIST[file_index][2], # Displayname
+ contents,
+ _FILE_LIST[file_index][4], # Project
+ )
+
+ if copy_tree:
+ contents = node_copy(contents)
+ return contents
+
+
+# dump()
+#
+# Write a YAML node structure out to disk.
+#
+# This will always call `node_sanitize` on its input, so if you wanted
+# to output something close to what you read in, consider using the
+# `roundtrip_load` and `roundtrip_dump` function pair instead.
+#
+# Args:
+# contents (any): Content to write out
+# filename (str): The (optional) file name to write out to
+def dump(contents, filename=None):
+ roundtrip_dump(node_sanitize(contents), file=filename)
+
+
+# node_get_provenance()
+#
+# Gets the provenance for a node
+#
+# Args:
+# node (dict): a dictionary
+# key (str): key in the dictionary
+# indices (list of indexes): Index path, in the case of list values
+#
+# Returns: The Provenance of the dict, member or list element
+#
+def node_get_provenance(node, key=None, indices=None):
+ assert is_node(node)
+
+ if key is None:
+ # Retrieving the provenance for this node directly
+ return ProvenanceInformation(node)
+
+ if key and not indices:
+ return ProvenanceInformation(node[0].get(key))
+
+ nodeish = node[0].get(key)
+ for idx in indices:
+ nodeish = nodeish[0][idx]
+
+ return ProvenanceInformation(nodeish)
+
+
+# A sentinel to be used as a default argument for functions that need
+# to distinguish between a kwarg set to None and an unset kwarg.
+_sentinel = object()
+
+
+# node_get()
+#
+# Fetches a value from a dictionary node and checks it for
+# an expected value. Use default_value when parsing a value
+# which is only optionally supplied.
+#
+# Args:
+# node (dict): The dictionary node
+# expected_type (type): The expected type for the value being searched
+# key (str): The key to get a value for in node
+#    indices (list of ints): Optionally descend into lists of lists
+# default_value: Optionally return this value if the key is not found
+# allow_none: (bool): Allow None to be a valid value
+#
+# Returns:
+# The value if found in node, otherwise default_value is returned
+#
+# Raises:
+# LoadError, when the value found is not of the expected type
+#
+# Note:
+# Returned strings are stripped of leading and trailing whitespace
+#
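+# Example (a sketch; 'element_node' is assumed to be a loaded mapping node):
+#
+#   kind = node_get(element_node, str, 'kind')
+#   depends = node_get(element_node, list, 'depends', default_value=[])
+#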
+def node_get(node, expected_type, key, indices=None, *, default_value=_sentinel, allow_none=False):
+ assert type(node) is Node
+
+ if indices is None:
+ if default_value is _sentinel:
+ value = node[0].get(key, Node(default_value, None, 0, 0))
+ else:
+ value = node[0].get(key, Node(default_value, None, 0, next(_SYNTHETIC_COUNTER)))
+
+ if value[0] is _sentinel:
+ provenance = node_get_provenance(node)
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Dictionary did not contain expected key '{}'".format(provenance, key))
+ else:
+ # Implied type check of the element itself
+ # No need to synthesise useful node content as we destructure it immediately
+ value = Node(node_get(node, list, key), None, 0, 0)
+ for index in indices:
+ value = value[0][index]
+ if type(value) is not Node:
+ value = (value,)
+
+ # Optionally allow None as a valid value for any type
+ if value[0] is None and (allow_none or default_value is None):
+ return None
+
+ if (expected_type is not None) and (not isinstance(value[0], expected_type)):
+ # Attempt basic conversions if possible, typically we want to
+ # be able to specify numeric values and convert them to strings,
+        # but we don't want to try converting dicts/lists
+ try:
+ if (expected_type == bool and isinstance(value[0], str)):
+                # Don't coerce strings to booleans with a generic conversion, as that would make a "False" string evaluate to True
+ # We don't structure into full nodes since there's no need.
+ if value[0] in ('True', 'true'):
+ value = (True, None, 0, 0)
+ elif value[0] in ('False', 'false'):
+ value = (False, None, 0, 0)
+ else:
+ raise ValueError()
+ elif not (expected_type == list or
+ expected_type == dict or
+ isinstance(value[0], (list, dict))):
+ value = (expected_type(value[0]), None, 0, 0)
+ else:
+ raise ValueError()
+ except (ValueError, TypeError):
+ provenance = node_get_provenance(node, key=key, indices=indices)
+ if indices:
+ path = [key]
+ path.extend("[{:d}]".format(i) for i in indices)
+ path = "".join(path)
+ else:
+ path = key
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Value of '{}' is not of the expected type '{}'"
+ .format(provenance, path, expected_type.__name__))
+
+ # Now collapse lists, and scalars, to their value, leaving nodes as-is
+ if type(value[0]) is not dict:
+ value = value[0]
+
+ # Trim it at the bud, let all loaded strings from yaml be stripped of whitespace
+ if type(value) is str:
+ value = value.strip()
+
+ elif type(value) is list:
+ # Now we create a fresh list which unwraps the str and list types
+ # semi-recursively.
+ value = __trim_list_provenance(value)
+
+ return value
+
+
+def __trim_list_provenance(value):
+ ret = []
+ for entry in value:
+ if type(entry) is not Node:
+ entry = (entry, None, 0, 0)
+ if type(entry[0]) is list:
+ ret.append(__trim_list_provenance(entry[0]))
+ elif type(entry[0]) is dict:
+ ret.append(entry)
+ else:
+ ret.append(entry[0])
+ return ret
+
+
+# node_set()
+#
+# Set an item within the node. If using `indices` be aware that the entry must
+# already exist, or else a KeyError will be raised. Use `node_extend_list` to
+# create entries before using `node_set`
+#
+# Args:
+# node (tuple): The node
+# key (str): The key name
+# value: The value
+# indices: Any indices to index into the list referenced by key, like in
+# `node_get` (must be a list of integers)
+#
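+# Example (a sketch; 'node' is assumed to be a loaded mapping node):
+#
+#   node_set(node, 'kind', 'autotools')
+#   node_extend_list(node, 'depends', 1, "")
+#   node_set(node, 'depends', 'base.bst', indices=[0])
+#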
+def node_set(node, key, value, indices=None):
+ if indices:
+ node = node[0][key]
+ key = indices.pop()
+ for idx in indices:
+ node = node[0][idx]
+ if type(value) is Node:
+ node[0][key] = value
+ else:
+ try:
+ # Need to do this just in case we're modifying a list
+ old_value = node[0][key]
+ except KeyError:
+ old_value = None
+ if old_value is None:
+ node[0][key] = Node(value, node[1], node[2], next(_SYNTHETIC_COUNTER))
+ else:
+ node[0][key] = Node(value, old_value[1], old_value[2], old_value[3])
+
+
+# node_extend_list()
+#
+# Extend a list inside a node to a given length, using the passed
+# default value to fill it out.
+#
+# Valid default values are:
+# Any string
+# An empty dict
+# An empty list
+#
+# Args:
+# node (node): The node
+# key (str): The list name in the node
+# length (int): The length to extend the list to
+# default (any): The default value to extend with.
+def node_extend_list(node, key, length, default):
+ assert type(default) is str or default in ([], {})
+
+ list_node = node[0].get(key)
+ if list_node is None:
+ list_node = node[0][key] = Node([], node[1], node[2], next(_SYNTHETIC_COUNTER))
+
+ assert type(list_node[0]) is list
+
+ the_list = list_node[0]
+ def_type = type(default)
+
+ file_index = node[1]
+ if the_list:
+ line_num = the_list[-1][2]
+ else:
+ line_num = list_node[2]
+
+ while length > len(the_list):
+ if def_type is str:
+ value = default
+ elif def_type is list:
+ value = []
+ else:
+ value = {}
+
+ line_num += 1
+
+ the_list.append(Node(value, file_index, line_num, next(_SYNTHETIC_COUNTER)))
+
+
+# node_items()
+#
+# A convenience generator for iterating over loaded key/value
+# tuples in a dictionary loaded from project YAML.
+#
+# Args:
+# node (dict): The dictionary node
+#
+# Yields:
+# (str): The key name
+# (anything): The value for the key
+#
+def node_items(node):
+ if type(node) is not Node:
+ node = Node(node, None, 0, 0)
+ for key, value in node[0].items():
+ if type(value) is not Node:
+ value = Node(value, None, 0, 0)
+ if type(value[0]) is dict:
+ yield (key, value)
+ elif type(value[0]) is list:
+ yield (key, __trim_list_provenance(value[0]))
+ else:
+ yield (key, value[0])
+
+
+# node_keys()
+#
+# A convenience generator for iterating over loaded keys
+# in a dictionary loaded from project YAML.
+#
+# Args:
+# node (dict): The dictionary node
+#
+# Yields:
+# (str): The key name
+#
+def node_keys(node):
+ if type(node) is not Node:
+ node = Node(node, None, 0, 0)
+ yield from node[0].keys()
+
+
+# node_del()
+#
+# A convenience function for removing a key from a dictionary
+# node loaded from project YAML.
+#
+# Args:
+# node (dict): The dictionary node
+# key (str): The key we want to remove
+#    safe (bool): Whether to ignore a missing key rather than raise a KeyError
+#
+def node_del(node, key, safe=False):
+ try:
+ del node[0][key]
+ except KeyError:
+ if not safe:
+ raise
+
+
+# is_node()
+#
+# A test method which returns whether or not the passed in value
+# is a valid YAML node. It is not valid to call this on a Node
+# object which is not a Mapping.
+#
+# Args:
+# maybenode (any): The object to test for nodeness
+#
+# Returns:
+# (bool): Whether or not maybenode was a Node
+#
+def is_node(maybenode):
+ # It's a programming error to give this a Node which isn't a mapping
+ # so assert that.
+ assert (type(maybenode) is not Node) or (type(maybenode[0]) is dict)
+ # Now return the type check
+ return type(maybenode) is Node
+
+
+# new_synthetic_file()
+#
+# Create a new synthetic mapping node, with an associated file entry
+# (in _FILE_LIST) such that later tracking can correctly determine which
+# file needs writing to in order to persist the changes.
+#
+# Args:
+# filename (str): The name of the synthetic file to create
+# project (Project): The optional project to associate this synthetic file with
+#
+# Returns:
+# (Node): An empty YAML mapping node, whose provenance is to this new
+# synthetic file
+#
+def new_synthetic_file(filename, project=None):
+ file_index = len(_FILE_LIST)
+ node = Node({}, file_index, 0, 0)
+ _FILE_LIST.append((filename,
+ filename,
+ "<synthetic {}>".format(filename),
+ node,
+ project))
+ return node
+
+
+# new_empty_node()
+#
+# Args:
+# ref_node (Node): Optional node whose provenance should be referenced
+#
+# Returns
+# (Node): A new empty YAML mapping node
+#
+def new_empty_node(ref_node=None):
+ if ref_node is not None:
+ return Node({}, ref_node[1], ref_node[2], next(_SYNTHETIC_COUNTER))
+ else:
+ return Node({}, None, 0, 0)
+
+
+# new_node_from_dict()
+#
+# Args:
+# indict (dict): The input dictionary
+#
+# Returns:
+# (Node): A new synthetic YAML tree which represents this dictionary
+#
+def new_node_from_dict(indict):
+ ret = {}
+ for k, v in indict.items():
+ vtype = type(v)
+ if vtype is dict:
+ ret[k] = new_node_from_dict(v)
+ elif vtype is list:
+ ret[k] = __new_node_from_list(v)
+ else:
+ ret[k] = Node(str(v), None, 0, next(_SYNTHETIC_COUNTER))
+ return Node(ret, None, 0, next(_SYNTHETIC_COUNTER))
+
+
+# Internal function to help new_node_from_dict() to handle lists
+def __new_node_from_list(inlist):
+ ret = []
+ for v in inlist:
+ vtype = type(v)
+ if vtype is dict:
+ ret.append(new_node_from_dict(v))
+ elif vtype is list:
+ ret.append(__new_node_from_list(v))
+ else:
+ ret.append(Node(str(v), None, 0, next(_SYNTHETIC_COUNTER)))
+ return Node(ret, None, 0, next(_SYNTHETIC_COUNTER))
+
+
+# _is_composite_list
+#
+# Checks if the given node is a Mapping with array composition
+# directives.
+#
+# Args:
+# node (value): Any node
+#
+# Returns:
+# (bool): True if node was a Mapping containing only
+# list composition directives
+#
+# Raises:
+# (LoadError): If node was a mapping and contained a mix of
+# list composition directives and other keys
+#
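+# Example (a sketch; 'node' is assumed to be a loaded mapping node and
+# 'target_list' an existing simple list node):
+#
+#   if _is_composite_list(node):
+#       # 'node' carries only '(>)', '(<)' and/or '(=)' directives
+#       _compose_list(target_list, node)
+#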
+def _is_composite_list(node):
+
+ if type(node[0]) is dict:
+ has_directives = False
+ has_keys = False
+
+ for key, _ in node_items(node):
+ if key in ['(>)', '(<)', '(=)']: # pylint: disable=simplifiable-if-statement
+ has_directives = True
+ else:
+ has_keys = True
+
+ if has_keys and has_directives:
+ provenance = node_get_provenance(node)
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Dictionary contains array composition directives and arbitrary keys"
+ .format(provenance))
+ return has_directives
+
+ return False
+
+
+# _compose_composite_list()
+#
+# Composes a composite list (i.e. a dict with list composition directives)
+# on top of a target list which is a composite list itself.
+#
+# Args:
+# target (Node): A composite list
+# source (Node): A composite list
+#
+def _compose_composite_list(target, source):
+ clobber = source[0].get("(=)")
+ prefix = source[0].get("(<)")
+ suffix = source[0].get("(>)")
+ if clobber is not None:
+ # We want to clobber the target list
+ # which basically means replacing the target list
+ # with ourselves
+ target[0]["(=)"] = clobber
+ if prefix is not None:
+ target[0]["(<)"] = prefix
+ elif "(<)" in target[0]:
+ target[0]["(<)"][0].clear()
+ if suffix is not None:
+ target[0]["(>)"] = suffix
+ elif "(>)" in target[0]:
+ target[0]["(>)"][0].clear()
+ else:
+ # Not clobbering, so prefix the prefix and suffix the suffix
+ if prefix is not None:
+ if "(<)" in target[0]:
+ for v in reversed(prefix[0]):
+ target[0]["(<)"][0].insert(0, v)
+ else:
+ target[0]["(<)"] = prefix
+ if suffix is not None:
+ if "(>)" in target[0]:
+ target[0]["(>)"][0].extend(suffix[0])
+ else:
+ target[0]["(>)"] = suffix
+
+
+# _compose_list()
+#
+# Compose a composite list (a dict with composition directives) on top of a
+# simple list.
+#
+# Args:
+# target (Node): The target list to be composed into
+# source (Node): The composition list to be composed from
+#
+def _compose_list(target, source):
+ clobber = source[0].get("(=)")
+ prefix = source[0].get("(<)")
+ suffix = source[0].get("(>)")
+ if clobber is not None:
+ target[0].clear()
+ target[0].extend(clobber[0])
+ if prefix is not None:
+ for v in reversed(prefix[0]):
+ target[0].insert(0, v)
+ if suffix is not None:
+ target[0].extend(suffix[0])
+
+
+# composite_dict()
+#
+# Compose one mapping node onto another
+#
+# Args:
+# target (Node): The target to compose into
+# source (Node): The source to compose from
+# path (list): The path to the current composition node
+#
+# Raises: CompositeError
+#
+def composite_dict(target, source, path=None):
+ if path is None:
+ path = []
+ for k, v in source[0].items():
+ path.append(k)
+ if type(v[0]) is list:
+ # List clobbers anything list-like
+ target_value = target[0].get(k)
+ if not (target_value is None or
+ type(target_value[0]) is list or
+ _is_composite_list(target_value)):
+ raise CompositeError(path,
+ "{}: List cannot overwrite {} at: {}"
+ .format(node_get_provenance(source, k),
+ k,
+ node_get_provenance(target, k)))
+ # Looks good, clobber it
+ target[0][k] = v
+ elif _is_composite_list(v):
+ if k not in target[0]:
+ # Composite list clobbers empty space
+ target[0][k] = v
+ elif type(target[0][k][0]) is list:
+ # Composite list composes into a list
+ _compose_list(target[0][k], v)
+ elif _is_composite_list(target[0][k]):
+ # Composite list merges into composite list
+ _compose_composite_list(target[0][k], v)
+ else:
+ # Else composing on top of normal dict or a scalar, so raise...
+ raise CompositeError(path,
+ "{}: Cannot compose lists onto {}".format(
+ node_get_provenance(v),
+ node_get_provenance(target[0][k])))
+ elif type(v[0]) is dict:
+ # We're composing a dict into target now
+ if k not in target[0]:
+ # Target lacks a dict at that point, make a fresh one with
+ # the same provenance as the incoming dict
+ target[0][k] = Node({}, v[1], v[2], v[3])
+            if type(target[0][k][0]) is not dict:
+ raise CompositeError(path,
+ "{}: Cannot compose dictionary onto {}".format(
+ node_get_provenance(v),
+ node_get_provenance(target[0][k])))
+ composite_dict(target[0][k], v, path)
+ else:
+ target_value = target[0].get(k)
+ if target_value is not None and type(target_value[0]) is not str:
+ raise CompositeError(path,
+ "{}: Cannot compose scalar on non-scalar at {}".format(
+ node_get_provenance(v),
+ node_get_provenance(target[0][k])))
+ target[0][k] = v
+ path.pop()
+
+
+# Like composite_dict(), but raises an all purpose LoadError for convenience
+#
+def composite(target, source):
+ assert type(source[0]) is dict
+ assert type(target[0]) is dict
+
+ try:
+ composite_dict(target, source)
+ except CompositeError as e:
+ source_provenance = node_get_provenance(source)
+ error_prefix = ""
+ if source_provenance:
+ error_prefix = "{}: ".format(source_provenance)
+ raise LoadError(LoadErrorReason.ILLEGAL_COMPOSITE,
+ "{}Failure composing {}: {}"
+ .format(error_prefix,
+ e.path,
+ e.message)) from e
+
+
+# Like composite(target, source), but where target overrides source instead.
+#
+def composite_and_move(target, source):
+ composite(source, target)
+
+ to_delete = [key for key in target[0].keys() if key not in source[0]]
+ for key, value in source[0].items():
+ target[0][key] = value
+ for key in to_delete:
+ del target[0][key]
+
+
+# Types we can short-circuit in node_sanitize for speed.
+__SANITIZE_SHORT_CIRCUIT_TYPES = (int, float, str, bool)
+
+
+# node_sanitize()
+#
+# Returns an alphabetically ordered recursive copy
+# of the source node with internal provenance information stripped.
+#
+# Only dicts are ordered, list elements are left in order.
+#
+def node_sanitize(node, *, dict_type=OrderedDict):
+ node_type = type(node)
+
+ # If we have an unwrappable node, unwrap it
+ if node_type is Node:
+ node = node[0]
+ node_type = type(node)
+
+ # Short-circuit None which occurs ca. twice per element
+ if node is None:
+ return node
+
+ # Next short-circuit integers, floats, strings, booleans, and tuples
+ if node_type in __SANITIZE_SHORT_CIRCUIT_TYPES:
+ return node
+
+ # Now short-circuit lists.
+ elif node_type is list:
+ return [node_sanitize(elt, dict_type=dict_type) for elt in node]
+
+ # Finally dict, and other Mappings need special handling
+ elif node_type is dict:
+ result = dict_type()
+
+ key_list = [key for key, _ in node.items()]
+ for key in sorted(key_list):
+ result[key] = node_sanitize(node[key], dict_type=dict_type)
+
+ return result
+
+ # Sometimes we're handed tuples and we can't be sure what they contain
+ # so we have to sanitize into them
+ elif node_type is tuple:
+ return tuple((node_sanitize(v, dict_type=dict_type) for v in node))
+
+ # Everything else just gets returned as-is.
+ return node
+
+
+# node_validate()
+#
+# Validate the node so as to ensure the user has not specified
+# any keys which are unrecognized by buildstream (usually this
+# means a typo which would otherwise not trigger an error).
+#
+# Args:
+# node (dict): A dictionary loaded from YAML
+# valid_keys (list): A list of valid keys for the specified node
+#
+# Raises:
+# LoadError: In the case that the specified node contained
+# one or more invalid keys
+#
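+# Example (a sketch; the list of valid keys is illustrative only):
+#
+#   node_validate(node, ['kind', 'depends', 'sources', 'config'])
+#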
+def node_validate(node, valid_keys):
+
+ # Probably the fastest way to do this: https://stackoverflow.com/a/23062482
+ valid_keys = set(valid_keys)
+ invalid = next((key for key in node[0] if key not in valid_keys), None)
+
+ if invalid:
+ provenance = node_get_provenance(node, key=invalid)
+ raise LoadError(LoadErrorReason.INVALID_DATA,
+ "{}: Unexpected key: {}".format(provenance, invalid))
+
+
+# Node copying
+#
+# Unfortunately we copy nodes a *lot* and `isinstance()` is super-slow when
+# things from collections.abc get involved. The result is the following
+# intricate but substantially faster group of tuples and the use of `in`.
+#
+# If any of the {node,list}_copy routines raise a ValueError
+# then it's likely additional types need adding to these tuples.
+
+
+# These types just have their value copied
+__QUICK_TYPES = (str, bool)
+
+# These are the directives used to compose lists; we need this because it's
+# slightly faster during the node_final_assertions checks
+__NODE_ASSERT_COMPOSITION_DIRECTIVES = ('(>)', '(<)', '(=)')
+
+
+# node_copy()
+#
+# Make a deep copy of the given YAML node, preserving provenance.
+#
+# Args:
+# source (Node): The YAML node to copy
+#
+# Returns:
+# (Node): A deep copy of source with provenance preserved.
+#
+def node_copy(source):
+ copy = {}
+ for key, value in source[0].items():
+ value_type = type(value[0])
+ if value_type is dict:
+ copy[key] = node_copy(value)
+ elif value_type is list:
+ copy[key] = _list_copy(value)
+ elif value_type in __QUICK_TYPES:
+ copy[key] = value
+ else:
+ raise ValueError("Unable to be quick about node_copy of {}".format(value_type))
+
+ return Node(copy, source[1], source[2], source[3])
+
+
+# Internal function to help node_copy() but for lists.
+def _list_copy(source):
+ copy = []
+ for item in source[0]:
+ item_type = type(item[0])
+ if item_type is dict:
+ copy.append(node_copy(item))
+ elif item_type is list:
+ copy.append(_list_copy(item))
+ elif item_type in __QUICK_TYPES:
+ copy.append(item)
+ else:
+ raise ValueError("Unable to be quick about list_copy of {}".format(item_type))
+
+ return Node(copy, source[1], source[2], source[3])
+
+
+# node_final_assertions()
+#
+# This must be called on a fully loaded and composited node,
+# after all composition has completed.
+#
+# Args:
+# node (Mapping): The final composited node
+#
+# Raises:
+# (LoadError): If any assertions fail
+#
+def node_final_assertions(node):
+ assert type(node) is Node
+
+ for key, value in node[0].items():
+
+        # Assert that list composition directives don't remain; if they do,
+        # it indicates that the user intended to override a list which
+        # never existed in the underlying data
+ #
+ if key in __NODE_ASSERT_COMPOSITION_DIRECTIVES:
+ provenance = node_get_provenance(node, key)
+ raise LoadError(LoadErrorReason.TRAILING_LIST_DIRECTIVE,
+ "{}: Attempt to override non-existing list".format(provenance))
+
+ value_type = type(value[0])
+
+ if value_type is dict:
+ node_final_assertions(value)
+ elif value_type is list:
+ _list_final_assertions(value)
+
+
+# Helper function for node_final_assertions(), but for lists.
+def _list_final_assertions(values):
+ for value in values[0]:
+ value_type = type(value[0])
+
+ if value_type is dict:
+ node_final_assertions(value)
+ elif value_type is list:
+ _list_final_assertions(value)
+
+
+# assert_symbol_name()
+#
+# A helper function to check if a loaded string is a valid symbol
+# name and to raise a consistent LoadError if not. For strings which
+# are required to be symbols.
+#
+# Args:
+# provenance (Provenance): The provenance of the loaded symbol, or None
+# symbol_name (str): The loaded symbol name
+# purpose (str): The purpose of the string, for an error message
+# allow_dashes (bool): Whether dashes are allowed for this symbol
+#
+# Raises:
+# LoadError: If the symbol_name is invalid
+#
+# Note that dashes are generally preferred for variable names and
+# usage in YAML, but things such as option names which will be
+# evaluated with jinja2 cannot use dashes.
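+#
+# Example (a sketch; a None provenance is permitted):
+#
+#   assert_symbol_name(None, 'debug_build', 'option name', allow_dashes=False)
+#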
+def assert_symbol_name(provenance, symbol_name, purpose, *, allow_dashes=True):
+ valid_chars = string.digits + string.ascii_letters + '_'
+ if allow_dashes:
+ valid_chars += '-'
+
+ valid = True
+ if not symbol_name:
+ valid = False
+ elif any(x not in valid_chars for x in symbol_name):
+ valid = False
+ elif symbol_name[0] in string.digits:
+ valid = False
+
+ if not valid:
+ detail = "Symbol names must contain only alphanumeric characters, " + \
+ "may not start with a digit, and may contain underscores"
+ if allow_dashes:
+ detail += " or dashes"
+
+ message = "Invalid symbol name for {}: '{}'".format(purpose, symbol_name)
+ if provenance is not None:
+ message = "{}: {}".format(provenance, message)
+
+ raise LoadError(LoadErrorReason.INVALID_SYMBOL_NAME,
+ message, detail=detail)
+
+
+# node_find_target()
+#
+# Searches the given node tree for the given target node.
+#
+# This is typically used when trying to walk a path to a given node
+# for the purpose of then modifying a similar tree of objects elsewhere
+#
+# If the key is provided, then we actually hunt for the node represented by
+# target[key] and return its container, rather than hunting for target directly
+#
+# Args:
+# node (Node): The node at the root of the tree to search
+# target (Node): The node you are looking for in that tree
+# key (str): Optional string key within target node
+#
+# Returns:
+# (list): A path from `node` to `target` or None if `target` is not in the subtree
+def node_find_target(node, target, *, key=None):
+ assert type(node) is Node
+ assert type(target) is Node
+ if key is not None:
+ target = target[0][key]
+
+ path = []
+ if _walk_find_target(node, path, target):
+ if key:
+ # Remove key from end of path
+ path = path[:-1]
+ return path
+ return None
+
+
+# Helper for node_find_target() which walks a value
+def _walk_find_target(node, path, target):
+ if node[1:] == target[1:]:
+ return True
+ elif type(node[0]) is dict:
+ return _walk_dict_node(node, path, target)
+ elif type(node[0]) is list:
+ return _walk_list_node(node, path, target)
+ return False
+
+
+# Helper for node_find_target() which walks a list
+def _walk_list_node(node, path, target):
+ for i, v in enumerate(node[0]):
+ path.append(i)
+ if _walk_find_target(v, path, target):
+ return True
+ del path[-1]
+ return False
+
+
+# Helper for node_find_target() which walks a mapping
+def _walk_dict_node(node, path, target):
+ for k, v in node[0].items():
+ path.append(k)
+ if _walk_find_target(v, path, target):
+ return True
+ del path[-1]
+ return False
+
+
+###############################################################################
+
+# Roundtrip code
+
+# Always represent things consistently:
+
+yaml.RoundTripRepresenter.add_representer(OrderedDict,
+ yaml.SafeRepresenter.represent_dict)
+
+# Always parse things consistently
+
+yaml.RoundTripConstructor.add_constructor(u'tag:yaml.org,2002:int',
+ yaml.RoundTripConstructor.construct_yaml_str)
+yaml.RoundTripConstructor.add_constructor(u'tag:yaml.org,2002:float',
+ yaml.RoundTripConstructor.construct_yaml_str)
+yaml.RoundTripConstructor.add_constructor(u'tag:yaml.org,2002:bool',
+ yaml.RoundTripConstructor.construct_yaml_str)
+yaml.RoundTripConstructor.add_constructor(u'tag:yaml.org,2002:null',
+ yaml.RoundTripConstructor.construct_yaml_str)
+yaml.RoundTripConstructor.add_constructor(u'tag:yaml.org,2002:timestamp',
+ yaml.RoundTripConstructor.construct_yaml_str)
+
+
+# HardlineDumper
+#
+# This is a dumper used during roundtrip_dump which forces every scalar to be
+# a plain string, in order to match the output format to the input format.
+#
+# If you discover something is broken, please add a test case to the roundtrip
+# test in tests/internals/yaml/roundtrip-test.yaml
+#
+class HardlineDumper(yaml.RoundTripDumper):
+ def __init__(self, *args, **kwargs):
+ yaml.RoundTripDumper.__init__(self, *args, **kwargs)
+ # For each of YAML 1.1 and 1.2, force everything to be a plain string
+ for version in [(1, 1), (1, 2), None]:
+ self.add_version_implicit_resolver(
+ version,
+ u'tag:yaml.org,2002:str',
+ yaml.util.RegExp(r'.*'),
+ None)
+
+
+# roundtrip_load()
+#
+# Load a YAML file into memory in a form which allows roundtripping as best
+# as ruamel permits.
+#
+# Note, the returned objects can be treated as Mappings and Lists and Strings
+# but replacing content wholesale with plain dicts and lists may result
+# in a loss of comments and formatting.
+#
+# Args:
+# filename (str): The file to load in
+# allow_missing (bool): Optionally set this to True to allow missing files
+#
+# Returns:
+# (Mapping): The loaded YAML mapping.
+#
+# Raises:
+# (LoadError): If the file is missing, or a directory, this is raised.
+# Also if the YAML is malformed.
+#
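+# Example (a sketch; 'project.conf' is only an illustrative filename):
+#
+#   data = roundtrip_load('project.conf')
+#   data['name'] = 'renamed-project'
+#   roundtrip_dump(data, file='project.conf')
+#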
+def roundtrip_load(filename, *, allow_missing=False):
+ try:
+ with open(filename, "r") as fh:
+ data = fh.read()
+ contents = roundtrip_load_data(data, filename=filename)
+ except FileNotFoundError as e:
+ if allow_missing:
+ # Missing files are always empty dictionaries
+ return {}
+ else:
+ raise LoadError(LoadErrorReason.MISSING_FILE,
+ "Could not find file at {}".format(filename)) from e
+ except IsADirectoryError as e:
+ raise LoadError(LoadErrorReason.LOADING_DIRECTORY,
+ "{} is a directory."
+ .format(filename)) from e
+ return contents
+
+
+# roundtrip_load_data()
+#
+# Parse the given contents as YAML, returning them as a roundtrippable data
+# structure.
+#
+# A lack of content will be returned as an empty mapping.
+#
+# Args:
+# contents (str): The contents to be parsed as YAML
+# filename (str): Optional filename to be used in error reports
+#
+# Returns:
+# (Mapping): The loaded YAML mapping
+#
+# Raises:
+# (LoadError): Raised on invalid YAML, or YAML which parses to something other
+# than a Mapping
+#
+def roundtrip_load_data(contents, *, filename=None):
+ try:
+ contents = yaml.load(contents, yaml.RoundTripLoader, preserve_quotes=True)
+ except (yaml.scanner.ScannerError, yaml.composer.ComposerError, yaml.parser.ParserError) as e:
+ raise LoadError(LoadErrorReason.INVALID_YAML,
+ "Malformed YAML:\n\n{}\n\n{}\n".format(e.problem, e.problem_mark)) from e
+
+ # Special case empty files at this point
+ if contents is None:
+ # We'll make them empty mappings like the main Node loader
+ contents = {}
+
+ if not isinstance(contents, Mapping):
+ raise LoadError(LoadErrorReason.INVALID_YAML,
+ "YAML file has content of type '{}' instead of expected type 'dict': {}"
+ .format(type(contents).__name__, filename))
+
+ return contents
+
+
+# roundtrip_dump()
+#
+# Dumps the given contents as a YAML file. Ideally the contents came from
+# parsing with `roundtrip_load` or `roundtrip_load_data` so that they will be
+# dumped in the same form as they came from.
+#
+# If `file` is a string, it is the filename to write to; if `file` has a
+# `write` method, it is treated as a stream; otherwise output goes to stdout.
+#
+# Args:
+# contents (Mapping or list): The content to write out as YAML.
+# file (any): The file to write to
+#
+def roundtrip_dump(contents, file=None):
+ assert type(contents) is not Node
+
+ def stringify_dict(thing):
+ for k, v in thing.items():
+ if type(v) is str:
+ pass
+ elif isinstance(v, Mapping):
+ stringify_dict(v)
+ elif isinstance(v, Sequence):
+ stringify_list(v)
+ else:
+ thing[k] = str(v)
+
+ def stringify_list(thing):
+ for i, v in enumerate(thing):
+ if type(v) is str:
+ pass
+ elif isinstance(v, Mapping):
+ stringify_dict(v)
+ elif isinstance(v, Sequence):
+ stringify_list(v)
+ else:
+ thing[i] = str(v)
+
+ contents = deepcopy(contents)
+ stringify_dict(contents)
+
+ with ExitStack() as stack:
+ if type(file) is str:
+ from . import utils
+ f = stack.enter_context(utils.save_file_atomic(file, 'w'))
+ elif hasattr(file, 'write'):
+ f = file
+ else:
+ f = sys.stdout
+ yaml.round_trip_dump(contents, f, Dumper=HardlineDumper)
diff --git a/src/buildstream/buildelement.py b/src/buildstream/buildelement.py
new file mode 100644
index 000000000..158f5fc11
--- /dev/null
+++ b/src/buildstream/buildelement.py
@@ -0,0 +1,299 @@
+#
+# Copyright (C) 2016 Codethink Limited
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+"""
+BuildElement - Abstract class for build elements
+================================================
+The BuildElement class is a convenience element one can derive from for
+implementing the most common case of element.
+
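+For illustration, a minimal element plugin deriving from BuildElement might look
+something like the following sketch (the ``make`` kind here is purely an example,
+and a real plugin would also ship a corresponding defaults yaml declaring its
+commands):
+
+.. code:: python
+
+   from buildstream import BuildElement
+
+
+   # Element implementation for a hypothetical 'make' kind
+   class MakeElement(BuildElement):
+       pass
+
+
+   # Plugin entry point
+   def setup():
+       return MakeElement
+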
+.. _core_buildelement_builtins:
+
+Built-in functionality
+----------------------
+
+The BuildElement base class provides built-in functionality that can be
+overridden by the individual plugins.
+
+This section gives a brief summary of how some of the common features work;
+some of them, and the variables they use, are detailed further in the following
+sections.
+
+The `strip-binaries` variable
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The `strip-binaries` variable is **empty** by default. You need to supply the
+appropriate commands depending on the system you are building for.
+If you are targeting Linux, commands known to work are the ones used by the
+`freedesktop-sdk <https://freedesktop-sdk.io/>`_; you can take a look at them in their
+`project.conf <https://gitlab.com/freedesktop-sdk/freedesktop-sdk/blob/freedesktop-sdk-18.08.21/project.conf#L74>`_
+
+Location for running commands
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``command-subdir`` variable sets where the build commands will be executed.
+If the directory does not exist it will be created. It is defined relative to
+the buildroot.
+
+Location for configuring the project
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``conf-root`` variable is defined by default as ``.`` and is the location
+that specific build elements can use to look for build configuration files. This
+is used by elements such as autotools, cmake, distutils, meson, pip and qmake.
+
+The configuration commands are run in ``command-subdir``, and by default
+``conf-root`` is ``.``, so if ``conf-root`` is not set the configuration files
+in ``command-subdir`` will be used.
+
+By setting ``conf-root`` to ``"%{build-root}/Source/conf_location"`` and your
+source element's ``directory`` variable to ``Source``, the configuration
+files in the ``conf_location`` directory within your source will be used.
+The current working directory when your configuration command is run will still
+be wherever you set your ``command-subdir`` to be, regardless of where the
+configure scripts are located via ``conf-root``.
+
+.. note::
+
+ The ``conf-root`` variable is available since :ref:`format version 17 <project_format_version>`
+
+Install Location
+~~~~~~~~~~~~~~~~
+
+You should not change the ``install-root`` variable as it is a special
+writeable location in the sandbox, but it is useful when writing custom
+install instructions as it may need to be supplied as the ``DESTDIR``; please
+see the :mod:`cmake <elements.cmake>` build element for an example.
+
+Abstract method implementations
+-------------------------------
+
+Element.configure_sandbox()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+In :func:`Element.configure_sandbox() <buildstream.element.Element.configure_sandbox>`,
+the BuildElement will ensure that the sandbox locations described by the ``%{build-root}``
+and ``%{install-root}`` variables are marked and will be mounted read-write for the
+:func:`assemble phase<buildstream.element.Element.configure_sandbox>`.
+
+The working directory for the sandbox will be configured to be the ``%{build-root}``,
+unless the ``%{command-subdir}`` variable is specified for the element in question,
+in which case the working directory will be configured as ``%{build-root}/%{command-subdir}``.
+
+
+Element.stage()
+~~~~~~~~~~~~~~~
+In :func:`Element.stage() <buildstream.element.Element.stage>`, the BuildElement
+will do the following operations:
+
+* Stage all the dependencies in the :func:`Scope.BUILD <buildstream.element.Scope.BUILD>`
+ scope into the sandbox root.
+
+* Run the integration commands for all staged dependencies using
+ :func:`Element.integrate() <buildstream.element.Element.integrate>`
+
+* Stage any Source on the given element to the ``%{build-root}`` location
+ inside the sandbox, using
+  :func:`Element.stage_sources() <buildstream.element.Element.stage_sources>`
+
+
+Element.prepare()
+~~~~~~~~~~~~~~~~~
+In :func:`Element.prepare() <buildstream.element.Element.prepare>`,
+the BuildElement will run ``configure-commands``, which are used to
+run one-off preparations that should not be repeated for a single
+build directory.
+
+
+Element.assemble()
+~~~~~~~~~~~~~~~~~~
+In :func:`Element.assemble() <buildstream.element.Element.assemble>`, the
+BuildElement will proceed to run sandboxed commands which are expected to be
+found in the element configuration.
+
+Commands are run in the following order:
+
+* ``build-commands``: Commands to build the element
+* ``install-commands``: Commands to install the results into ``%{install-root}``
+* ``strip-commands``: Commands to strip debugging symbols from installed binaries
+
+The result of the build is expected to end up in ``%{install-root}``, and
+as such, the Element.assemble() method will return the ``%{install-root}`` for
+artifact collection purposes.
+"""
+
+import os
+
+from .element import Element
+from .sandbox import SandboxFlags
+from .types import Scope
+
+
+# This list is preserved because of an unfortunate situation: we
+# need to remove these older commands, which were secret and never
+# documented, but without breaking the cache keys.
+_legacy_command_steps = ['bootstrap-commands',
+ 'configure-commands',
+ 'build-commands',
+ 'test-commands',
+ 'install-commands',
+ 'strip-commands']
+
+_command_steps = ['configure-commands',
+ 'build-commands',
+ 'install-commands',
+ 'strip-commands']
+
+
+class BuildElement(Element):
+
+ #############################################################
+ # Abstract Method Implementations #
+ #############################################################
+ def configure(self, node):
+
+ self.__commands = {} # pylint: disable=attribute-defined-outside-init
+
+ # FIXME: Currently this forcefully validates configurations
+ # for all BuildElement subclasses so they are unable to
+ # extend the configuration
+ self.node_validate(node, _command_steps)
+
+ for command_name in _legacy_command_steps:
+ if command_name in _command_steps:
+ self.__commands[command_name] = self.__get_commands(node, command_name)
+ else:
+ self.__commands[command_name] = []
+
+ def preflight(self):
+ pass
+
+ def get_unique_key(self):
+ dictionary = {}
+
+ for command_name, command_list in self.__commands.items():
+ dictionary[command_name] = command_list
+
+        # Specifying notparallel for a given element affects the
+ # cache key, while having the side effect of setting max-jobs to 1,
+ # which is normally automatically resolved and does not affect
+ # the cache key.
+ if self.get_variable('notparallel'):
+ dictionary['notparallel'] = True
+
+ return dictionary
+
+ def configure_sandbox(self, sandbox):
+ build_root = self.get_variable('build-root')
+ install_root = self.get_variable('install-root')
+
+ # Tell the sandbox to mount the build root and install root
+ sandbox.mark_directory(build_root)
+ sandbox.mark_directory(install_root)
+
+ # Allow running all commands in a specified subdirectory
+ command_subdir = self.get_variable('command-subdir')
+ if command_subdir:
+ command_dir = os.path.join(build_root, command_subdir)
+ else:
+ command_dir = build_root
+ sandbox.set_work_directory(command_dir)
+
+ # Tell sandbox which directory is preserved in the finished artifact
+ sandbox.set_output_directory(install_root)
+
+ # Setup environment
+ sandbox.set_environment(self.get_environment())
+
+ def stage(self, sandbox):
+
+ # Stage deps in the sandbox root
+ with self.timed_activity("Staging dependencies", silent_nested=True):
+ self.stage_dependency_artifacts(sandbox, Scope.BUILD)
+
+ # Run any integration commands provided by the dependencies
+ # once they are all staged and ready
+ with sandbox.batch(SandboxFlags.NONE, label="Integrating sandbox"):
+ for dep in self.dependencies(Scope.BUILD):
+ dep.integrate(sandbox)
+
+ # Stage sources in the build root
+ self.stage_sources(sandbox, self.get_variable('build-root'))
+
+ def assemble(self, sandbox):
+ # Run commands
+ for command_name in _command_steps:
+ commands = self.__commands[command_name]
+ if not commands or command_name == 'configure-commands':
+ continue
+
+ with sandbox.batch(SandboxFlags.ROOT_READ_ONLY, label="Running {}".format(command_name)):
+ for cmd in commands:
+ self.__run_command(sandbox, cmd)
+
+ # %{install-root}/%{build-root} should normally not be written
+ # to - if an element later attempts to stage to a location
+ # that is not empty, we abort the build - in this case this
+ # will almost certainly happen.
+ staged_build = os.path.join(self.get_variable('install-root'),
+ self.get_variable('build-root'))
+
+ if os.path.isdir(staged_build) and os.listdir(staged_build):
+ self.warn("Writing to %{install-root}/%{build-root}.",
+ detail="Writing to this directory will almost " +
+ "certainly cause an error, since later elements " +
+ "will not be allowed to stage to %{build-root}.")
+
+ # Return the payload, this is configurable but is generally
+ # always the /buildstream-install directory
+ return self.get_variable('install-root')
+
+ def prepare(self, sandbox):
+ commands = self.__commands['configure-commands']
+ if commands:
+ with sandbox.batch(SandboxFlags.ROOT_READ_ONLY, label="Running configure-commands"):
+ for cmd in commands:
+ self.__run_command(sandbox, cmd)
+
+ def generate_script(self):
+ script = ""
+ for command_name in _command_steps:
+ commands = self.__commands[command_name]
+
+ for cmd in commands:
+ script += "(set -ex; {}\n) || exit 1\n".format(cmd)
+
+ return script
+
+ #############################################################
+ # Private Local Methods #
+ #############################################################
+ def __get_commands(self, node, name):
+ list_node = self.node_get_member(node, list, name, [])
+ commands = []
+
+ for i in range(len(list_node)):
+ command = self.node_subst_list_element(node, name, [i])
+ commands.append(command)
+
+ return commands
+
+ def __run_command(self, sandbox, cmd):
+ # Note the -e switch to 'sh' means to exit with an error
+ # if any untested command fails.
+ #
+ sandbox.run(['sh', '-c', '-e', cmd + '\n'],
+ SandboxFlags.ROOT_READ_ONLY,
+ label=cmd)
diff --git a/src/buildstream/data/bst b/src/buildstream/data/bst
new file mode 100644
index 000000000..e38720f77
--- /dev/null
+++ b/src/buildstream/data/bst
@@ -0,0 +1,21 @@
+# BuildStream bash completion scriptlet.
+#
+# On systems which use the bash-completion module for completion
+# discovery with bash, this can be installed in the directory
+# reported by:
+#
+# pkg-config --variable=completionsdir bash-completion
+#
+# If BuildStream is not installed system wide, you can
+# simply source this script to enable completions or append
+# this script to your ~/.bash_completion file.
+#
+_bst_completion() {
+ local IFS=$'\n'
+ COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \
+ COMP_CWORD=$COMP_CWORD \
+ _BST_COMPLETION=complete $1 ) )
+ return 0
+}
+
+complete -F _bst_completion -o nospace bst;
diff --git a/src/buildstream/data/build-all.sh.in b/src/buildstream/data/build-all.sh.in
new file mode 100644
index 000000000..bf5c9f880
--- /dev/null
+++ b/src/buildstream/data/build-all.sh.in
@@ -0,0 +1,40 @@
+#!/bin/sh
+#
+# DO NOT EDIT THIS FILE
+#
+# This is a build script generated by
+# [BuildStream](https://wiki.gnome.org/Projects/BuildStream/).
+#
+# Builds all given modules using their respective scripts.
+
+set -eu
+
+echo "Buildstream native bootstrap script"
+
+export PATH='/usr/bin:/usr/sbin/:/sbin:/bin:/tools/bin:/tools/sbin'
+export SRCDIR='./source'
+
+SUCCESS=false
+CURRENT_MODULE='None'
+
+echo 'Setting up build environment...'
+
+except() {{
+ if [ "$SUCCESS" = true ]; then
+ echo "Done!"
+ else
+ echo "Error building module ${{CURRENT_MODULE}}."
+ fi
+}}
+trap "except" EXIT
+
+for module in {modules}; do
+ CURRENT_MODULE="$module"
+ "$SRCDIR/build-$module"
+
+ if [ -e /sbin/ldconfig ]; then
+ /sbin/ldconfig || true;
+ fi
+done
+
+SUCCESS=true
diff --git a/src/buildstream/data/build-module.sh.in b/src/buildstream/data/build-module.sh.in
new file mode 100644
index 000000000..6e9ea4552
--- /dev/null
+++ b/src/buildstream/data/build-module.sh.in
@@ -0,0 +1,43 @@
+#!/bin/sh
+#
+# DO NOT EDIT THIS FILE
+#
+# This is a build script generated by
+# [BuildStream](https://wiki.gnome.org/Projects/BuildStream/).
+#
+# Builds the module {name}.
+
+set -e
+
+# Prepare the build environment
+echo 'Building {name}'
+
+if [ -d '{build_root}' ]; then
+ rm -rf '{build_root}'
+fi
+
+if [ -d '{install_root}' ]; then
+ rm -rf '{install_root}'
+fi
+
+mkdir -p '{build_root}'
+mkdir -p '{install_root}'
+
+if [ -d "$SRCDIR/{name}/" ]; then
+ cp -a "$SRCDIR/{name}/." '{build_root}'
+fi
+cd '{build_root}'
+
+export PREFIX='{install_root}'
+
+export {variables}
+
+# Build the module
+{commands}
+
+rm -rf '{build_root}'
+
+# Install the module
+echo 'Installing {name}'
+
+(cd '{install_root}'; find . | cpio -umdp /)
diff --git a/src/buildstream/data/projectconfig.yaml b/src/buildstream/data/projectconfig.yaml
new file mode 100644
index 000000000..ee4055cf5
--- /dev/null
+++ b/src/buildstream/data/projectconfig.yaml
@@ -0,0 +1,183 @@
+# Default BuildStream project configuration.
+
+
+# General configuration defaults
+#
+
+# Require format version 0
+format-version: 0
+
+# Elements are found at the project root
+element-path: .
+
+# Store source references in element files
+ref-storage: inline
+
+# Variable Configuration
+#
+variables:
+ # Path configuration, to be used in build instructions.
+ prefix: "/usr"
+ exec_prefix: "%{prefix}"
+ bindir: "%{exec_prefix}/bin"
+ sbindir: "%{exec_prefix}/sbin"
+ libexecdir: "%{exec_prefix}/libexec"
+ datadir: "%{prefix}/share"
+ sysconfdir: "/etc"
+ sharedstatedir: "%{prefix}/com"
+ localstatedir: "/var"
+ lib: "lib"
+ libdir: "%{prefix}/%{lib}"
+ debugdir: "%{libdir}/debug"
+ includedir: "%{prefix}/include"
+ docdir: "%{datadir}/doc"
+ infodir: "%{datadir}/info"
+ mandir: "%{datadir}/man"
+
+ # Indicates the default build directory where input is
+ # normally staged
+ build-root: /buildstream/%{project-name}/%{element-name}
+
+ # Indicates where the build system should look for configuration files
+ conf-root: .
+
+ # Indicates the build installation directory in the sandbox
+ install-root: /buildstream-install
+
+ # You need to override this with the commands specific for your system
+ strip-binaries: ""
+
+ # Generic implementation for reproducible python builds
+ fix-pyc-timestamps: |
+
+ find "%{install-root}" -name '*.pyc' -exec \
+ dd if=/dev/zero of={} bs=1 count=4 seek=4 conv=notrunc ';'
+
+# Base sandbox environment, can be overridden by plugins
+environment:
+ PATH: /usr/bin:/bin:/usr/sbin:/sbin
+ SHELL: /bin/sh
+ TERM: dumb
+ USER: tomjon
+ USERNAME: tomjon
+ LOGNAME: tomjon
+ LC_ALL: C
+ HOME: /tmp
+ TZ: UTC
+
+ # For reproducible builds we use 2011-11-11 as a constant
+ SOURCE_DATE_EPOCH: 1320937200
+
+# List of environment variables which should not be taken into
+# account when calculating a cache key for a given element.
+#
+environment-nocache: []
+
+# Configuration for the sandbox other than environment variables
+# should go in 'sandbox'. This just contains the UID and GID that
+# the user in the sandbox will have. Not all sandboxes will support
+# changing the values.
+sandbox:
+ build-uid: 0
+ build-gid: 0
+
+# Defaults for the 'split-rules' public data found on elements
+# in the 'bst' domain.
+#
+split-rules:
+
+ # The runtime domain includes whatever is needed for the
+ # built element to run, this includes stripped executables
+ # and shared libraries by default.
+ runtime:
+ - |
+ %{bindir}
+ - |
+ %{bindir}/*
+ - |
+ %{sbindir}
+ - |
+ %{sbindir}/*
+ - |
+ %{libexecdir}
+ - |
+ %{libexecdir}/*
+ - |
+ %{libdir}/lib*.so*
+
+ # The devel domain includes additional things which
+ # you may need for development.
+ #
+ # By default this includes header files, static libraries
+ # and other metadata such as pkgconfig files, m4 macros and
+ # libtool archives.
+ devel:
+ - |
+ %{includedir}
+ - |
+ %{includedir}/**
+ - |
+ %{libdir}/lib*.a
+ - |
+ %{libdir}/lib*.la
+ - |
+ %{libdir}/pkgconfig/*.pc
+ - |
+ %{datadir}/pkgconfig/*.pc
+ - |
+ %{datadir}/aclocal/*.m4
+
+ # The debug domain includes debugging information stripped
+ # away from libraries and executables
+ debug:
+ - |
+ %{debugdir}
+ - |
+ %{debugdir}/**
+
+ # The doc domain includes documentation
+ doc:
+ - |
+ %{docdir}
+ - |
+ %{docdir}/**
+ - |
+ %{infodir}
+ - |
+ %{infodir}/**
+ - |
+ %{mandir}
+ - |
+ %{mandir}/**
+
+ # The locale domain includes translations etc
+ locale:
+ - |
+ %{datadir}/locale
+ - |
+ %{datadir}/locale/**
+ - |
+ %{datadir}/i18n
+ - |
+ %{datadir}/i18n/**
+ - |
+ %{datadir}/zoneinfo
+ - |
+ %{datadir}/zoneinfo/**
+
+
+# Default behavior for `bst shell`
+#
+shell:
+
+ # Command to run when `bst shell` does not provide a command
+ #
+ command: [ 'sh', '-i' ]
+
+# Defaults for bst commands
+#
+defaults:
+
+ # Set default target elements to use when none are passed on the command line.
+ # If none are configured in the project, default to all project elements.
+ targets: []
diff --git a/src/buildstream/data/userconfig.yaml b/src/buildstream/data/userconfig.yaml
new file mode 100644
index 000000000..34fd300d1
--- /dev/null
+++ b/src/buildstream/data/userconfig.yaml
@@ -0,0 +1,113 @@
+# Default BuildStream user configuration.
+
+#
+# Work Directories
+#
+#
+# Note that BuildStream forces the XDG Base Directory names
+# into the environment if they are not already set, and allows
+# expansion of '~' and environment variables when specifying
+# paths.
+#
+
+# Location to store sources
+sourcedir: ${XDG_CACHE_HOME}/buildstream/sources
+
+# Root location for other directories in the cache
+cachedir: ${XDG_CACHE_HOME}/buildstream
+
+# Location to store build logs
+logdir: ${XDG_CACHE_HOME}/buildstream/logs
+
+# Default root location for workspaces, blank for no default set.
+workspacedir: .
+
+#
+# Cache
+#
+cache:
+ # Size of the artifact cache in bytes - BuildStream will attempt to keep the
+ # artifact cache within this size.
+ # If the value is suffixed with K, M, G or T, the specified memory size is
+ # parsed as Kilobytes, Megabytes, Gigabytes, or Terabytes (with the base
+ # 1024), respectively.
+ # Alternatively, a percentage value may be specified, which is taken relative
+ # to the size of the file system containing the cache.
+ quota: infinity
+
+ # Whether to pull build trees when downloading element artifacts
+ pull-buildtrees: False
+
+ # Whether to cache build trees on artifact creation:
+ #
+ # always - Always cache artifact build tree content
+ # auto - Only cache build trees when necessary, e.g., for failed builds
+ # never - Never cache artifact build tree content. This is not recommended
+ # for normal users as this breaks core functionality such as
+ # debugging failed builds and may break additional functionality
+ # in future versions.
+ #
+ cache-buildtrees: auto
+
+
+#
+# Scheduler
+#
+scheduler:
+
+ # Maximum number of simultaneous downloading tasks.
+ fetchers: 10
+
+ # Maximum number of simultaneous build tasks.
+ builders: 4
+
+ # Maximum number of simultaneous uploading tasks.
+ pushers: 4
+
+ # Maximum number of retries for network tasks.
+ network-retries: 2
+
+ # What to do when an element fails, if not running in
+ # interactive mode:
+ #
+ # continue - Continue queueing jobs as much as possible
+ # quit - Exit after all ongoing jobs complete
+ # terminate - Terminate any ongoing jobs and exit
+ #
+ on-error: quit
+
+
+#
+# Logging
+#
+logging:
+
+ # The abbreviated cache key length to display in the UI
+ key-length: 8
+
+ # Whether to show extra detailed messages
+ verbose: True
+
+ # Maximum number of lines to print from the
+ # end of a failing build log
+ error-lines: 20
+
+ # Maximum number of lines to print in a detailed
+ # message on the console or in the master log (the full
+ # messages are always recorded in the individual build
+ # logs)
+ message-lines: 20
+
+ # Whether to enable debugging messages
+ debug: False
+
+ # Format string for printing the pipeline at startup, this
+ # also determines the default display format for `bst show`
+ element-format: |
+
+ %{state: >12} %{full-key} %{name} %{workspace-dirs}
+
+ # Format string for all log messages.
+ message-format: |
+
+ [%{elapsed}][%{key}][%{element}] %{action} %{message}
diff --git a/src/buildstream/element.py b/src/buildstream/element.py
new file mode 100644
index 000000000..70158f778
--- /dev/null
+++ b/src/buildstream/element.py
@@ -0,0 +1,3062 @@
+#
+# Copyright (C) 2016-2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+Element - Base element class
+============================
+
+
+.. _core_element_abstract_methods:
+
+Abstract Methods
+----------------
+For loading and configuration purposes, Elements must implement the
+:ref:`Plugin base class abstract methods <core_plugin_abstract_methods>`.
+
+
+.. _core_element_build_phase:
+
+Build Phase
+~~~~~~~~~~~
+The following methods are the foundation of the element's *build
+phase*; they must be implemented by all Element classes, unless
+explicitly stated otherwise (a short sketch follows the list below).
+
+* :func:`Element.configure_sandbox() <buildstream.element.Element.configure_sandbox>`
+
+ Configures the :class:`.Sandbox`. This is called before anything else
+
+* :func:`Element.stage() <buildstream.element.Element.stage>`
+
+ Stage dependencies and :class:`Sources <buildstream.source.Source>` into
+ the sandbox.
+
+* :func:`Element.prepare() <buildstream.element.Element.prepare>`
+
+ Call preparation methods that should only be performed once in the
+ lifetime of a build directory (e.g. autotools' ./configure).
+
+ **Optional**: If left unimplemented, this step will be skipped.
+
+* :func:`Element.assemble() <buildstream.element.Element.assemble>`
+
+ Perform the actual assembly of the element
+
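+Taken together, a hypothetical element implementing the build phase
+might be sketched roughly as follows; the class name, the ``make
+install`` command and the staging choices are purely illustrative,
+and the usual ``from buildstream import Element, Scope, SandboxFlags``
+imports are assumed:
+
+.. code:: python
+
+   class ExampleElement(Element):
+
+       def configure_sandbox(self, sandbox):
+           # Mark the directories we intend to use, then set the working
+           # directory, output directory and environment
+           build_root = self.get_variable('build-root')
+           install_root = self.get_variable('install-root')
+           sandbox.mark_directory(build_root)
+           sandbox.mark_directory(install_root)
+           sandbox.set_work_directory(build_root)
+           sandbox.set_output_directory(install_root)
+           sandbox.set_environment(self.get_environment())
+
+       def stage(self, sandbox):
+           # Stage build dependencies, integrate them, then stage sources
+           self.stage_dependency_artifacts(sandbox, Scope.BUILD)
+           for dep in self.dependencies(Scope.BUILD):
+               dep.integrate(sandbox)
+           self.stage_sources(sandbox, self.get_variable('build-root'))
+
+       def assemble(self, sandbox):
+           # Run an illustrative build command, then collect the
+           # contents of the install root as the artifact
+           sandbox.run(['sh', '-c', '-e', 'make install'],
+                       SandboxFlags.ROOT_READ_ONLY,
+                       label='make install')
+           return self.get_variable('install-root')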
+
+Miscellaneous
+~~~~~~~~~~~~~
+Miscellaneous abstract methods also exist:
+
+* :func:`Element.generate_script() <buildstream.element.Element.generate_script>`
+
+ For the purpose of ``bst source checkout --include-build-scripts``, an Element may optionally implement this.
+
+
+Class Reference
+---------------
+"""
+
+import os
+import re
+import stat
+import copy
+from collections import OrderedDict
+from collections.abc import Mapping
+import contextlib
+from contextlib import contextmanager
+from functools import partial
+from itertools import chain
+import tempfile
+import string
+
+from pyroaring import BitMap # pylint: disable=no-name-in-module
+
+from . import _yaml
+from ._variables import Variables
+from ._versions import BST_CORE_ARTIFACT_VERSION
+from ._exceptions import BstError, LoadError, LoadErrorReason, ImplError, \
+ ErrorDomain, SourceCacheError
+from .utils import UtilError
+from . import utils
+from . import _cachekey
+from . import _signals
+from . import _site
+from ._platform import Platform
+from .plugin import Plugin
+from .sandbox import SandboxFlags, SandboxCommandError
+from .sandbox._config import SandboxConfig
+from .sandbox._sandboxremote import SandboxRemote
+from .types import Consistency, CoreWarnings, Scope, _KeyStrength, _UniquePriorityQueue
+from ._artifact import Artifact
+
+from .storage.directory import Directory
+from .storage._filebaseddirectory import FileBasedDirectory
+from .storage._casbaseddirectory import CasBasedDirectory
+from .storage.directory import VirtualDirectoryError
+
+
+class ElementError(BstError):
+ """This exception should be raised by :class:`.Element` implementations
+ to report errors to the user.
+
+ Args:
+ message (str): The error message to report to the user
+ detail (str): A possibly multiline, more detailed error message
+ reason (str): An optional machine readable reason string, used for test cases
+ collect (str): An optional directory containing partial install contents
+ temporary (bool): An indicator of whether the error may occur if the operation was run again. (*Since: 1.2*)
+ """
+ def __init__(self, message, *, detail=None, reason=None, collect=None, temporary=False):
+ super().__init__(message, detail=detail, domain=ErrorDomain.ELEMENT, reason=reason, temporary=temporary)
+
+ self.collect = collect
+
+
+class Element(Plugin):
+ """Element()
+
+ Base Element class.
+
+ All elements derive from this class, this interface defines how
+ the core will be interacting with Elements.
+ """
+ __defaults = None # The defaults from the yaml file and project
+ __instantiated_elements = {} # A hash of Element by MetaElement
+ __redundant_source_refs = [] # A list of (source, ref) tuples which were redundantly specified
+
+ BST_ARTIFACT_VERSION = 0
+ """The element plugin's artifact version
+
+ Elements must first set this to 1 if they change their unique key
+ structure in a way that would produce a different key for the
+ same input, or introduce a change in the build output for the
+ same unique key. Further changes of this nature require bumping the
+ artifact version.
+ """
+
+ BST_STRICT_REBUILD = False
+ """Whether to rebuild this element in non strict mode if
+ any of the dependencies have changed.
+ """
+
+ BST_FORBID_RDEPENDS = False
+ """Whether to raise exceptions if an element has runtime dependencies.
+
+ *Since: 1.2*
+ """
+
+ BST_FORBID_BDEPENDS = False
+ """Whether to raise exceptions if an element has build dependencies.
+
+ *Since: 1.2*
+ """
+
+ BST_FORBID_SOURCES = False
+ """Whether to raise exceptions if an element has sources.
+
+ *Since: 1.2*
+ """
+
+ BST_VIRTUAL_DIRECTORY = False
+ """Whether to raise exceptions if an element uses Sandbox.get_directory
+ instead of Sandbox.get_virtual_directory.
+
+ *Since: 1.4*
+ """
+
+ BST_RUN_COMMANDS = True
+ """Whether the element may run commands using Sandbox.run.
+
+ *Since: 1.4*
+ """
+
+ def __init__(self, context, project, meta, plugin_conf):
+
+ self.__cache_key_dict = None # Dict for cache key calculation
+ self.__cache_key = None # Our cached cache key
+
+ super().__init__(meta.name, context, project, meta.provenance, "element")
+
+ # Ensure the project is fully loaded here rather than later on
+ if not meta.is_junction:
+ project.ensure_fully_loaded()
+
+ self.normal_name = _get_normal_name(self.name)
+ """A normalized element name
+
+ This is the original element name, without path separators or
+ the extension; it is used mainly for composing log file names
+ and creating directory names and such.
+ """
+
+ self.__runtime_dependencies = [] # Direct runtime dependency Elements
+ self.__build_dependencies = [] # Direct build dependency Elements
+ self.__reverse_dependencies = set() # Direct reverse dependency Elements
+ self.__ready_for_runtime = False # Whether the element has all its dependencies ready and has a cache key
+ self.__sources = [] # List of Sources
+ self.__weak_cache_key = None # Our cached weak cache key
+ self.__strict_cache_key = None # Our cached cache key for strict builds
+ self.__artifacts = context.artifactcache # Artifact cache
+ self.__sourcecache = context.sourcecache # Source cache
+ self.__consistency = Consistency.INCONSISTENT # Cached overall consistency state
+ self.__assemble_scheduled = False # Element is scheduled to be assembled
+ self.__assemble_done = False # Element is assembled
+ self.__tracking_scheduled = False # Sources are scheduled to be tracked
+ self.__tracking_done = False # Sources have been tracked
+ self.__pull_done = False # Whether pull was attempted
+ self.__splits = None # Resolved regex objects for computing split domains
+ self.__whitelist_regex = None # Resolved regex object to check if file is allowed to overlap
+ self.__staged_sources_directory = None # Location where Element.stage_sources() was called
+ self.__tainted = None # Whether the artifact is tainted and should not be shared
+ self.__required = False # Whether the artifact is required in the current session
+ self.__artifact_files_required = False # Whether artifact files are required in the local cache
+ self.__build_result = None # The result of assembling this Element (success, description, detail)
+ self._build_log_path = None # The path of the build log for this Element
+ self.__artifact = None # Artifact class for direct artifact composite interaction
+ self.__strict_artifact = None # Artifact for strict cache key
+
+ # the index of the last source in this element that requires previous
+ # sources for staging
+ self.__last_source_requires_previous_ix = None
+
+ self.__batch_prepare_assemble = False # Whether batching across prepare()/assemble() is configured
+ self.__batch_prepare_assemble_flags = 0 # Sandbox flags for batching across prepare()/assemble()
+ self.__batch_prepare_assemble_collect = None # Collect dir for batching across prepare()/assemble()
+
+ # Ensure we have loaded this class's defaults
+ self.__init_defaults(project, plugin_conf, meta.kind, meta.is_junction)
+
+ # Collect the composited variables and resolve them
+ variables = self.__extract_variables(project, meta)
+ _yaml.node_set(variables, 'element-name', self.name)
+ self.__variables = Variables(variables)
+
+ # Collect the composited environment now that we have variables
+ unexpanded_env = self.__extract_environment(project, meta)
+ self.__environment = self.__expand_environment(unexpanded_env)
+
+ # Collect the environment nocache blacklist list
+ nocache = self.__extract_env_nocache(project, meta)
+ self.__env_nocache = nocache
+
+ # Grab public domain data declared for this instance
+ unexpanded_public = self.__extract_public(meta)
+ self.__public = self.__expand_splits(unexpanded_public)
+ self.__dynamic_public = None
+
+ # Collect the composited element configuration and
+ # ask the element to configure itself.
+ self.__config = self.__extract_config(meta)
+ self._configure(self.__config)
+
+ # Extract remote execution URL
+ if meta.is_junction:
+ self.__remote_execution_specs = None
+ else:
+ self.__remote_execution_specs = project.remote_execution_specs
+
+ # Extract Sandbox config
+ self.__sandbox_config = self.__extract_sandbox_config(project, meta)
+
+ self.__sandbox_config_supported = True
+ if not self.__use_remote_execution():
+ platform = Platform.get_platform()
+ if not platform.check_sandbox_config(self.__sandbox_config):
+ # Local sandbox does not fully support specified sandbox config.
+ # This will taint the artifact, disable pushing.
+ self.__sandbox_config_supported = False
+
+ def __lt__(self, other):
+ return self.name < other.name
+
+ #############################################################
+ # Abstract Methods #
+ #############################################################
+ def configure_sandbox(self, sandbox):
+ """Configures the the sandbox for execution
+
+ Args:
+ sandbox (:class:`.Sandbox`): The build sandbox
+
+ Raises:
+ (:class:`.ElementError`): When the element raises an error
+
+ Elements must implement this method to configure the sandbox object
+ for execution.
+ """
+ raise ImplError("element plugin '{kind}' does not implement configure_sandbox()".format(
+ kind=self.get_kind()))
+
+ def stage(self, sandbox):
+ """Stage inputs into the sandbox directories
+
+ Args:
+ sandbox (:class:`.Sandbox`): The build sandbox
+
+ Raises:
+ (:class:`.ElementError`): When the element raises an error
+
+ Elements must implement this method to populate the sandbox
+ directory with data. This is done either by staging :class:`.Source`
+ objects, by staging the artifacts of the elements this element depends
+ on, or both.
+ """
+ raise ImplError("element plugin '{kind}' does not implement stage()".format(
+ kind=self.get_kind()))
+
+ def prepare(self, sandbox):
+ """Run one-off preparation commands.
+
+ This is run before assemble(), but is guaranteed to run only
+ the first time if we build incrementally - this makes it
+ possible to run configure-like commands without causing the
+ entire element to rebuild.
+
+ Args:
+ sandbox (:class:`.Sandbox`): The build sandbox
+
+ Raises:
+ (:class:`.ElementError`): When the element raises an error
+
+ By default, this method does nothing, but may be overridden to
+ allow configure-like commands.
+
+ *Since: 1.2*
+ """
+
+ def assemble(self, sandbox):
+ """Assemble the output artifact
+
+ Args:
+ sandbox (:class:`.Sandbox`): The build sandbox
+
+ Returns:
+ (str): An absolute path within the sandbox to collect the artifact from
+
+ Raises:
+ (:class:`.ElementError`): When the element raises an error
+
+ Elements must implement this method to create an output
+ artifact from its sources and dependencies.
+ """
+ raise ImplError("element plugin '{kind}' does not implement assemble()".format(
+ kind=self.get_kind()))
+
+ def generate_script(self):
+ """Generate a build (sh) script to build this element
+
+ Returns:
+ (str): A string containing the shell commands required to build the element
+
+ BuildStream guarantees the following environment when the
+ generated script is run:
+
+ - All element variables have been exported.
+ - The cwd is `self.get_variable('build-root')/self.normal_name`.
+ - $PREFIX is set to `self.get_variable('install-root')`.
+ - The directory indicated by $PREFIX is an empty directory.
+
+ Files are expected to be installed to $PREFIX.
+
+ If the script fails, it is expected to return with an exit
+ code != 0.
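+
+ **Example:**
+
+ A minimal sketch of what an implementation might return; the use
+ of ``make`` here is purely illustrative:
+
+ .. code:: python
+
+ # Build in the current directory and install the result to $PREFIX
+ return 'make\nmake install DESTDIR="${PREFIX}"\n'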
+ """
+ raise ImplError("element plugin '{kind}' does not implement write_script()".format(
+ kind=self.get_kind()))
+
+ #############################################################
+ # Public Methods #
+ #############################################################
+ def sources(self):
+ """A generator function to enumerate the element sources
+
+ Yields:
+ (:class:`.Source`): The sources of this element
+ """
+ for source in self.__sources:
+ yield source
+
+ def dependencies(self, scope, *, recurse=True, visited=None):
+ """dependencies(scope, *, recurse=True)
+
+ A generator function which yields the dependencies of the given element.
+
+ If `recurse` is specified (the default), the full dependencies will be listed
+ in deterministic staging order, starting with the basemost elements in the
+ given `scope`. Otherwise, if `recurse` is not specified then only the direct
+ dependencies in the given `scope` will be traversed, and the element itself
+ will be omitted.
+
+ Args:
+ scope (:class:`.Scope`): The scope to iterate in
+ recurse (bool): Whether to recurse
+
+ Yields:
+ (:class:`.Element`): The dependencies in `scope`, in deterministic staging order
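+
+ **Example:**
+
+ .. code:: python
+
+ # Report the names of the direct build dependencies only
+ for dep in self.dependencies(Scope.BUILD, recurse=False):
+     self.info("Direct build dependency: {}".format(dep.name))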
+ """
+ # The format of visited is (BitMap(), BitMap()), with the first BitMap
+ # containing elements that have been visited for the `Scope.BUILD` case
+ # and the second one relating to the `Scope.RUN` case.
+ if not recurse:
+ if scope in (Scope.BUILD, Scope.ALL):
+ yield from self.__build_dependencies
+ if scope in (Scope.RUN, Scope.ALL):
+ yield from self.__runtime_dependencies
+ else:
+ def visit(element, scope, visited):
+ if scope == Scope.ALL:
+ visited[0].add(element._unique_id)
+ visited[1].add(element._unique_id)
+
+ for dep in chain(element.__build_dependencies, element.__runtime_dependencies):
+ if dep._unique_id not in visited[0] and dep._unique_id not in visited[1]:
+ yield from visit(dep, Scope.ALL, visited)
+
+ yield element
+ elif scope == Scope.BUILD:
+ visited[0].add(element._unique_id)
+
+ for dep in element.__build_dependencies:
+ if dep._unique_id not in visited[1]:
+ yield from visit(dep, Scope.RUN, visited)
+
+ elif scope == Scope.RUN:
+ visited[1].add(element._unique_id)
+
+ for dep in element.__runtime_dependencies:
+ if dep._unique_id not in visited[1]:
+ yield from visit(dep, Scope.RUN, visited)
+
+ yield element
+ else:
+ yield element
+
+ if visited is None:
+ # Visited is of the form (Visited for Scope.BUILD, Visited for Scope.RUN)
+ visited = (BitMap(), BitMap())
+ else:
+ # We already have a visited set passed in; we might be able to short-circuit
+ if scope in (Scope.BUILD, Scope.ALL) and self._unique_id in visited[0]:
+ return
+ if scope in (Scope.RUN, Scope.ALL) and self._unique_id in visited[1]:
+ return
+
+ yield from visit(self, scope, visited)
+
+ def search(self, scope, name):
+ """Search for a dependency by name
+
+ Args:
+ scope (:class:`.Scope`): The scope to search
+ name (str): The dependency to search for
+
+ Returns:
+ (:class:`.Element`): The dependency element, or None if not found.
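+
+ **Example:**
+
+ The element name used here is purely hypothetical:
+
+ .. code:: python
+
+ # Look up a specific runtime dependency by its element name
+ dep = self.search(Scope.RUN, 'base/libfoo.bst')
+ if dep is not None:
+     self.info("Found dependency {}".format(dep.name))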
+ """
+ for dep in self.dependencies(scope):
+ if dep.name == name:
+ return dep
+
+ return None
+
+ def node_subst_member(self, node, member_name, default=_yaml._sentinel):
+ """Fetch the value of a string node member, substituting any variables
+ in the loaded value with the element contextual variables.
+
+ Args:
+ node (dict): A dictionary loaded from YAML
+ member_name (str): The name of the member to fetch
+ default (str): A value to return when *member_name* is not specified in *node*
+
+ Returns:
+ The value of *member_name* in *node*, otherwise *default*
+
+ Raises:
+ :class:`.LoadError`: When *member_name* is not found and no *default* was provided
+
+ This is essentially the same as :func:`~buildstream.plugin.Plugin.node_get_member`
+ except that it assumes the expected type is a string and will also perform variable
+ substitutions.
+
+ **Example:**
+
+ .. code:: python
+
+ # Expect a string 'name' in 'node', substituting any
+ # variables in the returned string
+ name = self.node_subst_member(node, 'name')
+ """
+ value = self.node_get_member(node, str, member_name, default)
+ try:
+ return self.__variables.subst(value)
+ except LoadError as e:
+ provenance = _yaml.node_get_provenance(node, key=member_name)
+ raise LoadError(e.reason, '{}: {}'.format(provenance, e), detail=e.detail) from e
+
+ def node_subst_list(self, node, member_name):
+ """Fetch a list from a node member, substituting any variables in the list
+
+ Args:
+ node (dict): A dictionary loaded from YAML
+ member_name (str): The name of the member to fetch (a list)
+
+ Returns:
+ The list in *member_name*
+
+ Raises:
+ :class:`.LoadError`
+
+ This is essentially the same as :func:`~buildstream.plugin.Plugin.node_get_member`
+ except that it assumes the expected type is a list of strings and will also
+ perform variable substitutions.
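+
+ **Example:**
+
+ The ``install-commands`` member name is illustrative:
+
+ .. code:: python
+
+ # Fetch a list of command strings, with variables such as
+ # %{install-root} already substituted in each entry
+ commands = self.node_subst_list(node, 'install-commands')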
+ """
+ value = self.node_get_member(node, list, member_name)
+ ret = []
+ for index, x in enumerate(value):
+ try:
+ ret.append(self.__variables.subst(x))
+ except LoadError as e:
+ provenance = _yaml.node_get_provenance(node, key=member_name, indices=[index])
+ raise LoadError(e.reason, '{}: {}'.format(provenance, e), detail=e.detail) from e
+ return ret
+
+ def node_subst_list_element(self, node, member_name, indices):
+ """Fetch the value of a list element from a node member, substituting any variables
+ in the loaded value with the element contextual variables.
+
+ Args:
+ node (dict): A dictionary loaded from YAML
+ member_name (str): The name of the member to fetch
+ indices (list of int): List of indices to search, in case of nested lists
+
+ Returns:
+ The value of the list element in *member_name* at the specified *indices*
+
+ Raises:
+ :class:`.LoadError`
+
+ This is essentially the same as :func:`~buildstream.plugin.Plugin.node_get_list_element`
+ except that it assumes the expected type is a string and will also perform variable
+ substitutions.
+
+ **Example:**
+
+ .. code:: python
+
+ # Fetch the list itself
+ strings = self.node_get_member(node, list, 'strings')
+
+ # Iterate over the list indices
+ for i in range(len(strings)):
+
+ # Fetch the strings in this list, substituting content
+ # with our element's variables if needed
+ string = self.node_subst_list_element(
+ node, 'strings', [ i ])
+ """
+ value = self.node_get_list_element(node, str, member_name, indices)
+ try:
+ return self.__variables.subst(value)
+ except LoadError as e:
+ provenance = _yaml.node_get_provenance(node, key=member_name, indices=indices)
+ raise LoadError(e.reason, '{}: {}'.format(provenance, e), detail=e.detail) from e
+
+ def compute_manifest(self, *, include=None, exclude=None, orphans=True):
+ """Compute and return this element's selective manifest
+
+ The manifest consists of the list of file paths in the
+ artifact. The files in the manifest are selected according to
+ `include`, `exclude` and `orphans` parameters. If `include` is
+ not specified then all files spoken for by any domain are
+ included unless explicitly excluded with an `exclude` domain.
+
+ Args:
+ include (list): An optional list of domains to include files from
+ exclude (list): An optional list of domains to exclude files from
+ orphans (bool): Whether to include files not spoken for by split domains
+
+ Yields:
+ (str): The paths of the files in manifest
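+
+ **Example:**
+
+ .. code:: python
+
+ # List the files belonging to the 'runtime' split domain
+ for path in self.compute_manifest(include=['runtime'], orphans=False):
+     self.info(path)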
+ """
+ self.__assert_cached()
+ return self.__compute_splits(include, exclude, orphans)
+
+ def get_artifact_name(self, key=None):
+ """Compute and return this element's full artifact name
+
+ Generate a full name for an artifact, including the project
+ namespace, element name and cache key.
+
+ This can also be used as a relative path safely, and
+ will normalize parts of the element name such that only
+ digits, letters and some select characters are allowed.
+
+ Args:
+ key (str): The element's cache key. Defaults to None
+
+ Returns:
+ (str): The relative path for the artifact
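+
+ **Example:**
+
+ .. code:: python
+
+ # The full artifact name for this element's current cache key
+ artifact_name = self.get_artifact_name()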
+ """
+ project = self._get_project()
+ if key is None:
+ key = self._get_cache_key()
+
+ assert key is not None
+
+ return _compose_artifact_name(project.name, self.normal_name, key)
+
+ def stage_artifact(self, sandbox, *, path=None, include=None, exclude=None, orphans=True, update_mtimes=None):
+ """Stage this element's output artifact in the sandbox
+
+ This will stage the files from the artifact to the sandbox at specified location.
+ The files are selected for staging according to the `include`, `exclude` and `orphans`
+ parameters; if `include` is not specified then all files spoken for by any domain
+ are included unless explicitly excluded with an `exclude` domain.
+
+ Args:
+ sandbox (:class:`.Sandbox`): The build sandbox
+ path (str): An optional sandbox relative path
+ include (list): An optional list of domains to include files from
+ exclude (list): An optional list of domains to exclude files from
+ orphans (bool): Whether to include files not spoken for by split domains
+ update_mtimes (list): An optional list of files whose mtimes to set to the current time.
+
+ Raises:
+ (:class:`.ElementError`): If the element has not yet produced an artifact.
+
+ Returns:
+ (:class:`~.utils.FileListResult`): The result describing what happened while staging
+
+ .. note::
+
+ Directories in `dest` are replaced with files from `src`,
+ unless the existing directory in `dest` is not empty in
+ which case the path will be reported in the return value.
+
+ **Example:**
+
+ .. code:: python
+
+ # Stage the dependencies for a build of 'self'
+ for dep in self.dependencies(Scope.BUILD):
+ dep.stage_artifact(sandbox)
+ """
+
+ if not self._cached():
+ detail = "No artifacts have been cached yet for that element\n" + \
+ "Try building the element first with `bst build`\n"
+ raise ElementError("No artifacts to stage",
+ detail=detail, reason="uncached-checkout-attempt")
+
+ if update_mtimes is None:
+ update_mtimes = []
+
+ # Time to use the artifact, check once more that it's there
+ self.__assert_cached()
+
+ with self.timed_activity("Staging {}/{}".format(self.name, self._get_brief_display_key())):
+ files_vdir = self.__artifact.get_files()
+
+ # Hard link it into the staging area
+ #
+ vbasedir = sandbox.get_virtual_directory()
+ vstagedir = vbasedir \
+ if path is None \
+ else vbasedir.descend(*path.lstrip(os.sep).split(os.sep))
+
+ split_filter = self.__split_filter_func(include, exclude, orphans)
+
+ # We must not hardlink files whose mtimes we want to update
+ if update_mtimes:
+ def link_filter(path):
+ return ((split_filter is None or split_filter(path)) and
+ path not in update_mtimes)
+
+ def copy_filter(path):
+ return ((split_filter is None or split_filter(path)) and
+ path in update_mtimes)
+ else:
+ link_filter = split_filter
+
+ result = vstagedir.import_files(files_vdir, filter_callback=link_filter,
+ report_written=True, can_link=True)
+
+ if update_mtimes:
+ copy_result = vstagedir.import_files(files_vdir, filter_callback=copy_filter,
+ report_written=True, update_mtime=True)
+ result = result.combine(copy_result)
+
+ return result
+
+ def stage_dependency_artifacts(self, sandbox, scope, *, path=None,
+ include=None, exclude=None, orphans=True):
+ """Stage element dependencies in scope
+
+ This is primarily a convenience wrapper around
+ :func:`Element.stage_artifact() <buildstream.element.Element.stage_artifact>`
+ which takes care of staging all the dependencies in `scope` and issuing the
+ appropriate warnings.
+
+ Args:
+ sandbox (:class:`.Sandbox`): The build sandbox
+ scope (:class:`.Scope`): The scope to stage dependencies in
+ path (str): An optional sandbox relative path
+ include (list): An optional list of domains to include files from
+ exclude (list): An optional list of domains to exclude files from
+ orphans (bool): Whether to include files not spoken for by split domains
+
+ Raises:
+ (:class:`.ElementError`): If any of the dependencies in `scope` have not
+ yet produced artifacts, or if forbidden overlaps
+ occur.
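+
+ **Example:**
+
+ The ``/sysroot`` path used here is illustrative:
+
+ .. code:: python
+
+ # Stage the runtime dependencies into a subdirectory of the sandbox
+ self.stage_dependency_artifacts(sandbox, Scope.RUN, path='/sysroot')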
+ """
+ ignored = {}
+ overlaps = OrderedDict()
+ files_written = {}
+ old_dep_keys = None
+ workspace = self._get_workspace()
+ context = self._get_context()
+
+ if self.__can_build_incrementally() and workspace.last_successful:
+
+ # Try to perform an incremental build if the last successful
+ # build is still in the artifact cache
+ #
+ if self.__artifacts.contains(self, workspace.last_successful):
+ last_successful = Artifact(self, context, strong_key=workspace.last_successful)
+ # Get a dict of dependency strong keys
+ old_dep_keys = last_successful.get_metadata_dependencies()
+ else:
+ # Last successful build is no longer in the artifact cache,
+ # so let's reset it and perform a full build now.
+ workspace.prepared = False
+ workspace.last_successful = None
+
+ self.info("Resetting workspace state, last successful build is no longer in the cache")
+
+ # In case we are staging in the main process
+ if utils._is_main_process():
+ context.get_workspaces().save_config()
+
+ for dep in self.dependencies(scope):
+ # If we are workspaced, and we therefore perform an
+ # incremental build, we must ensure that we update the mtimes
+ # of any files created by our dependencies since the last
+ # successful build.
+ to_update = None
+ if workspace and old_dep_keys:
+ dep.__assert_cached()
+
+ if dep.name in old_dep_keys:
+ key_new = dep._get_cache_key()
+ key_old = old_dep_keys[dep.name]
+
+ # We only need to worry about modified and added
+ # files, since removed files will be picked up by
+ # build systems anyway.
+ to_update, _, added = self.__artifacts.diff(dep, key_old, key_new)
+ workspace.add_running_files(dep.name, to_update + added)
+ to_update.extend(workspace.running_files[dep.name])
+
+ # In case we are running `bst shell`, this happens in the
+ # main process and we need to update the workspace config
+ if utils._is_main_process():
+ context.get_workspaces().save_config()
+
+ result = dep.stage_artifact(sandbox,
+ path=path,
+ include=include,
+ exclude=exclude,
+ orphans=orphans,
+ update_mtimes=to_update)
+ if result.overwritten:
+ for overwrite in result.overwritten:
+ # Completely new overwrite
+ if overwrite not in overlaps:
+ # Find the overwritten element by checking where we've
+ # written the element before
+ for elm, contents in files_written.items():
+ if overwrite in contents:
+ overlaps[overwrite] = [elm, dep.name]
+ else:
+ overlaps[overwrite].append(dep.name)
+ files_written[dep.name] = result.files_written
+
+ if result.ignored:
+ ignored[dep.name] = result.ignored
+
+ if overlaps:
+ overlap_warning = False
+ warning_detail = "Staged files overwrite existing files in staging area:\n"
+ for f, elements in overlaps.items():
+ overlap_warning_elements = []
+ # The bottom item overlaps nothing
+ overlapping_elements = elements[1:]
+ for elm in overlapping_elements:
+ element = self.search(scope, elm)
+ if not element.__file_is_whitelisted(f):
+ overlap_warning_elements.append(elm)
+ overlap_warning = True
+
+ warning_detail += _overlap_error_detail(f, overlap_warning_elements, elements)
+
+ if overlap_warning:
+ self.warn("Non-whitelisted overlaps detected", detail=warning_detail,
+ warning_token=CoreWarnings.OVERLAPS)
+
+ if ignored:
+ detail = "Not staging files which would replace non-empty directories:\n"
+ for key, value in ignored.items():
+ detail += "\nFrom {}:\n".format(key)
+ detail += " " + " ".join(["/" + f + "\n" for f in value])
+ self.warn("Ignored files", detail=detail)
+
+ def integrate(self, sandbox):
+ """Integrate currently staged filesystem against this artifact.
+
+ Args:
+ sandbox (:class:`.Sandbox`): The build sandbox
+
+ This modifies the sysroot staged inside the sandbox so that
+ the sysroot is *integrated*. Only an *integrated* sandbox
+ may be trusted for running the software therein, as the integration
+ commands will create and update important system cache files
+ required for running the installed software (such as the ld.so.cache).
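+
+ **Example:**
+
+ .. code:: python
+
+ # Stage the build dependencies, then integrate the staged sysroot
+ self.stage_dependency_artifacts(sandbox, Scope.BUILD)
+ for dep in self.dependencies(Scope.BUILD):
+     dep.integrate(sandbox)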
+ """
+ bstdata = self.get_public_data('bst')
+ environment = self.get_environment()
+
+ if bstdata is not None:
+ with sandbox.batch(SandboxFlags.NONE):
+ commands = self.node_get_member(bstdata, list, 'integration-commands', [])
+ for i in range(len(commands)):
+ cmd = self.node_subst_list_element(bstdata, 'integration-commands', [i])
+
+ sandbox.run(['sh', '-e', '-c', cmd], 0, env=environment, cwd='/',
+ label=cmd)
+
+ def stage_sources(self, sandbox, directory):
+ """Stage this element's sources to a directory in the sandbox
+
+ Args:
+ sandbox (:class:`.Sandbox`): The build sandbox
+ directory (str): An absolute path within the sandbox to stage the sources at
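+
+ **Example:**
+
+ .. code:: python
+
+ # Stage this element's sources into the configured build root
+ self.stage_sources(sandbox, self.get_variable('build-root'))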
+ """
+
+ # Hold on to the location where a plugin decided to stage sources,
+ # this will be used to reconstruct the failed sysroot properly
+ # after a failed build.
+ #
+ assert self.__staged_sources_directory is None
+ self.__staged_sources_directory = directory
+
+ self._stage_sources_in_sandbox(sandbox, directory)
+
+ def get_public_data(self, domain):
+ """Fetch public data on this element
+
+ Args:
+ domain (str): A public domain name to fetch data for
+
+ Returns:
+ (dict): The public data dictionary for the given domain
+
+ .. note::
+
+ This can only be called in the abstract methods which are
+ called as a part of the :ref:`build phase <core_element_build_phase>`
+ and never before.
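+
+ **Example:**
+
+ .. code:: python
+
+ # Fetch the public data declared in the 'bst' domain
+ bstdata = self.get_public_data('bst')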
+ """
+ if self.__dynamic_public is None:
+ self.__load_public_data()
+
+ data = _yaml.node_get(self.__dynamic_public, Mapping, domain, default_value=None)
+ if data is not None:
+ data = _yaml.node_copy(data)
+
+ return data
+
+ def set_public_data(self, domain, data):
+ """Set public data on this element
+
+ Args:
+ domain (str): A public domain name to fetch data for
+ data (dict): The public data dictionary for the given domain
+
+ This allows an element to dynamically mutate public data of
+ elements or add new domains as a result of successful completion
+ of the :func:`Element.assemble() <buildstream.element.Element.assemble>`
+ method.
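+
+ **Example:**
+
+ A sketch of republishing a (possibly modified) public data domain
+ from :func:`Element.assemble() <buildstream.element.Element.assemble>`:
+
+ .. code:: python
+
+ bstdata = self.get_public_data('bst')
+ # ... mutate or extend the returned data here ...
+ self.set_public_data('bst', bstdata)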
+ """
+ if self.__dynamic_public is None:
+ self.__load_public_data()
+
+ if data is not None:
+ data = _yaml.node_copy(data)
+
+ _yaml.node_set(self.__dynamic_public, domain, data)
+
+ def get_environment(self):
+ """Fetch the environment suitable for running in the sandbox
+
+ Returns:
+ (dict): A dictionary of string key/values suitable for passing
+ to :func:`Sandbox.run() <buildstream.sandbox.Sandbox.run>`
+ """
+ return _yaml.node_sanitize(self.__environment)
+
+ def get_variable(self, varname):
+ """Fetch the value of a variable resolved for this element.
+
+ Args:
+ varname (str): The name of the variable to fetch
+
+ Returns:
+ (str): The resolved value for *varname*, or None if no
+ variable was declared with the given name.
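+
+ **Example:**
+
+ .. code:: python
+
+ # Fetch the configured installation root for this element
+ install_root = self.get_variable('install-root')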
+ """
+ return self.__variables.flat.get(varname)
+
+ def batch_prepare_assemble(self, flags, *, collect=None):
+ """ Configure command batching across prepare() and assemble()
+
+ Args:
+ flags (:class:`.SandboxFlags`): The sandbox flags for the command batch
+ collect (str): An optional directory containing partial install contents
+ on command failure.
+
+ This may be called in :func:`Element.configure_sandbox() <buildstream.element.Element.configure_sandbox>`
+ to enable batching of all sandbox commands issued in prepare() and assemble().
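+
+ **Example:**
+
+ A sketch of enabling batching from within
+ :func:`Element.configure_sandbox() <buildstream.element.Element.configure_sandbox>`:
+
+ .. code:: python
+
+ # Batch all prepare() and assemble() commands, collecting the
+ # partial install root if a command in the batch fails
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+                             collect=self.get_variable('install-root'))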
+ """
+ if self.__batch_prepare_assemble:
+ raise ElementError("{}: Command batching for prepare/assemble is already configured".format(self))
+
+ self.__batch_prepare_assemble = True
+ self.__batch_prepare_assemble_flags = flags
+ self.__batch_prepare_assemble_collect = collect
+
+ #############################################################
+ # Private Methods used in BuildStream #
+ #############################################################
+
+ # _new_from_meta():
+ #
+ # Recursively instantiate a new Element instance, its sources
+ # and its dependencies from a meta element.
+ #
+ # Args:
+ # meta (MetaElement): The meta element
+ #
+ # Returns:
+ # (Element): A newly created Element instance
+ #
+ @classmethod
+ def _new_from_meta(cls, meta):
+
+ if not meta.first_pass:
+ meta.project.ensure_fully_loaded()
+
+ if meta in cls.__instantiated_elements:
+ return cls.__instantiated_elements[meta]
+
+ element = meta.project.create_element(meta, first_pass=meta.first_pass)
+ cls.__instantiated_elements[meta] = element
+
+ # Instantiate sources and generate their keys
+ for meta_source in meta.sources:
+ meta_source.first_pass = meta.is_junction
+ source = meta.project.create_source(meta_source,
+ first_pass=meta.first_pass)
+
+ redundant_ref = source._load_ref()
+ element.__sources.append(source)
+
+ # Collect redundant refs which occurred at load time
+ if redundant_ref is not None:
+ cls.__redundant_source_refs.append((source, redundant_ref))
+
+ # Instantiate dependencies
+ for meta_dep in meta.dependencies:
+ dependency = Element._new_from_meta(meta_dep)
+ element.__runtime_dependencies.append(dependency)
+ dependency.__reverse_dependencies.add(element)
+
+ for meta_dep in meta.build_dependencies:
+ dependency = Element._new_from_meta(meta_dep)
+ element.__build_dependencies.append(dependency)
+ dependency.__reverse_dependencies.add(element)
+
+ return element
+
+ # _clear_meta_elements_cache()
+ #
+ # Clear the internal meta elements cache.
+ #
+ # When loading elements from meta, we cache already instantiated elements
+ # in order to not have to load the same elements twice.
+ # This clears the cache.
+ #
+ # It should be called whenever we are done loading all elements in order
+ # to save memory.
+ #
+ @classmethod
+ def _clear_meta_elements_cache(cls):
+ cls.__instantiated_elements = {}
+
+ # _get_redundant_source_refs()
+ #
+ # Fetches a list of (Source, ref) tuples of all the Sources
+ # which were loaded with a ref specified in the element declaration
+ # for projects which use project.refs ref-storage.
+ #
+ # This is used to produce a warning
+ @classmethod
+ def _get_redundant_source_refs(cls):
+ return cls.__redundant_source_refs
+
+ # _reset_load_state()
+ #
+ # This is called by Pipeline.cleanup() and is used to
+ # reset the loader state between multiple sessions.
+ #
+ @classmethod
+ def _reset_load_state(cls):
+ cls.__instantiated_elements = {}
+ cls.__redundant_source_refs = []
+
+ # _get_consistency()
+ #
+ # Returns cached consistency state
+ #
+ def _get_consistency(self):
+ return self.__consistency
+
+ # _cached():
+ #
+ # Returns:
+ # (bool): Whether this element is already present in
+ # the artifact cache
+ #
+ def _cached(self):
+ if not self.__artifact:
+ return False
+
+ return self.__artifact.cached()
+
+ # _get_build_result():
+ #
+ # Returns:
+ # (bool): Whether the artifact of this element present in the artifact cache is of a success
+ # (str): Short description of the result
+ # (str): Detailed description of the result
+ #
+ def _get_build_result(self):
+ if self.__build_result is None:
+ self.__load_build_result()
+
+ return self.__build_result
+
+ # __set_build_result():
+ #
+ # Sets the assembly result
+ #
+ # Args:
+ # success (bool): Whether the result is a success
+ # description (str): Short description of the result
+ # detail (str): Detailed description of the result
+ #
+ def __set_build_result(self, success, description, detail=None):
+ self.__build_result = (success, description, detail)
+
+ # _cached_success():
+ #
+ # Returns:
+ # (bool): Whether this element is already present in
+ # the artifact cache and the element assembled successfully
+ #
+ def _cached_success(self):
+ if not self._cached():
+ return False
+
+ success, _, _ = self._get_build_result()
+ return success
+
+ # _cached_failure():
+ #
+ # Returns:
+ # (bool): Whether this element is already present in
+ # the artifact cache and the element did not assemble successfully
+ #
+ def _cached_failure(self):
+ if not self._cached():
+ return False
+
+ success, _, _ = self._get_build_result()
+ return not success
+
+ # _buildable():
+ #
+ # Returns:
+ # (bool): Whether this element can currently be built
+ #
+ def _buildable(self):
+ if self._get_consistency() < Consistency.CACHED and \
+ not self._source_cached():
+ return False
+
+ for dependency in self.dependencies(Scope.BUILD):
+ # In non-strict mode an element's strong cache key may not be available yet
+ # even though an artifact is available in the local cache. This can happen
+ # if the pull job is still pending as the remote cache may have an artifact
+ # that matches the strict cache key, which is preferred over a locally
+ # cached artifact with a weak cache key match.
+ if not dependency._cached_success() or not dependency._get_cache_key(strength=_KeyStrength.STRONG):
+ return False
+
+ if not self.__assemble_scheduled:
+ return False
+
+ return True
+
+ # _get_cache_key():
+ #
+ # Returns the cache key
+ #
+ # Args:
+ # strength (_KeyStrength): Either STRONG or WEAK key strength
+ #
+ # Returns:
+ # (str): A hex digest cache key for this Element, or None
+ #
+ # None is returned if information for the cache key is missing.
+ #
+ def _get_cache_key(self, strength=_KeyStrength.STRONG):
+ if strength == _KeyStrength.STRONG:
+ return self.__cache_key
+ else:
+ return self.__weak_cache_key
+
+ # _can_query_cache():
+ #
+ # Returns whether the cache key required for cache queries is available.
+ #
+ # Returns:
+ # (bool): True if cache can be queried
+ #
+ def _can_query_cache(self):
+ # If build has already been scheduled, we know that the element is
+ # not cached and thus can allow cache query even if the strict cache key
+ # is not available yet.
+ # This special case is required for workspaced elements to prevent
+ # them from getting blocked in the pull queue.
+ if self.__assemble_scheduled:
+ return True
+
+ # cache cannot be queried until strict cache key is available
+ return self.__strict_cache_key is not None
+
+ # _update_state()
+ #
+ # Keep track of element state. Calculate cache keys if possible and
+ # check whether artifacts are cached.
+ #
+ # This must be called whenever the state of an element may have changed.
+ #
+ def _update_state(self):
+ context = self._get_context()
+
+ # Compute and determine consistency of sources
+ self.__update_source_state()
+
+ if self._get_consistency() == Consistency.INCONSISTENT:
+ # Tracking may still be pending
+ return
+
+ if self._get_workspace() and self.__assemble_scheduled:
+ # If we have an active workspace and are going to build, then
+ # discard current cache key values as their correct values can only
+ # be calculated once the build is complete
+ self.__reset_cache_data()
+ return
+
+ self.__update_cache_keys()
+ self.__update_artifact_state()
+
+ # Workspaced sources are considered unstable if a build is pending
+ # as the build will modify the contents of the workspace.
+ # Determine as early as possible if a build is pending to discard
+ # unstable cache keys.
+ # Also, uncached workspaced elements must be assembled so we can know
+ # the cache key.
+ if (not self.__assemble_scheduled and not self.__assemble_done and
+ self.__artifact and
+ (self._is_required() or self._get_workspace()) and
+ not self._cached_success() and
+ not self._pull_pending()):
+ self._schedule_assemble()
+ return
+
+ if not context.get_strict():
+ self.__update_cache_key_non_strict()
+
+ if not self.__ready_for_runtime and self.__cache_key is not None:
+ self.__ready_for_runtime = all(
+ dep.__ready_for_runtime for dep in self.__runtime_dependencies)
+
+ # _get_display_key():
+ #
+ # Returns cache keys for display purposes
+ #
+ # Returns:
+ # (str): A full hex digest cache key for this Element
+ # (str): An abbreviated hex digest cache key for this Element
+ # (bool): True if key should be shown as dim, False otherwise
+ #
+ # Question marks are returned if information for the cache key is missing.
+ #
+ def _get_display_key(self):
+ context = self._get_context()
+ dim_key = True
+
+ cache_key = self._get_cache_key()
+
+ if not cache_key:
+ cache_key = "{:?<64}".format('')
+ elif self._get_cache_key() == self.__strict_cache_key:
+ # Strong cache key used in this session matches cache key
+ # that would be used in strict build mode
+ dim_key = False
+
+ length = min(len(cache_key), context.log_key_length)
+ return (cache_key, cache_key[0:length], dim_key)
+
+ # _get_brief_display_key()
+ #
+ # Returns an abbreviated cache key for display purposes
+ #
+ # Returns:
+ # (str): An abbreviated hex digest cache key for this Element
+ #
+ # Question marks are returned if information for the cache key is missing.
+ #
+ def _get_brief_display_key(self):
+ _, display_key, _ = self._get_display_key()
+ return display_key
+
+ # _preflight():
+ #
+ # A wrapper for calling the abstract preflight() method on
+ # the element and its sources.
+ #
+ def _preflight(self):
+
+ if self.BST_FORBID_RDEPENDS and self.BST_FORBID_BDEPENDS:
+ if any(self.dependencies(Scope.RUN, recurse=False)) or any(self.dependencies(Scope.BUILD, recurse=False)):
+ raise ElementError("{}: Dependencies are forbidden for '{}' elements"
+ .format(self, self.get_kind()), reason="element-forbidden-depends")
+
+ if self.BST_FORBID_RDEPENDS:
+ if any(self.dependencies(Scope.RUN, recurse=False)):
+ raise ElementError("{}: Runtime dependencies are forbidden for '{}' elements"
+ .format(self, self.get_kind()), reason="element-forbidden-rdepends")
+
+ if self.BST_FORBID_BDEPENDS:
+ if any(self.dependencies(Scope.BUILD, recurse=False)):
+ raise ElementError("{}: Build dependencies are forbidden for '{}' elements"
+ .format(self, self.get_kind()), reason="element-forbidden-bdepends")
+
+ if self.BST_FORBID_SOURCES:
+ if any(self.sources()):
+ raise ElementError("{}: Sources are forbidden for '{}' elements"
+ .format(self, self.get_kind()), reason="element-forbidden-sources")
+
+ try:
+ self.preflight()
+ except BstError as e:
+ # Prepend provenance to the error
+ raise ElementError("{}: {}".format(self, e), reason=e.reason, detail=e.detail) from e
+
+ # Ensure that the first source does not need access to previous sources
+ if self.__sources and self.__sources[0]._requires_previous_sources():
+ raise ElementError("{}: {} cannot be the first source of an element "
+ "as it requires access to previous sources"
+ .format(self, self.__sources[0]))
+
+ # Preflight the sources
+ for source in self.sources():
+ source._preflight()
+
+ # _schedule_tracking():
+ #
+ # Force the element state to be inconsistent; any sources will appear
+ # to be inconsistent.
+ #
+ # This is used across the pipeline in sessions where the
+ # elements in question are going to be tracked, causing the
+ # pipeline to rebuild safely by ensuring cache key recalculation
+ # and reinterrogation of element state after tracking of elements
+ # succeeds.
+ #
+ def _schedule_tracking(self):
+ self.__tracking_scheduled = True
+
+ # _tracking_done():
+ #
+ # This is called in the main process after the element has been tracked
+ #
+ def _tracking_done(self):
+ assert self.__tracking_scheduled
+
+ self.__tracking_scheduled = False
+ self.__tracking_done = True
+
+ self.__update_state_recursively()
+
+ # _track():
+ #
+ # Calls track() on the Element sources
+ #
+ # Raises:
+ # SourceError: If one of the element sources has an error
+ #
+ # Returns:
+ # (list): A list of Source object ids and their new references
+ #
+ def _track(self):
+ refs = []
+ for index, source in enumerate(self.__sources):
+ old_ref = source.get_ref()
+ new_ref = source._track(self.__sources[0:index])
+ refs.append((source._unique_id, new_ref))
+
+ # Complementary warning that the new ref will be unused.
+ if old_ref != new_ref and self._get_workspace():
+ detail = "This source has an open workspace.\n" \
+ + "To start using the new reference, please close the existing workspace."
+ source.warn("Updated reference will be ignored as source has open workspace", detail=detail)
+
+ return refs
+
+ # _prepare_sandbox():
+ #
+ # This stages things for either _shell() (below) or also
+ # is used to stage things by the `bst artifact checkout` codepath
+ #
+ @contextmanager
+ def _prepare_sandbox(self, scope, directory, shell=False, integrate=True, usebuildtree=False):
+ # bst shell and bst artifact checkout require a local sandbox.
+ bare_directory = bool(directory)
+ with self.__sandbox(directory, config=self.__sandbox_config, allow_remote=False,
+ bare_directory=bare_directory) as sandbox:
+ sandbox._usebuildtree = usebuildtree
+
+ # Configure always comes first, and we need it.
+ self.__configure_sandbox(sandbox)
+
+ # Stage something if we need it
+ if not directory:
+ if shell and scope == Scope.BUILD:
+ self.stage(sandbox)
+ else:
+ # Stage deps in the sandbox root
+ with self.timed_activity("Staging dependencies", silent_nested=True):
+ self.stage_dependency_artifacts(sandbox, scope)
+
+ # Run any integration commands provided by the dependencies
+ # once they are all staged and ready
+ if integrate:
+ with self.timed_activity("Integrating sandbox"):
+ for dep in self.dependencies(scope):
+ dep.integrate(sandbox)
+
+ yield sandbox
+
+ # _stage_sources_in_sandbox():
+ #
+ # Stage this element's sources to a directory inside sandbox
+ #
+ # Args:
+ # sandbox (:class:`.Sandbox`): The build sandbox
+ # directory (str): An absolute path to stage the sources at
+ # mount_workspaces (bool): mount workspaces if True, copy otherwise
+ #
+ def _stage_sources_in_sandbox(self, sandbox, directory, mount_workspaces=True):
+
+ # Only artifact caches that implement diff() are allowed to
+ # perform incremental builds.
+ if mount_workspaces and self.__can_build_incrementally():
+ workspace = self._get_workspace()
+ sandbox.mark_directory(directory)
+ sandbox._set_mount_source(directory, workspace.get_absolute_path())
+
+ # Stage all sources that need to be copied
+ sandbox_vroot = sandbox.get_virtual_directory()
+ host_vdirectory = sandbox_vroot.descend(*directory.lstrip(os.sep).split(os.sep), create=True)
+ self._stage_sources_at(host_vdirectory, mount_workspaces=mount_workspaces, usebuildtree=sandbox._usebuildtree)
+
+ # _stage_sources_at():
+ #
+ # Stage this element's sources to a directory
+ #
+ # Args:
+ # vdirectory (:class:`.storage.Directory`): A virtual directory object to stage sources into.
+ # mount_workspaces (bool): mount workspaces if True, copy otherwise
+ # usebuildtree (bool): use the element's build tree as its source.
+ #
+ def _stage_sources_at(self, vdirectory, mount_workspaces=True, usebuildtree=False):
+
+ context = self._get_context()
+
+ # It's advantageous to have this temporary directory on
+ # the same file system as the rest of our cache.
+ with self.timed_activity("Staging sources", silent_nested=True), \
+ utils._tempdir(dir=context.tmpdir, prefix='staging-temp') as temp_staging_directory:
+
+ import_dir = temp_staging_directory
+
+ if not isinstance(vdirectory, Directory):
+ vdirectory = FileBasedDirectory(vdirectory)
+ if not vdirectory.is_empty():
+ raise ElementError("Staging directory '{}' is not empty".format(vdirectory))
+
+ workspace = self._get_workspace()
+ if workspace:
+ # If mount_workspaces is set and we're doing incremental builds,
+ # the workspace is already mounted into the sandbox.
+ if not (mount_workspaces and self.__can_build_incrementally()):
+ with self.timed_activity("Staging local files at {}"
+ .format(workspace.get_absolute_path())):
+ workspace.stage(import_dir)
+
+ # Check if we have a cached buildtree to use
+ elif usebuildtree:
+ import_dir = self.__artifact.get_buildtree()
+ if import_dir.is_empty():
+ detail = "Element type either does not expect a buildtree or it was explictily cached without one."
+ self.warn("WARNING: {} Artifact contains an empty buildtree".format(self.name), detail=detail)
+
+ # No workspace or cached buildtree, stage source from source cache
+ else:
+ # Ensure sources are cached
+ self.__cache_sources()
+
+ if self.__sources:
+
+ sourcecache = context.sourcecache
+ # find last required source
+ last_required_previous_ix = self.__last_source_requires_previous()
+ import_dir = CasBasedDirectory(context.get_cascache())
+
+ try:
+ for source in self.__sources[last_required_previous_ix:]:
+ source_dir = sourcecache.export(source)
+ import_dir.import_files(source_dir)
+ except SourceCacheError as e:
+ raise ElementError("Error trying to export source for {}: {}"
+ .format(self.name, e))
+ except VirtualDirectoryError as e:
+ raise ElementError("Error trying to import sources together for {}: {}"
+ .format(self.name, e),
+ reason="import-source-files-fail")
+
+ with utils._deterministic_umask():
+ vdirectory.import_files(import_dir)
+
+ # Ensure deterministic mtime of sources at build time
+ vdirectory.set_deterministic_mtime()
+ # Ensure deterministic owners of sources at build time
+ vdirectory.set_deterministic_user()
+
+ # _set_required():
+ #
+ # Mark this element and its runtime dependencies as required.
+ # This unblocks pull/fetch/build.
+ #
+ def _set_required(self):
+ if self.__required:
+ # Already done
+ return
+
+ self.__required = True
+
+ # Request artifacts of runtime dependencies
+ for dep in self.dependencies(Scope.RUN, recurse=False):
+ dep._set_required()
+
+ self._update_state()
+
+ # _is_required():
+ #
+ # Returns whether this element has been marked as required.
+ #
+ def _is_required(self):
+ return self.__required
+
+ # _set_artifact_files_required():
+ #
+ # Mark artifact files for this element and its runtime dependencies as
+ # required in the local cache.
+ #
+ def _set_artifact_files_required(self):
+ if self.__artifact_files_required:
+ # Already done
+ return
+
+ self.__artifact_files_required = True
+
+ # Request artifact files of runtime dependencies
+ for dep in self.dependencies(Scope.RUN, recurse=False):
+ dep._set_artifact_files_required()
+
+ # _artifact_files_required():
+ #
+ # Returns whether artifact files for this element have been marked as required.
+ #
+ def _artifact_files_required(self):
+ return self.__artifact_files_required
+
+ # _schedule_assemble():
+ #
+ # This is called in the main process before the element is assembled
+ # in a subprocess.
+ #
+ def _schedule_assemble(self):
+ assert not self.__assemble_scheduled
+ self.__assemble_scheduled = True
+
+ # Requests artifacts of build dependencies
+ for dep in self.dependencies(Scope.BUILD, recurse=False):
+ dep._set_required()
+
+ self._set_required()
+
+ # Invalidate workspace key as the build modifies the workspace directory
+ workspace = self._get_workspace()
+ if workspace:
+ workspace.invalidate_key()
+
+ self._update_state()
+
+ # _assemble_done():
+ #
+ # This is called in the main process after the element has been assembled
+ # and in a subprocess after assembly completes.
+ #
+ # This will result in updating the element state.
+ #
+ def _assemble_done(self):
+ assert self.__assemble_scheduled
+
+ self.__assemble_scheduled = False
+ self.__assemble_done = True
+
+ self.__update_state_recursively()
+
+ if self._get_workspace() and self._cached_success():
+ assert utils._is_main_process(), \
+ "Attempted to save workspace configuration from child process"
+ #
+ # Note that this block can only happen in the
+ # main process, since `self._cached_success()` cannot
+ # be true when assembly is successful in the task.
+ #
+ # For this reason, it is safe to update and
+ # save the workspaces configuration
+ #
+ key = self._get_cache_key()
+ workspace = self._get_workspace()
+ workspace.last_successful = key
+ workspace.clear_running_files()
+ self._get_context().get_workspaces().save_config()
+
+ # This element will have already been marked as
+ # required, but we bump the atime again, in case
+ # we did not know the cache key until now.
+ #
+ # FIXME: This is not exactly correct, we should be
+ # doing this at the time which we have discovered
+ # a new cache key, this just happens to be the
+ # last place where that can happen.
+ #
+ # Ultimately, we should be refactoring
+ # Element._update_state() such that we know
+ # when a cache key is actually discovered.
+ #
+ self.__artifacts.mark_required_elements([self])
+
+ # _assemble():
+ #
+ # Internal method for running the entire build phase.
+ #
+ # This will:
+ # - Prepare a sandbox for the build
+ # - Call the public abstract methods for the build phase
+ # - Cache the resulting artifact
+ #
+ # Returns:
+ # (int): The size of the newly cached artifact
+ #
+ def _assemble(self):
+
+ # Assert call ordering
+ assert not self._cached_success()
+
+ context = self._get_context()
+ with self._output_file() as output_file:
+
+ if not self.__sandbox_config_supported:
+ self.warn("Sandbox configuration is not supported by the platform.",
+ detail="Falling back to UID {} GID {}. Artifact will not be pushed."
+ .format(self.__sandbox_config.build_uid, self.__sandbox_config.build_gid))
+
+ # Explicitly clean it up, keep the build dir around if exceptions are raised
+ os.makedirs(context.builddir, exist_ok=True)
+ rootdir = tempfile.mkdtemp(prefix="{}-".format(self.normal_name), dir=context.builddir)
+
+ # Cleanup the build directory on explicit SIGTERM
+ def cleanup_rootdir():
+ utils._force_rmtree(rootdir)
+
+ with _signals.terminator(cleanup_rootdir), \
+ self.__sandbox(rootdir, output_file, output_file, self.__sandbox_config) as sandbox: # noqa
+
+ # Let the sandbox know whether the buildtree will be required.
+ # This allows the remote execution sandbox to skip buildtree
+ # download when it's not needed.
+ buildroot = self.get_variable('build-root')
+ cache_buildtrees = context.cache_buildtrees
+ if cache_buildtrees != 'never':
+ always_cache_buildtrees = cache_buildtrees == 'always'
+ sandbox._set_build_directory(buildroot, always=always_cache_buildtrees)
+
+ if not self.BST_RUN_COMMANDS:
+ # Element doesn't need to run any commands in the sandbox.
+ #
+ # Disable Sandbox.run() to allow CasBasedDirectory for all
+ # sandboxes.
+ sandbox._disable_run()
+
+ # By default, the dynamic public data is the same as the static public data.
+ # The plugin's assemble() method may modify this, though.
+ self.__dynamic_public = _yaml.node_copy(self.__public)
+
+ # Call the abstract plugin methods
+
+ # Step 1 - Configure
+ self.__configure_sandbox(sandbox)
+ # Step 2 - Stage
+ self.stage(sandbox)
+ try:
+ if self.__batch_prepare_assemble:
+ cm = sandbox.batch(self.__batch_prepare_assemble_flags,
+ collect=self.__batch_prepare_assemble_collect)
+ else:
+ cm = contextlib.suppress()
+
+ with cm:
+ # Step 3 - Prepare
+ self.__prepare(sandbox)
+ # Step 4 - Assemble
+ collect = self.assemble(sandbox) # pylint: disable=assignment-from-no-return
+
+ self.__set_build_result(success=True, description="succeeded")
+ except (ElementError, SandboxCommandError) as e:
+ # Shelling into a sandbox is useful to debug this error
+ e.sandbox = True
+
+ # If there is a workspace open on this element, it will have
+ # been mounted for sandbox invocations instead of being staged.
+ #
+ # In order to preserve the correct failure state, we need to
+ # copy over the workspace files into the appropriate directory
+ # in the sandbox.
+ #
+ workspace = self._get_workspace()
+ if workspace and self.__staged_sources_directory:
+ sandbox_vroot = sandbox.get_virtual_directory()
+ path_components = self.__staged_sources_directory.lstrip(os.sep).split(os.sep)
+ sandbox_vpath = sandbox_vroot.descend(*path_components)
+ try:
+ sandbox_vpath.import_files(workspace.get_absolute_path())
+ except UtilError as e2:
+ self.warn("Failed to preserve workspace state for failed build sysroot: {}"
+ .format(e2))
+
+ self.__set_build_result(success=False, description=str(e), detail=e.detail)
+ self._cache_artifact(rootdir, sandbox, e.collect)
+
+ raise
+ else:
+ return self._cache_artifact(rootdir, sandbox, collect)
+ finally:
+ cleanup_rootdir()
+
+ def _cache_artifact(self, rootdir, sandbox, collect):
+
+ context = self._get_context()
+ buildresult = self.__build_result
+ publicdata = self.__dynamic_public
+ sandbox_vroot = sandbox.get_virtual_directory()
+ collectvdir = None
+ sandbox_build_dir = None
+
+ cache_buildtrees = context.cache_buildtrees
+ build_success = buildresult[0]
+
+ # cache_buildtrees defaults to 'auto', only caching buildtrees
+ # when necessary, which includes failed builds.
+ # If only caching failed artifact buildtrees, then query the build
+ # result. Element types without a build-root dir will be cached
+ # with an empty buildtreedir regardless of this configuration.
+
+ if cache_buildtrees == 'always' or (cache_buildtrees == 'auto' and not build_success):
+ try:
+ sandbox_build_dir = sandbox_vroot.descend(
+ *self.get_variable('build-root').lstrip(os.sep).split(os.sep))
+ except VirtualDirectoryError:
+ # Directory could not be found. Pre-virtual
+ # directory behaviour was to continue silently
+ # if the directory could not be found.
+ pass
+
+ if collect is not None:
+ try:
+ collectvdir = sandbox_vroot.descend(*collect.lstrip(os.sep).split(os.sep))
+ except VirtualDirectoryError:
+ pass
+
+ # ensure we have cache keys
+ self._assemble_done()
+
+ with self.timed_activity("Caching artifact"):
+ artifact_size = self.__artifact.cache(rootdir, sandbox_build_dir, collectvdir,
+ buildresult, publicdata)
+
+ if collect is not None and collectvdir is None:
+ raise ElementError(
+ "Directory '{}' was not found inside the sandbox, "
+ "unable to collect artifact contents"
+ .format(collect))
+
+ return artifact_size
+
+ def _get_build_log(self):
+ return self._build_log_path
+
+ # _fetch_done()
+ #
+ # Indicates that fetching the sources for this element has been done.
+ #
+ def _fetch_done(self):
+ # We are not updating the state recursively here since fetching can
+ # never end up updating them.
+
+ # Fetching changes the source state from RESOLVED to CACHED
+ # Fetching cannot change the source state from INCONSISTENT to CACHED because
+ # we prevent fetching when it's INCONSISTENT.
+ # Therefore, only the source state will change.
+ self.__update_source_state()
+
+ # _pull_pending()
+ #
+ # Check whether the artifact will be pulled. If the pull operation is to
+ # include a specific subdir of the element artifact (from cli or user conf)
+ # then the local cache is queried for the subdir's existence.
+ #
+ # Returns:
+ # (bool): Whether a pull operation is pending
+ #
+ def _pull_pending(self):
+ if self._get_workspace():
+ # Workspace builds are never pushed to artifact servers
+ return False
+
+ # Check whether the pull has been invoked with a specific subdir requested
+ # in user context, so as to complete a partial artifact
+ pull_buildtrees = self._get_context().pull_buildtrees
+
+ if self.__strict_artifact:
+ if self.__strict_artifact.cached() and pull_buildtrees:
+ # If we've specified a subdir, check if the subdir is cached locally
+ # or if it's possible to get
+ if self._cached_buildtree() or not self._buildtree_exists():
+ return False
+ elif self.__strict_artifact.cached():
+ return False
+
+ # Pull is pending if artifact remote server available
+ # and pull has not been attempted yet
+ return self.__artifacts.has_fetch_remotes(plugin=self) and not self.__pull_done
+
+ # _pull_done()
+ #
+ # Indicate that pull was attempted.
+ #
+ # This needs to be called in the main process after a pull
+ # succeeds or fails so that we properly update the main
+ # process data model
+ #
+ # This will result in updating the element state.
+ #
+ def _pull_done(self):
+ self.__pull_done = True
+
+ self.__update_state_recursively()
+
+ # _pull():
+ #
+ # Pull artifact from remote artifact repository into local artifact cache.
+ #
+ # Returns: True if the artifact has been downloaded, False otherwise
+ #
+ def _pull(self):
+ context = self._get_context()
+
+ # Get optional specific subdir to pull and optional list to not pull
+ # based off of user context
+ pull_buildtrees = context.pull_buildtrees
+
+ # Attempt to pull artifact without knowing whether it's available
+ pulled = self.__pull_strong(pull_buildtrees=pull_buildtrees)
+
+ if not pulled and not self._cached() and not context.get_strict():
+ pulled = self.__pull_weak(pull_buildtrees=pull_buildtrees)
+
+ if not pulled:
+ return False
+
+ # Notify successful download
+ return True
+
+ def _skip_source_push(self):
+ if not self.__sources or self._get_workspace():
+ return True
+ return not (self.__sourcecache.has_push_remotes(plugin=self) and
+ self._source_cached())
+
+ def _source_push(self):
+ # try and push sources if we've got them
+ if self.__sourcecache.has_push_remotes(plugin=self) and self._source_cached():
+ for source in self.sources():
+ if not self.__sourcecache.push(source):
+ return False
+
+ # Notify successful upload
+ return True
+
+ # _skip_push():
+ #
+ # Determine whether we should create a push job for this element.
+ #
+ # Returns:
+ # (bool): True if this element does not need a push job to be created
+ #
+ def _skip_push(self):
+ if not self.__artifacts.has_push_remotes(plugin=self):
+ # No push remotes for this element's project
+ return True
+
+ # Do not push elements that aren't cached, or that are cached with a dangling buildtree
+ # ref, unless the element type is expected to have an empty buildtree directory
+ if not self._cached_buildtree() and self._buildtree_exists():
+ return True
+
+ # Do not push tainted artifact
+ if self.__get_tainted():
+ return True
+
+ return False
+
+ # _push():
+ #
+ # Push locally cached artifact to remote artifact repository.
+ #
+ # Returns:
+ # (bool): True if the remote was updated, False if it already existed
+ # and no update was required
+ #
+ def _push(self):
+ self.__assert_cached()
+
+ if self.__get_tainted():
+ self.warn("Not pushing tainted artifact.")
+ return False
+
+ # Push all keys used for local commit via the Artifact member
+ pushed = self.__artifacts.push(self, self.__artifact)
+ if not pushed:
+ return False
+
+ # Notify successful upload
+ return True
+
+ # _shell():
+ #
+ # Connects the terminal with a shell running in a staged
+ # environment
+ #
+ # Args:
+ # scope (Scope): Either BUILD or RUN scopes are valid, or None
+ # directory (str): A directory to an existing sandbox, or None
+ # mounts (list): A list of (str, str) tuples, representing host/target paths to mount
+ # isolate (bool): Whether to isolate the environment like we do in builds
+ # prompt (str): A suitable prompt string for PS1
+ # command (list): An argv to launch in the sandbox
+ # usebuildtree (bool): Use the buildtree as its source
+ #
+ # Returns: Exit code
+ #
+ # If directory is not specified, one will be staged using scope
+ def _shell(self, scope=None, directory=None, *, mounts=None, isolate=False, prompt=None, command=None,
+ usebuildtree=False):
+
+ with self._prepare_sandbox(scope, directory, shell=True, usebuildtree=usebuildtree) as sandbox:
+ environment = self.get_environment()
+ environment = copy.copy(environment)
+ flags = SandboxFlags.INTERACTIVE | SandboxFlags.ROOT_READ_ONLY
+
+ # Fetch the main toplevel project, in case this is a junctioned
+ # subproject, we want to use the rules defined by the main one.
+ context = self._get_context()
+ project = context.get_toplevel_project()
+ shell_command, shell_environment, shell_host_files = project.get_shell_config()
+
+ if prompt is not None:
+ environment['PS1'] = prompt
+
+ # Special configurations for non-isolated sandboxes
+ if not isolate:
+
+ # Open the network, and reuse calling uid/gid
+ #
+ flags |= SandboxFlags.NETWORK_ENABLED | SandboxFlags.INHERIT_UID
+
+ # Apply project defined environment vars to set for a shell
+ for key, value in _yaml.node_items(shell_environment):
+ environment[key] = value
+
+ # Setup any requested bind mounts
+ if mounts is None:
+ mounts = []
+
+ for mount in shell_host_files + mounts:
+ if not os.path.exists(mount.host_path):
+ if not mount.optional:
+ self.warn("Not mounting non-existing host file: {}".format(mount.host_path))
+ else:
+ sandbox.mark_directory(mount.path)
+ sandbox._set_mount_source(mount.path, mount.host_path)
+
+ if command:
+ argv = [arg for arg in command]
+ else:
+ argv = shell_command
+
+ self.status("Running command", detail=" ".join(argv))
+
+ # Run shells with network enabled and readonly root.
+ return sandbox.run(argv, flags, env=environment)
+
+ # _open_workspace():
+ #
+ # "Open" a workspace for this element
+ #
+ # This requires that a workspace already be created in
+ # the workspaces metadata first.
+ #
+ def _open_workspace(self):
+ context = self._get_context()
+ workspace = self._get_workspace()
+ assert workspace is not None
+
+ # First lets get a temp dir in our build directory
+ # and stage there, then link the files over to the desired
+ # path.
+ #
+ # We do this so that force opening workspaces which overwrites
+ # files in the target directory actually works without any
+ # additional support from Source implementations.
+ #
+ os.makedirs(context.builddir, exist_ok=True)
+ with utils._tempdir(dir=context.builddir, prefix='workspace-{}'
+ .format(self.normal_name)) as temp:
+ for source in self.sources():
+ source._init_workspace(temp)
+
+ # Now hardlink the files into the workspace target.
+ utils.link_files(temp, workspace.get_absolute_path())
+
+ # _get_workspace():
+ #
+ # Returns:
+ # (Workspace|None): A workspace associated with this element
+ #
+ def _get_workspace(self):
+ workspaces = self._get_context().get_workspaces()
+ return workspaces.get_workspace(self._get_full_name())
+
+ # _write_script():
+ #
+ # Writes a script to the given directory.
+ def _write_script(self, directory):
+ with open(_site.build_module_template, "r") as f:
+ script_template = f.read()
+
+ variable_string = ""
+ for var, val in self.get_environment().items():
+ variable_string += "{0}={1} ".format(var, val)
+
+ script = script_template.format(
+ name=self.normal_name,
+ build_root=self.get_variable('build-root'),
+ install_root=self.get_variable('install-root'),
+ variables=variable_string,
+ commands=self.generate_script()
+ )
+
+ os.makedirs(directory, exist_ok=True)
+ script_path = os.path.join(directory, "build-" + self.normal_name)
+
+ with self.timed_activity("Writing build script", silent_nested=True):
+ with utils.save_file_atomic(script_path, "w") as script_file:
+ script_file.write(script)
+
+ os.chmod(script_path, stat.S_IEXEC | stat.S_IREAD)
+
+ # _subst_string()
+ #
+ # Substitute a string; this is an internal function related
+ # to how junctions are loaded and needs to be more generic
+ # than the public node_subst_member()
+ #
+ # Args:
+ # value (str): A string value
+ #
+ # Returns:
+ # (str): The string after substitutions have occurred
+ #
+ def _subst_string(self, value):
+ return self.__variables.subst(value)
+
+ # Returns the element whose sources this element is ultimately derived from.
+ #
+ # This is intended for being used to redirect commands that operate on an
+ # element to the element whose sources it is ultimately derived from.
+ #
+ # For example, element A is a build element depending on source foo,
+ # element B is a filter element that depends on element A. The source
+ # element of B is A, since B depends on A, and A has sources.
+ #
+ def _get_source_element(self):
+ return self
+
+ # _cached_buildtree()
+ #
+ # Check if element artifact contains expected buildtree. An
+ # element's buildtree artifact will not be present if the rest
+ # of the partial artifact is not cached.
+ #
+ # Returns:
+ # (bool): True if artifact cached with buildtree, False if
+ # element not cached or missing expected buildtree.
+ # Note this only confirms if a buildtree is present,
+ # not its contents.
+ #
+ def _cached_buildtree(self):
+ if not self._cached():
+ return False
+
+ return self.__artifact.cached_buildtree()
+
+ # _buildtree_exists()
+ #
+ # Check if artifact was created with a buildtree. This does not check
+ # whether the buildtree is present in the local cache.
+ #
+ # Returns:
+ # (bool): True if artifact was created with buildtree, False if
+ # element not cached or not created with a buildtree.
+ #
+ def _buildtree_exists(self):
+ if not self._cached():
+ return False
+
+ return self.__artifact.buildtree_exists()
+
+ # _cached_logs()
+ #
+ # Check if the artifact is cached with log files.
+ #
+ # Returns:
+ # (bool): True if artifact is cached with logs, False if
+ # element not cached or missing logs.
+ #
+ def _cached_logs(self):
+ return self.__artifact.cached_logs()
+
+ # _fetch()
+ #
+ # Fetch the element's sources.
+ #
+ # Raises:
+ # SourceError: If one of the element sources has an error
+ #
+ def _fetch(self, fetch_original=False):
+ previous_sources = []
+ sources = self.__sources
+ fetch_needed = False
+ if sources and not fetch_original:
+ for source in self.__sources:
+ if self.__sourcecache.contains(source):
+ continue
+
+ # try and fetch from source cache
+ if source._get_consistency() < Consistency.CACHED and \
+ self.__sourcecache.has_fetch_remotes():
+ if self.__sourcecache.pull(source):
+ continue
+
+ fetch_needed = True
+
+ # We need to fetch original sources
+ if fetch_needed or fetch_original:
+ for source in self.sources():
+ source_consistency = source._get_consistency()
+ if source_consistency != Consistency.CACHED:
+ source._fetch(previous_sources)
+ previous_sources.append(source)
+
+ self.__cache_sources()
+
+ # _calculate_cache_key():
+ #
+ # Calculates the cache key
+ #
+ # Returns:
+ # (str): A hex digest cache key for this Element, or None
+ #
+ # None is returned if information for the cache key is missing.
+ #
+ def _calculate_cache_key(self, dependencies):
+ # No cache keys for dependencies which have no cache keys
+ if None in dependencies:
+ return None
+
+ # Generate dict that is used as base for all cache keys
+ if self.__cache_key_dict is None:
+ # Filter out nocache variables from the element's environment
+ cache_env = {
+ key: value
+ for key, value in self.__environment.items()
+ if key not in self.__env_nocache
+ }
+
+ context = self._get_context()
+ project = self._get_project()
+ workspace = self._get_workspace()
+
+ self.__cache_key_dict = {
+ 'artifact-version': "{}.{}".format(BST_CORE_ARTIFACT_VERSION,
+ self.BST_ARTIFACT_VERSION),
+ 'context': context.get_cache_key(),
+ 'project': project.get_cache_key(),
+ 'element': self.get_unique_key(),
+ 'execution-environment': self.__sandbox_config.get_unique_key(),
+ 'environment': cache_env,
+ 'sources': [s._get_unique_key(workspace is None) for s in self.__sources],
+ 'workspace': '' if workspace is None else workspace.get_key(self._get_project()),
+ 'public': self.__public,
+ 'cache': 'CASCache'
+ }
+
+ self.__cache_key_dict['fatal-warnings'] = sorted(project._fatal_warnings)
+
+ cache_key_dict = self.__cache_key_dict.copy()
+ cache_key_dict['dependencies'] = dependencies
+
+ return _cachekey.generate_key(cache_key_dict)
+
+ # Check if sources are cached, generating the source key if it hasn't been
+ def _source_cached(self):
+ if self.__sources:
+ sourcecache = self._get_context().sourcecache
+
+ # Go through sources we'll cache generating keys
+ for ix, source in enumerate(self.__sources):
+ if not source._key:
+ if source.BST_REQUIRES_PREVIOUS_SOURCES_STAGE:
+ source._generate_key(self.__sources[:ix])
+ else:
+ source._generate_key([])
+
+ # Check all sources are in source cache
+ for source in self.__sources:
+ if not sourcecache.contains(source):
+ return False
+
+ return True
+
+ def _should_fetch(self, fetch_original=False):
+ """ return bool of if we need to run the fetch stage for this element
+
+ Args:
+ fetch_original (bool): whether we need the original unstaged source
+ """
+ if (self._get_consistency() == Consistency.CACHED and fetch_original) or \
+ (self._source_cached() and not fetch_original):
+ return False
+ else:
+ return True
+
+ #############################################################
+ # Private Local Methods #
+ #############################################################
+
+ # __update_source_state()
+ #
+ # Updates source consistency state
+ #
+ # An element's source state must be resolved before it may compute
+ # cache keys, because the source's ref, whether defined in yaml or
+ # from the workspace, is a component of the element's cache keys.
+ #
+ def __update_source_state(self):
+
+ # Cannot resolve source state until tracked
+ if self.__tracking_scheduled:
+ return
+
+ self.__consistency = Consistency.CACHED
+ workspace = self._get_workspace()
+
+ # Special case for workspaces
+ if workspace:
+
+ # A workspace is considered inconsistent in the case
+ # that its directory went missing
+ #
+ fullpath = workspace.get_absolute_path()
+ if not os.path.exists(fullpath):
+ self.__consistency = Consistency.INCONSISTENT
+ else:
+
+ # Determine overall consistency of the element
+ for source in self.__sources:
+ source._update_state()
+ self.__consistency = min(self.__consistency, source._get_consistency())
+
+ # __can_build_incrementally()
+ #
+ # Check if the element can be built incrementally, this
+ # is used to decide how to stage things
+ #
+ # Returns:
+ # (bool): Whether this element can be built incrementally
+ #
+ def __can_build_incrementally(self):
+ return bool(self._get_workspace())
+
+ # __configure_sandbox():
+ #
+ # Internal method for calling public abstract configure_sandbox() method.
+ #
+ def __configure_sandbox(self, sandbox):
+ self.__batch_prepare_assemble = False
+
+ self.configure_sandbox(sandbox)
+
+ # __prepare():
+ #
+ # Internal method for calling public abstract prepare() method.
+ #
+ def __prepare(self, sandbox):
+ workspace = self._get_workspace()
+
+ # We need to ensure that the prepare() method is only called
+ # once in workspaces, because the changes will persist across
+ # incremental builds - not desirable, for example, in the case
+ # of autotools' `./configure`.
+ if not (workspace and workspace.prepared):
+ self.prepare(sandbox)
+
+ if workspace:
+ def mark_workspace_prepared():
+ workspace.prepared = True
+
+ # Defer workspace.prepared setting until pending batch commands
+ # have been executed.
+ sandbox._callback(mark_workspace_prepared)
+
+ # __assert_cached()
+ #
+ # Raises an error if the artifact is not cached.
+ #
+ def __assert_cached(self):
+ assert self._cached(), "{}: Missing artifact {}".format(
+ self, self._get_brief_display_key())
+
+ # __get_tainted():
+ #
+ # Checks whether this artifact should be pushed to an artifact cache.
+ #
+ # Args:
+ # recalculate (bool) - Whether to force recalculation
+ #
+ # Returns:
+ # (bool) False if this artifact should be excluded from pushing.
+ #
+ # Note:
+ # This method should only be called after the element's
+ # artifact is present in the local artifact cache.
+ #
+ def __get_tainted(self, recalculate=False):
+ if recalculate or self.__tainted is None:
+
+ # Whether this artifact has a workspace
+ workspaced = self.__artifact.get_metadata_workspaced()
+
+ # Whether this artifact's dependencies have workspaces
+ workspaced_dependencies = self.__artifact.get_metadata_workspaced_dependencies()
+
+ # Other conditions should be or-ed
+ self.__tainted = (workspaced or workspaced_dependencies or
+ not self.__sandbox_config_supported)
+
+ return self.__tainted
+
+ # __use_remote_execution():
+ #
+ # Returns True if remote execution is configured and the element plugin
+ # supports it.
+ #
+ def __use_remote_execution(self):
+ return bool(self.__remote_execution_specs)
+
+ # __sandbox():
+ #
+ # A context manager to prepare a Sandbox object at the specified directory,
+ # if the directory is None, then a directory will be chosen automatically
+ # in the configured build directory.
+ #
+ # Args:
+ # directory (str): The local directory where the sandbox will live, or None
+ # stdout (fileobject): The stream for stdout for the sandbox
+ # stderr (fileobject): The stream for stderr for the sandbox
+ # config (SandboxConfig): The SandboxConfig object
+ # allow_remote (bool): Whether the sandbox is allowed to be remote
+ # bare_directory (bool): Whether the directory is bare i.e. doesn't have
+ # a separate 'root' subdir
+ #
+ # Yields:
+ # (Sandbox): A usable sandbox
+ #
+ @contextmanager
+ def __sandbox(self, directory, stdout=None, stderr=None, config=None, allow_remote=True, bare_directory=False):
+ context = self._get_context()
+ project = self._get_project()
+ platform = Platform.get_platform()
+
+ if directory is not None and allow_remote and self.__use_remote_execution():
+
+ if not self.BST_VIRTUAL_DIRECTORY:
+ raise ElementError("Element {} is configured to use remote execution but plugin does not support it."
+ .format(self.name), detail="Plugin '{kind}' does not support virtual directories."
+ .format(kind=self.get_kind()))
+
+ self.info("Using a remote sandbox for artifact {} with directory '{}'".format(self.name, directory))
+
+ output_files_required = context.require_artifact_files or self._artifact_files_required()
+
+ sandbox = SandboxRemote(context, project,
+ directory,
+ plugin=self,
+ stdout=stdout,
+ stderr=stderr,
+ config=config,
+ specs=self.__remote_execution_specs,
+ bare_directory=bare_directory,
+ allow_real_directory=False,
+ output_files_required=output_files_required)
+ yield sandbox
+
+ elif directory is not None and os.path.exists(directory):
+
+ sandbox = platform.create_sandbox(context, project,
+ directory,
+ plugin=self,
+ stdout=stdout,
+ stderr=stderr,
+ config=config,
+ bare_directory=bare_directory,
+ allow_real_directory=not self.BST_VIRTUAL_DIRECTORY)
+ yield sandbox
+
+ else:
+ os.makedirs(context.builddir, exist_ok=True)
+ rootdir = tempfile.mkdtemp(prefix="{}-".format(self.normal_name), dir=context.builddir)
+
+ # Recursive contextmanager...
+ with self.__sandbox(rootdir, stdout=stdout, stderr=stderr, config=config,
+ allow_remote=allow_remote, bare_directory=False) as sandbox:
+ yield sandbox
+
+ # Cleanup the build dir
+ utils._force_rmtree(rootdir)
+
+ @classmethod
+ def __compose_default_splits(cls, project, defaults, is_junction):
+
+ element_public = _yaml.node_get(defaults, Mapping, 'public', default_value={})
+ element_bst = _yaml.node_get(element_public, Mapping, 'bst', default_value={})
+ element_splits = _yaml.node_get(element_bst, Mapping, 'split-rules', default_value={})
+
+ if is_junction:
+ splits = _yaml.node_copy(element_splits)
+ else:
+ assert project._splits is not None
+
+ splits = _yaml.node_copy(project._splits)
+ # Extend project wide split rules with any split rules defined by the element
+ _yaml.composite(splits, element_splits)
+
+ _yaml.node_set(element_bst, 'split-rules', splits)
+ _yaml.node_set(element_public, 'bst', element_bst)
+ _yaml.node_set(defaults, 'public', element_public)
+
+ @classmethod
+ def __init_defaults(cls, project, plugin_conf, kind, is_junction):
+ # Defaults are loaded once per class and then reused
+ #
+ if cls.__defaults is None:
+ defaults = _yaml.new_empty_node()
+
+ if plugin_conf is not None:
+ # Load the plugin's accompanying .yaml file if one was provided
+ try:
+ defaults = _yaml.load(plugin_conf, os.path.basename(plugin_conf))
+ except LoadError as e:
+ if e.reason != LoadErrorReason.MISSING_FILE:
+ raise e
+
+ # Special case; compose any element-wide split-rules declarations
+ cls.__compose_default_splits(project, defaults, is_junction)
+
+ # Override the element's defaults with element specific
+ # overrides from the project.conf
+ if is_junction:
+ elements = project.first_pass_config.element_overrides
+ else:
+ elements = project.element_overrides
+
+ overrides = _yaml.node_get(elements, Mapping, kind, default_value=None)
+ if overrides:
+ _yaml.composite(defaults, overrides)
+
+ # Set the data class wide
+ cls.__defaults = defaults
+
+ # This will acquire the environment to be used when
+ # creating sandboxes for this element
+ #
+ @classmethod
+ def __extract_environment(cls, project, meta):
+ default_env = _yaml.node_get(cls.__defaults, Mapping, 'environment', default_value={})
+
+ if meta.is_junction:
+ environment = _yaml.new_empty_node()
+ else:
+ environment = _yaml.node_copy(project.base_environment)
+
+ _yaml.composite(environment, default_env)
+ _yaml.composite(environment, meta.environment)
+ _yaml.node_final_assertions(environment)
+
+ return environment
+
+ # This will resolve the final environment to be used when
+ # creating sandboxes for this element
+ #
+ def __expand_environment(self, environment):
+ # Resolve variables in environment value strings
+ final_env = {}
+ for key, _ in self.node_items(environment):
+ final_env[key] = self.node_subst_member(environment, key)
+
+ return final_env
+
+ @classmethod
+ def __extract_env_nocache(cls, project, meta):
+ if meta.is_junction:
+ project_nocache = []
+ else:
+ project_nocache = project.base_env_nocache
+
+ default_nocache = _yaml.node_get(cls.__defaults, list, 'environment-nocache', default_value=[])
+ element_nocache = meta.env_nocache
+
+ # Accumulate values from the element default, the project and the element
+ # itself to form a complete list of nocache env vars.
+ env_nocache = set(project_nocache + default_nocache + element_nocache)
+
+ # Convert back to list now we know they're unique
+ return list(env_nocache)
+
+ # This will resolve the final variables to be used when
+ # substituting command strings to be run in the sandbox
+ #
+ @classmethod
+ def __extract_variables(cls, project, meta):
+ default_vars = _yaml.node_get(cls.__defaults, Mapping, 'variables',
+ default_value={})
+
+ if meta.is_junction:
+ variables = _yaml.node_copy(project.first_pass_config.base_variables)
+ else:
+ variables = _yaml.node_copy(project.base_variables)
+
+ _yaml.composite(variables, default_vars)
+ _yaml.composite(variables, meta.variables)
+ _yaml.node_final_assertions(variables)
+
+ for var in ('project-name', 'element-name', 'max-jobs'):
+ provenance = _yaml.node_get_provenance(variables, var)
+ if provenance and not provenance.is_synthetic:
+ raise LoadError(LoadErrorReason.PROTECTED_VARIABLE_REDEFINED,
+ "{}: invalid redefinition of protected variable '{}'"
+ .format(provenance, var))
+
+ return variables
+
+ # This will resolve the final configuration to be handed
+ # off to element.configure()
+ #
+ @classmethod
+ def __extract_config(cls, meta):
+
+ # The default config is already composited with the project overrides
+ config = _yaml.node_get(cls.__defaults, Mapping, 'config', default_value={})
+ config = _yaml.node_copy(config)
+
+ _yaml.composite(config, meta.config)
+ _yaml.node_final_assertions(config)
+
+ return config
+
+ # Sandbox-specific configuration data, to be passed to the sandbox's constructor.
+ #
+ @classmethod
+ def __extract_sandbox_config(cls, project, meta):
+ if meta.is_junction:
+ sandbox_config = _yaml.new_node_from_dict({
+ 'build-uid': 0,
+ 'build-gid': 0
+ })
+ else:
+ sandbox_config = _yaml.node_copy(project._sandbox)
+
+ # Get the platform to ask for host architecture
+ platform = Platform.get_platform()
+ host_arch = platform.get_host_arch()
+ host_os = platform.get_host_os()
+
+ # The default config is already composited with the project overrides
+ sandbox_defaults = _yaml.node_get(cls.__defaults, Mapping, 'sandbox', default_value={})
+ sandbox_defaults = _yaml.node_copy(sandbox_defaults)
+
+ _yaml.composite(sandbox_config, sandbox_defaults)
+ _yaml.composite(sandbox_config, meta.sandbox)
+ _yaml.node_final_assertions(sandbox_config)
+
+ # Sandbox config, unlike others, has fixed members so we should validate them
+ _yaml.node_validate(sandbox_config, ['build-uid', 'build-gid', 'build-os', 'build-arch'])
+
+ build_arch = _yaml.node_get(sandbox_config, str, 'build-arch', default_value=None)
+ if build_arch:
+ build_arch = Platform.canonicalize_arch(build_arch)
+ else:
+ build_arch = host_arch
+
+ return SandboxConfig(
+ _yaml.node_get(sandbox_config, int, 'build-uid'),
+ _yaml.node_get(sandbox_config, int, 'build-gid'),
+ _yaml.node_get(sandbox_config, str, 'build-os', default_value=host_os),
+ build_arch)
+
+ # This makes a special exception for the split rules, which
+ # elements may extend but whose defaults are defined in the project.
+ #
+ @classmethod
+ def __extract_public(cls, meta):
+ base_public = _yaml.node_get(cls.__defaults, Mapping, 'public', default_value={})
+ base_public = _yaml.node_copy(base_public)
+
+ base_bst = _yaml.node_get(base_public, Mapping, 'bst', default_value={})
+ base_splits = _yaml.node_get(base_bst, Mapping, 'split-rules', default_value={})
+
+ element_public = _yaml.node_copy(meta.public)
+ element_bst = _yaml.node_get(element_public, Mapping, 'bst', default_value={})
+ element_splits = _yaml.node_get(element_bst, Mapping, 'split-rules', default_value={})
+
+ # Allow elements to extend the default splits defined in their project or
+ # element specific defaults
+ _yaml.composite(base_splits, element_splits)
+
+ _yaml.node_set(element_bst, 'split-rules', base_splits)
+ _yaml.node_set(element_public, 'bst', element_bst)
+
+ _yaml.node_final_assertions(element_public)
+
+ return element_public
+
+ # Expand the splits in the public data using the Variables in the element
+ def __expand_splits(self, element_public):
+ element_bst = _yaml.node_get(element_public, Mapping, 'bst', default_value={})
+ element_splits = _yaml.node_get(element_bst, Mapping, 'split-rules', default_value={})
+
+ # Resolve any variables in the public split rules directly
+ for domain, splits in self.node_items(element_splits):
+ splits = [
+ self.__variables.subst(split.strip())
+ for split in splits
+ ]
+ _yaml.node_set(element_splits, domain, splits)
+
+ return element_public
+
+ def __init_splits(self):
+ bstdata = self.get_public_data('bst')
+ splits = self.node_get_member(bstdata, dict, 'split-rules')
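+ # Compile each split domain's glob rules into a single anchored regex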
+ self.__splits = {
+ domain: re.compile('^(?:' + '|'.join([utils._glob2re(r) for r in rules]) + ')$')
+ for domain, rules in self.node_items(splits)
+ }
+
+ # __split_filter():
+ #
+ # Returns True if the file with the specified `path` is included in the
+ # specified split domains. This is used by `__split_filter_func()` to create
+ # a filter callback.
+ #
+ # Args:
+ # element_domains (list): All domains for this element
+ # include (list): A list of domains to include files from
+ # exclude (list): A list of domains to exclude files from
+ # orphans (bool): Whether to include files not spoken for by split domains
+ # path (str): The relative path of the file
+ #
+ # Returns:
+ # (bool): Whether to include the specified file
+ #
+ def __split_filter(self, element_domains, include, exclude, orphans, path):
+ # Absolute path is required for matching
+ filename = os.path.join(os.sep, path)
+
+ include_file = False
+ exclude_file = False
+ claimed_file = False
+
+ for domain in element_domains:
+ if self.__splits[domain].match(filename):
+ claimed_file = True
+ if domain in include:
+ include_file = True
+ if domain in exclude:
+ exclude_file = True
+
+ if orphans and not claimed_file:
+ include_file = True
+
+ return include_file and not exclude_file
+
+ # __split_filter_func():
+ #
+ # Returns callable split filter function for use with `copy_files()`,
+ # `link_files()` or `Directory.import_files()`.
+ #
+ # Args:
+ # include (list): An optional list of domains to include files from
+ # exclude (list): An optional list of domains to exclude files from
+ # orphans (bool): Whether to include files not spoken for by split domains
+ #
+ # Returns:
+ # (callable): Filter callback that returns True if the file is included
+ # in the specified split domains.
+ #
+ def __split_filter_func(self, include=None, exclude=None, orphans=True):
+ # No splitting requested, no filter needed
+ if orphans and not (include or exclude):
+ return None
+
+ if not self.__splits:
+ self.__init_splits()
+
+ element_domains = list(self.__splits.keys())
+ if not include:
+ include = element_domains
+ if not exclude:
+ exclude = []
+
+ # Ignore domains that don't apply to this element
+ #
+ include = [domain for domain in include if domain in element_domains]
+ exclude = [domain for domain in exclude if domain in element_domains]
+
+ # The arguments element_domains, include, exclude, and orphans are
+ # the same for all files. Use `partial` to create a function with
+ # the required callback signature: a single `path` parameter.
+ return partial(self.__split_filter, element_domains, include, exclude, orphans)
+
+ def __compute_splits(self, include=None, exclude=None, orphans=True):
+ filter_func = self.__split_filter_func(include=include, exclude=exclude, orphans=orphans)
+
+ files_vdir = self.__artifact.get_files()
+
+ element_files = files_vdir.list_relative_paths()
+
+ if not filter_func:
+ # No splitting requested, just report complete artifact
+ yield from element_files
+ else:
+ for filename in element_files:
+ if filter_func(filename):
+ yield filename
+
+ def __file_is_whitelisted(self, path):
+ # Considered storing the whitelist regex for re-use, but public data
+ # can be altered mid-build.
+ # Public data is not guaranteed to stay the same for the duration of
+ # the build, but I can think of no reason to change it mid-build.
+ # If this ever changes, things will go wrong unexpectedly.
+ if not self.__whitelist_regex:
+ bstdata = self.get_public_data('bst')
+ whitelist = _yaml.node_get(bstdata, list, 'overlap-whitelist', default_value=[])
+ whitelist_expressions = [utils._glob2re(self.__variables.subst(exp.strip())) for exp in whitelist]
+ expression = ('^(?:' + '|'.join(whitelist_expressions) + ')$')
+ self.__whitelist_regex = re.compile(expression)
+ return self.__whitelist_regex.match(os.path.join(os.sep, path))
+
+ # __load_public_data():
+ #
+ # Loads the public data from the cached artifact
+ #
+ def __load_public_data(self):
+ self.__assert_cached()
+ assert self.__dynamic_public is None
+
+ self.__dynamic_public = self.__artifact.load_public_data()
+
+ def __load_build_result(self):
+ self.__assert_cached()
+ assert self.__build_result is None
+
+ self.__build_result = self.__artifact.load_build_result()
+
+ # __pull_strong():
+ #
+ # Attempt pulling given element from configured artifact caches with
+ # the strict cache key
+ #
+ # Args:
+ # progress (callable): The progress callback, if any
+ # subdir (str): The optional specific subdir to pull
+ # excluded_subdirs (list): The optional list of subdirs to not pull
+ #
+ # Returns:
+ # (bool): Whether or not the pull was successful
+ #
+ def __pull_strong(self, *, pull_buildtrees):
+ weak_key = self._get_cache_key(strength=_KeyStrength.WEAK)
+ key = self.__strict_cache_key
+ if not self.__artifacts.pull(self, key, pull_buildtrees=pull_buildtrees):
+ return False
+
+ # update weak ref by pointing it to this newly fetched artifact
+ self.__artifacts.link_key(self, key, weak_key)
+
+ return True
+
+ # __pull_weak():
+ #
+ # Attempt pulling given element from configured artifact caches with
+ # the weak cache key
+ #
+ # Args:
+ # subdir (str): The optional specific subdir to pull
+ # excluded_subdirs (list): The optional list of subdirs to not pull
+ #
+ # Returns:
+ # (bool): Whether or not the pull was successful
+ #
+ def __pull_weak(self, *, pull_buildtrees):
+ weak_key = self._get_cache_key(strength=_KeyStrength.WEAK)
+ if not self.__artifacts.pull(self, weak_key,
+ pull_buildtrees=pull_buildtrees):
+ return False
+
+ # extract strong cache key from this newly fetched artifact
+ self._pull_done()
+
+ # create tag for strong cache key
+ key = self._get_cache_key(strength=_KeyStrength.STRONG)
+ self.__artifacts.link_key(self, weak_key, key)
+
+ return True
+
+ # __cache_sources():
+ #
+ # Caches the sources into the local CAS
+ #
+ def __cache_sources(self):
+ if self.__sources and not self._source_cached():
+ last_requires_previous = 0
+ # commit all other sources by themselves
+ for ix, source in enumerate(self.__sources):
+ if source.BST_REQUIRES_PREVIOUS_SOURCES_STAGE:
+ self.__sourcecache.commit(source, self.__sources[last_requires_previous:ix])
+ last_requires_previous = ix
+ else:
+ self.__sourcecache.commit(source, [])
+
+ # __last_source_requires_previous
+ #
+ # This is the last source that requires previous sources to be cached.
+ # Sources listed after this will be cached separately.
+ #
+ # Returns:
+ # (int): index of last source that requires previous sources
+ #
+ def __last_source_requires_previous(self):
+ if self.__last_source_requires_previous_ix is None:
+ last_requires_previous = 0
+ for ix, source in enumerate(self.__sources):
+ if source.BST_REQUIRES_PREVIOUS_SOURCES_STAGE:
+ last_requires_previous = ix
+ self.__last_source_requires_previous_ix = last_requires_previous
+ return self.__last_source_requires_previous_ix
+
+ # __update_state_recursively()
+ #
+ # Update the state of all reverse dependencies, recursively.
+ #
+ def __update_state_recursively(self):
+ queue = _UniquePriorityQueue()
+ queue.push(self._unique_id, self)
+
+ while queue:
+ element = queue.pop()
+
+ old_ready_for_runtime = element.__ready_for_runtime
+ old_strict_cache_key = element.__strict_cache_key
+ element._update_state()
+
+ if element.__ready_for_runtime != old_ready_for_runtime or \
+ element.__strict_cache_key != old_strict_cache_key:
+ for rdep in element.__reverse_dependencies:
+ queue.push(rdep._unique_id, rdep)
+
+ # __reset_cache_data()
+ #
+ # Resets all data related to cache key calculation and whether an artifact
+ # is cached.
+ #
+ # This is useful because we need to know whether a workspace is cached
+ # before we know whether to assemble it, and doing that would generate a
+ # different cache key to the initial one.
+ #
+ def __reset_cache_data(self):
+ self.__build_result = None
+ self.__cache_key_dict = None
+ self.__cache_key = None
+ self.__weak_cache_key = None
+ self.__strict_cache_key = None
+ self.__artifact = None
+ self.__strict_artifact = None
+
+ # __update_cache_keys()
+ #
+ # Updates weak and strict cache keys
+ #
+ # Note that it does not update *all* cache keys - In non-strict mode, the
+ # strong cache key is updated in __update_cache_key_non_strict()
+ #
+ # If the cache keys are not stable (i.e. workspace that isn't cached),
+ # then cache keys are erased.
+ # Otherwise, the weak and strict cache keys will be calculated if not
+ # already set.
+ # The weak cache key is a cache key that doesn't necessarily change when
+ # its dependencies change, useful for avoiding full rebuilds when one's
+ # dependencies guarantee stability across versions.
+ # The strict cache key is a cache key that changes if any build-dependency
+ # has changed.
+ #
+ def __update_cache_keys(self):
+ if self.__weak_cache_key is None:
+ # Calculate weak cache key
+ # Weak cache key includes names of direct build dependencies
+ # but does not include keys of dependencies.
+ if self.BST_STRICT_REBUILD:
+ dependencies = [
+ e._get_cache_key(strength=_KeyStrength.WEAK)
+ for e in self.dependencies(Scope.BUILD)
+ ]
+ else:
+ dependencies = [
+ e.name for e in self.dependencies(Scope.BUILD, recurse=False)
+ ]
+
+ self.__weak_cache_key = self._calculate_cache_key(dependencies)
+
+ if self.__weak_cache_key is None:
+ # Weak cache key could not be calculated yet, therefore
+ # the Strict cache key also can't be calculated yet.
+ return
+
+ if self.__strict_cache_key is None:
+ dependencies = [
+ e.__strict_cache_key for e in self.dependencies(Scope.BUILD)
+ ]
+ self.__strict_cache_key = self._calculate_cache_key(dependencies)
+
+ # __update_artifact_state()
+ #
+ # Updates the data involved in knowing about the artifact corresponding
+ # to this element.
+ #
+ # This involves erasing all data pertaining to artifacts if the cache
+ # key is unstable.
+ #
+ # Element.__update_cache_keys() must be called before this to have
+ # meaningful results, because the element must know its cache key before
+ # it can check whether an artifact exists for that cache key.
+ #
+ def __update_artifact_state(self):
+ context = self._get_context()
+
+ if not self.__weak_cache_key:
+ return
+
+ if not context.get_strict() and not self.__artifact:
+ # We've calculated the weak_key, so instantiate artifact instance member
+ self.__artifact = Artifact(self, context, weak_key=self.__weak_cache_key)
+
+ if not self.__strict_cache_key:
+ return
+
+ if not self.__strict_artifact:
+ self.__strict_artifact = Artifact(self, context, strong_key=self.__strict_cache_key,
+ weak_key=self.__weak_cache_key)
+
+ # In strict mode, the strong cache key always matches the strict cache key
+ if context.get_strict():
+ self.__cache_key = self.__strict_cache_key
+ self.__artifact = self.__strict_artifact
+
+ # Allow caches to be queried, since they may now be cached
+ # The next invocation of Artifact.cached() will access the filesystem.
+ # Note that this will safely do nothing if the artifacts are already cached.
+ self.__strict_artifact.reset_cached()
+ self.__artifact.reset_cached()
+
+ # __update_cache_key_non_strict()
+ #
+ # Calculates the strong cache key if it hasn't already been set.
+ #
+ # When buildstream runs in strict mode, this is identical to the
+ # strict cache key, so no work needs to be done.
+ #
+ # When buildstream is not run in strict mode, this requires the artifact
+ # state (as set in Element.__update_artifact_state()) to be set accordingly,
+ # as the cache key can be loaded from the cache (possibly pulling from
+ # a remote cache).
+ #
+ def __update_cache_key_non_strict(self):
+ if not self.__strict_artifact:
+ return
+
+ # The final cache key can be None here only in non-strict mode
+ if self.__cache_key is None:
+ if self._pull_pending():
+ # Effective strong cache key is unknown until after the pull
+ pass
+ elif self._cached():
+ # Load the strong cache key from the artifact
+ strong_key, _ = self.__artifact.get_metadata_keys()
+ self.__cache_key = strong_key
+ elif self.__assemble_scheduled or self.__assemble_done:
+ # Artifact will or has been built, not downloaded
+ dependencies = [
+ e._get_cache_key() for e in self.dependencies(Scope.BUILD)
+ ]
+ self.__cache_key = self._calculate_cache_key(dependencies)
+
+ if self.__cache_key is None:
+ # Strong cache key could not be calculated yet
+ return
+
+ # Now we have the strong cache key, update the Artifact
+ self.__artifact._cache_key = self.__cache_key
+
+
+def _overlap_error_detail(f, forbidden_overlap_elements, elements):
+ if forbidden_overlap_elements:
+ return ("/{}: {} {} not permitted to overlap other elements, order {} \n"
+ .format(f, " and ".join(forbidden_overlap_elements),
+ "is" if len(forbidden_overlap_elements) == 1 else "are",
+ " above ".join(reversed(elements))))
+ else:
+ return ""
+
+
+# _get_normal_name():
+#
+# Get the element name without path separators or
+# the extension.
+#
+# Args:
+# element_name (str): The element's name
+#
+# Returns:
+# (str): The normalised element name
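+# For example, 'foo/bar.bst' resolves to 'foo-bar'.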
+#
+def _get_normal_name(element_name):
+ return os.path.splitext(element_name.replace(os.sep, '-'))[0]
+
+
+# _compose_artifact_name():
+#
+# Compose the completely resolved 'artifact_name' as a filepath
+#
+# Args:
+# project_name (str): The project's name
+# normal_name (str): The element's normalised name
+# cache_key (str): The relevant cache key
+#
+# Returns:
+# (str): The constructed artifact name path
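+# For example (illustrative values): 'myproject/hello-world/0a1b2c...'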
+#
+def _compose_artifact_name(project_name, normal_name, cache_key):
+ valid_chars = string.digits + string.ascii_letters + '-._'
+ normal_name = ''.join([
+ x if x in valid_chars else '_'
+ for x in normal_name
+ ])
+
+ # Note that project names are not allowed to contain slashes. Element names containing
+ # a '/' will have this replaced with a '-' upon Element object instantiation.
+ return '{0}/{1}/{2}'.format(project_name, normal_name, cache_key)
diff --git a/src/buildstream/plugin.py b/src/buildstream/plugin.py
new file mode 100644
index 000000000..d8b6a7359
--- /dev/null
+++ b/src/buildstream/plugin.py
@@ -0,0 +1,929 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+"""
+Plugin - Base plugin class
+==========================
+BuildStream supports third party plugins to define additional kinds of
+:mod:`Elements <buildstream.element>` and :mod:`Sources <buildstream.source>`.
+
+The common API is documented here, along with some information on how
+external plugin packages are structured.
+
+
+.. _core_plugin_abstract_methods:
+
+Abstract Methods
+----------------
+For both :mod:`Elements <buildstream.element>` and :mod:`Sources <buildstream.source>`,
+it is mandatory to implement the following abstract methods:
+
+* :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`
+
+ Loads the user provided configuration YAML for the given source or element
+
+* :func:`Plugin.preflight() <buildstream.plugin.Plugin.preflight>`
+
+ Early preflight checks allow plugins to bail out early with an error
+ in the case that they can predict that failure is inevitable.
+
+* :func:`Plugin.get_unique_key() <buildstream.plugin.Plugin.get_unique_key>`
+
+ Once all configuration has been loaded and preflight checks have passed,
+ this method is used to inform the core of a plugin's unique configuration.
+
+Configurable Warnings
+---------------------
+Warnings raised through calling :func:`Plugin.warn() <buildstream.plugin.Plugin.warn>` can provide an optional
+``warning_token`` parameter; this will raise a :class:`PluginError` if the warning is configured as fatal within
+the project configuration.
+
+Configurable warnings will be prefixed with :func:`Plugin.get_kind() <buildstream.plugin.Plugin.get_kind>`
+within buildstream and must be prefixed as such in project configurations. For more detail on project configuration
+see :ref:`Configurable Warnings <configurable_warnings>`.
+
+It is important to document these warnings in your plugin documentation to allow users to make full use of them
+while configuring their projects.
+
+Example
+~~~~~~~
+If the :class:`git <buildstream.plugins.sources.git.GitSource>` plugin uses the warning ``"inconsistent-submodule"``
+then it could be referenced in project configuration as ``"git:inconsistent-submodule"``.
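+
+As a minimal sketch of how such a warning might be emitted from a plugin
+implementation (the warning token ``unstaged-files`` is purely illustrative
+and not part of any real plugin):
+
+.. code:: python
+
+   # Emits the configurable warning "<kind>:unstaged-files", which raises
+   # PluginError instead if the project configures it as fatal
+   self.warn("Some files were left unstaged",
+             warning_token="unstaged-files")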
+
+Plugin Structure
+----------------
+A plugin should consist of a `setuptools package
+<http://setuptools.readthedocs.io/en/latest/setuptools.html>`_ that
+advertises contained plugins using `entry points
+<http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_.
+
+A plugin entry point must be a module that extends a class in the
+:ref:`core_framework` to be discovered by BuildStream. A YAML file
+defining plugin default settings with the same name as the module can
+also be defined in the same directory as the plugin module.
+
+.. note::
+
+ BuildStream does not support function/class entry points.
+
+A sample plugin could be structured as such:
+
+.. code-block:: text
+
+ .
+ ├── elements
+ │   ├── autotools.py
+ │   ├── autotools.yaml
+ │   └── __init__.py
+ ├── MANIFEST.in
+ └── setup.py
+
+The setuptools configuration should then contain at least:
+
+setup.py:
+
+.. literalinclude:: ../source/sample_plugin/setup.py
+ :language: python
+
+MANIFEST.in:
+
+.. literalinclude:: ../source/sample_plugin/MANIFEST.in
+ :language: text
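+
+For orientation, the important part of ``setup.py`` is the entry point
+declaration which advertises the plugin modules to BuildStream. The following
+is only a rough sketch; the package name, module paths and entry point group
+are illustrative assumptions, and the sample ``setup.py`` referenced above
+remains the authoritative example:
+
+.. code:: python
+
+   # setup.py (illustrative sketch only)
+   from setuptools import setup, find_packages
+
+   setup(name='sample-plugin',
+         version='0.1',
+         packages=find_packages(),
+         include_package_data=True,
+         entry_points={
+             # Entry point group name assumed here; see the sample above
+             'buildstream.plugins': [
+                 'autotools = elements.autotools',
+             ],
+         })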
+
+Class Reference
+---------------
+"""
+
+import itertools
+import os
+import subprocess
+import sys
+from contextlib import contextmanager
+from weakref import WeakValueDictionary
+
+from . import _yaml
+from . import utils
+from ._exceptions import PluginError, ImplError
+from ._message import Message, MessageType
+from .types import CoreWarnings
+
+
+class Plugin():
+ """Plugin()
+
+ Base Plugin class.
+
+ Some common features to both Sources and Elements are found
+ in this class.
+
+ .. note::
+
+ Derivation of plugins is not supported. Plugins may only
+ derive from the base :mod:`Source <buildstream.source>` and
+ :mod:`Element <buildstream.element>` types, and any convenience
+ subclasses (like :mod:`BuildElement <buildstream.buildelement>`)
+ which are included in the buildstream namespace.
+ """
+
+ BST_REQUIRED_VERSION_MAJOR = 0
+ """Minimum required major version"""
+
+ BST_REQUIRED_VERSION_MINOR = 0
+ """Minimum required minor version"""
+
+ BST_FORMAT_VERSION = 0
+ """The plugin's YAML format version
+
+ This should be set to ``1`` the first time any new configuration
+ is understood by your :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`
+ implementation and subsequently bumped every time your
+ configuration is enhanced.
+
+ .. note::
+
+ Plugins are expected to maintain backward compatibility
+ in the format and configurations they expose. The versioning
+ is intended to track availability of new features only.
+
+    For convenience, the format versions of plugins maintained and
+    distributed with BuildStream are revisioned along with BuildStream's
+    :ref:`core format version <project_format_version>`.
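+
+    For instance, a hypothetical element which starts to understand a new
+    ``strip`` option in its second format revision might declare the
+    following (class and option names are illustrative only):
+
+    .. code:: python
+
+       class ExampleElement(Element):
+           # Bumped when the 'strip' configuration option was introduced
+           BST_FORMAT_VERSION = 2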
+ """
+
+ BST_PLUGIN_DEPRECATED = False
+ """True if this element plugin has been deprecated.
+
+    If this is set to true, BuildStream will emit a deprecation
+    warning when this plugin is loaded. This deprecation warning may
+    be suppressed on a plugin by plugin basis by setting
+    ``suppress-deprecation-warnings: true`` in the relevant section of
+    the project's :ref:`plugin configuration overrides <project_overrides>`.
+
+ """
+
+ BST_PLUGIN_DEPRECATION_MESSAGE = ""
+    """The message printed when this element shows a deprecation warning.
+
+    This should be set if BST_PLUGIN_DEPRECATED is True and should direct the user
+    to the deprecated plugin's replacement.
+
+ """
+
+ # Unique id generator for Plugins
+ #
+ # Each plugin gets a unique id at creation.
+ #
+    # Ids are monotonically increasing integers which
+    # start at 1 (a falsy plugin ID is considered unset
+    # in various parts of the codebase).
+ #
+ __id_generator = itertools.count(1)
+
+ # Hold on to a lookup table by counter of all instantiated plugins.
+ # We use this to send the id back from child processes so we can lookup
+ # corresponding element/source in the master process.
+ #
+ # Use WeakValueDictionary() so the map we use to lookup objects does not
+ # keep the plugins alive after pipeline destruction.
+ #
+ # Note that Plugins can only be instantiated in the main process before
+ # scheduling tasks.
+ __TABLE = WeakValueDictionary()
+
+ def __init__(self, name, context, project, provenance, type_tag, unique_id=None):
+
+ self.name = name
+ """The plugin name
+
+ For elements, this is the project relative bst filename,
+ for sources this is the owning element's name with a suffix
+ indicating its index on the owning element.
+
+ For sources this is for display purposes only.
+ """
+
+ # Unique ID
+ #
+        # This id allows us to uniquely identify a plugin.
+ #
+ # /!\ the unique id must be an increasing value /!\
+ # This is because we are depending on it in buildstream.element.Element
+ # to give us a topological sort over all elements.
+ # Modifying how we handle ids here will modify the behavior of the
+ # Element's state handling.
+ if unique_id is None:
+ # Register ourself in the table containing all existing plugins
+ self._unique_id = next(self.__id_generator)
+ self.__TABLE[self._unique_id] = self
+ else:
+ # If the unique ID is passed in the constructor, then it is a cloned
+ # plugin in a subprocess and should use the same ID.
+ self._unique_id = unique_id
+
+ self.__context = context # The Context object
+ self.__project = project # The Project object
+ self.__provenance = provenance # The Provenance information
+ self.__type_tag = type_tag # The type of plugin (element or source)
+ self.__configuring = False # Whether we are currently configuring
+
+ # Infer the kind identifier
+ modulename = type(self).__module__
+ self.__kind = modulename.split('.')[-1]
+ self.debug("Created: {}".format(self))
+
+ # If this plugin has been deprecated, emit a warning.
+ if self.BST_PLUGIN_DEPRECATED and not self.__deprecation_warning_silenced():
+ detail = "Using deprecated plugin {}: {}".format(self.__kind,
+ self.BST_PLUGIN_DEPRECATION_MESSAGE)
+ self.__message(MessageType.WARN, detail)
+
+ def __del__(self):
+        # Don't send anything through the Message() pipeline at destruction time;
+        # any subsequent lookup of the plugin by unique id would raise KeyError.
+ if self.__context.log_debug:
+ sys.stderr.write("DEBUG: Destroyed: {}\n".format(self))
+
+ def __str__(self):
+ return "{kind} {typetag} at {provenance}".format(
+ kind=self.__kind,
+ typetag=self.__type_tag,
+ provenance=self.__provenance)
+
+ #############################################################
+ # Abstract Methods #
+ #############################################################
+ def configure(self, node):
+ """Configure the Plugin from loaded configuration data
+
+ Args:
+ node (dict): The loaded configuration dictionary
+
+ Raises:
+ :class:`.SourceError`: If it's a :class:`.Source` implementation
+ :class:`.ElementError`: If it's an :class:`.Element` implementation
+
+ Plugin implementors should implement this method to read configuration
+ data and store it.
+
+ Plugins should use the :func:`Plugin.node_get_member() <buildstream.plugin.Plugin.node_get_member>`
+ and :func:`Plugin.node_get_list_element() <buildstream.plugin.Plugin.node_get_list_element>`
+ methods to fetch values from the passed `node`. This will ensure that a nice human readable error
+ message will be raised if the expected configuration is not found, indicating the filename,
+ line and column numbers.
+
+ Further the :func:`Plugin.node_validate() <buildstream.plugin.Plugin.node_validate>` method
+ should be used to ensure that the user has not specified keys in `node` which are unsupported
+ by the plugin.
+
+ .. note::
+
+ For Elements, when variable substitution is desirable, the
+ :func:`Element.node_subst_member() <buildstream.element.Element.node_subst_member>`
+ and :func:`Element.node_subst_list_element() <buildstream.element.Element.node_subst_list_element>`
+ methods can be used.
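+
+        As a minimal sketch, assuming a plugin with two hypothetical
+        configuration keys ``path`` and ``strict``, an implementation
+        could look like this:
+
+        .. code:: python
+
+          def configure(self, node):
+              self.node_validate(node, ['path', 'strict'])
+              self.path = self.node_get_member(node, str, 'path')
+              self.strict = self.node_get_member(node, bool, 'strict', False)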
+ """
+ raise ImplError("{tag} plugin '{kind}' does not implement configure()".format(
+ tag=self.__type_tag, kind=self.get_kind()))
+
+ def preflight(self):
+ """Preflight Check
+
+ Raises:
+ :class:`.SourceError`: If it's a :class:`.Source` implementation
+ :class:`.ElementError`: If it's an :class:`.Element` implementation
+
+ This method is run after :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`
+ and after the pipeline is fully constructed.
+
+ Implementors should simply raise :class:`.SourceError` or :class:`.ElementError`
+ with an informative message in the case that the host environment is
+ unsuitable for operation.
+
+ Plugins which require host tools (only sources usually) should obtain
+ them with :func:`utils.get_host_tool() <buildstream.utils.get_host_tool>` which
+ will raise an error automatically informing the user that a host tool is needed.
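+
+        As a minimal sketch, a source plugin which requires the host ``git``
+        tool (the tool name here is only illustrative) could implement this as:
+
+        .. code:: python
+
+          def preflight(self):
+              # Raises an error automatically if 'git' is not available
+              self.host_git = utils.get_host_tool('git')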
+ """
+ raise ImplError("{tag} plugin '{kind}' does not implement preflight()".format(
+ tag=self.__type_tag, kind=self.get_kind()))
+
+ def get_unique_key(self):
+ """Return something which uniquely identifies the plugin input
+
+ Returns:
+ A string, list or dictionary which uniquely identifies the input
+
+        This is used to construct unique cache keys for elements and sources;
+        sources should return something which uniquely identifies the payload,
+        such as a sha256 sum of a tarball's content.
+
+ Elements and Sources should implement this by collecting any configurations
+ which could possibly affect the output and return a dictionary of these settings.
+
+ For Sources, this is guaranteed to only be called if
+ :func:`Source.get_consistency() <buildstream.source.Source.get_consistency>`
+ has not returned :func:`Consistency.INCONSISTENT <buildstream.source.Consistency.INCONSISTENT>`
+ which is to say that the Source is expected to have an exact *ref* indicating
+ exactly what source is going to be staged.
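+
+        As a minimal sketch, continuing the hypothetical ``path`` and
+        ``strict`` configuration from the configure() example above:
+
+        .. code:: python
+
+          def get_unique_key(self):
+              return {
+                  'path': self.path,
+                  'strict': self.strict
+              }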
+ """
+ raise ImplError("{tag} plugin '{kind}' does not implement get_unique_key()".format(
+ tag=self.__type_tag, kind=self.get_kind()))
+
+ #############################################################
+ # Public Methods #
+ #############################################################
+ def get_kind(self):
+ """Fetches the kind of this plugin
+
+ Returns:
+ (str): The kind of this plugin
+ """
+ return self.__kind
+
+ def node_items(self, node):
+ """Iterate over a dictionary loaded from YAML
+
+ Args:
+ node (dict): The YAML loaded dictionary object
+
+ Returns:
+ list: List of key/value tuples to iterate over
+
+ BuildStream holds some private data in dictionaries loaded from
+ the YAML in order to preserve information to report in errors.
+
+ This convenience function should be used instead of the dict.items()
+ builtin function provided by python.
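+
+        As a small sketch (the reporting via status() is only illustrative):
+
+        .. code:: python
+
+          # Iterate over a loaded configuration node, without the private
+          # data which BuildStream stores alongside the user's keys
+          for key, value in self.node_items(node):
+              self.status("Found configuration key", detail=key)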
+ """
+ yield from _yaml.node_items(node)
+
+ def node_provenance(self, node, member_name=None):
+ """Gets the provenance for `node` and `member_name`
+
+ This reports a string with file, line and column information suitable
+ for reporting an error or warning.
+
+ Args:
+ node (dict): The YAML loaded dictionary object
+ member_name (str): The name of the member to check, or None for the node itself
+
+ Returns:
+ (str): A string describing the provenance of the node and member
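+
+        As a small sketch (the ``url`` member is purely illustrative):
+
+        .. code:: python
+
+          provenance = self.node_provenance(node, member_name='url')
+          self.warn("{}: The 'url' key is deprecated".format(provenance))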
+ """
+ provenance = _yaml.node_get_provenance(node, key=member_name)
+ return str(provenance)
+
+ def node_get_member(self, node, expected_type, member_name, default=_yaml._sentinel, *, allow_none=False):
+ """Fetch the value of a node member, raising an error if the value is
+ missing or incorrectly typed.
+
+ Args:
+ node (dict): A dictionary loaded from YAML
+ expected_type (type): The expected type of the node member
+ member_name (str): The name of the member to fetch
+ default (expected_type): A value to return when *member_name* is not specified in *node*
+ allow_none (bool): Allow explicitly set None values in the YAML (*Since: 1.4*)
+
+ Returns:
+ The value of *member_name* in *node*, otherwise *default*
+
+ Raises:
+ :class:`.LoadError`: When *member_name* is not found and no *default* was provided
+
+ Note:
+ Returned strings are stripped of leading and trailing whitespace
+
+ **Example:**
+
+ .. code:: python
+
+ # Expect a string 'name' in 'node'
+ name = self.node_get_member(node, str, 'name')
+
+ # Fetch an optional integer
+ level = self.node_get_member(node, int, 'level', -1)
+ """
+ return _yaml.node_get(node, expected_type, member_name, default_value=default, allow_none=allow_none)
+
+ def node_set_member(self, node, key, value):
+ """Set the value of a node member
+ Args:
+ node (node): A dictionary loaded from YAML
+ key (str): The key name
+ value: The value
+
+ Returns:
+ None
+
+ Raises:
+ None
+
+ **Example:**
+
+ .. code:: python
+
+ # Set a string 'tomjon' in node[name]
+ self.node_set_member(node, 'name', 'tomjon')
+ """
+ _yaml.node_set(node, key, value)
+
+ def new_empty_node(self):
+ """Create an empty 'Node' object to be handled by BuildStream's core
+ Args:
+ None
+
+ Returns:
+ Node: An empty Node object
+
+ Raises:
+ None
+
+ **Example:**
+
+ .. code:: python
+
+ # Create an empty Node object to store metadata information
+ metadata = self.new_empty_node()
+ """
+ return _yaml.new_empty_node()
+
+ def node_get_project_path(self, node, key, *,
+ check_is_file=False, check_is_dir=False):
+ """Fetches a project path from a dictionary node and validates it
+
+ Paths are asserted to never lead to a directory outside of the
+        project directory. In addition, paths cannot point to symbolic
+        links, fifos, sockets or block/character devices.
+
+ The `check_is_file` and `check_is_dir` parameters can be used to
+ perform additional validations on the path. Note that an
+ exception will always be raised if both parameters are set to
+ ``True``.
+
+ Args:
+ node (dict): A dictionary loaded from YAML
+ key (str): The key whose value contains a path to validate
+ check_is_file (bool): If ``True`` an error will also be raised
+ if path does not point to a regular file.
+ Defaults to ``False``
+ check_is_dir (bool): If ``True`` an error will also be raised
+ if path does not point to a directory.
+ Defaults to ``False``
+
+ Returns:
+ (str): The project path
+
+ Raises:
+ :class:`.LoadError`: In the case that the project path is not
+ valid or does not exist
+
+ *Since: 1.2*
+
+ **Example:**
+
+ .. code:: python
+
+ path = self.node_get_project_path(node, 'path')
+
+ """
+
+ return self.__project.get_path_from_node(node, key,
+ check_is_file=check_is_file,
+ check_is_dir=check_is_dir)
+
+ def node_validate(self, node, valid_keys):
+ """This should be used in :func:`~buildstream.plugin.Plugin.configure`
+ implementations to assert that users have only entered
+ valid configuration keys.
+
+ Args:
+ node (dict): A dictionary loaded from YAML
+ valid_keys (iterable): A list of valid keys for the node
+
+ Raises:
+ :class:`.LoadError`: When an invalid key is found
+
+ **Example:**
+
+ .. code:: python
+
+ # Ensure our node only contains valid autotools config keys
+ self.node_validate(node, [
+ 'configure-commands', 'build-commands',
+ 'install-commands', 'strip-commands'
+ ])
+
+ """
+ _yaml.node_validate(node, valid_keys)
+
+ def node_get_list_element(self, node, expected_type, member_name, indices):
+ """Fetch the value of a list element from a node member, raising an error if the
+ value is incorrectly typed.
+
+ Args:
+ node (dict): A dictionary loaded from YAML
+ expected_type (type): The expected type of the node member
+ member_name (str): The name of the member to fetch
+ indices (list of int): List of indices to search, in case of nested lists
+
+ Returns:
+ The value of the list element in *member_name* at the specified *indices*
+
+ Raises:
+ :class:`.LoadError`
+
+ Note:
+ Returned strings are stripped of leading and trailing whitespace
+
+ **Example:**
+
+ .. code:: python
+
+ # Fetch the list itself
+ things = self.node_get_member(node, list, 'things')
+
+ # Iterate over the list indices
+ for i in range(len(things)):
+
+ # Fetch dict things
+ thing = self.node_get_list_element(
+ node, dict, 'things', [ i ])
+ """
+ return _yaml.node_get(node, expected_type, member_name, indices=indices)
+
+ def debug(self, brief, *, detail=None):
+ """Print a debugging message
+
+ Args:
+ brief (str): The brief message
+ detail (str): An optional detailed message, can be multiline output
+ """
+ if self.__context.log_debug:
+ self.__message(MessageType.DEBUG, brief, detail=detail)
+
+ def status(self, brief, *, detail=None):
+ """Print a status message
+
+ Args:
+ brief (str): The brief message
+ detail (str): An optional detailed message, can be multiline output
+
+        Note: Status messages tell the user what a plugin is currently doing
+ """
+ self.__message(MessageType.STATUS, brief, detail=detail)
+
+ def info(self, brief, *, detail=None):
+ """Print an informative message
+
+ Args:
+ brief (str): The brief message
+ detail (str): An optional detailed message, can be multiline output
+
+ Note: Informative messages tell the user something they might want
+ to know, like if refreshing an element caused it to change.
+ """
+ self.__message(MessageType.INFO, brief, detail=detail)
+
+ def warn(self, brief, *, detail=None, warning_token=None):
+ """Print a warning message, checks warning_token against project configuration
+
+ Args:
+ brief (str): The brief message
+ detail (str): An optional detailed message, can be multiline output
+           warning_token (str): An optional configurable warning associated with this warning;
+                                this will cause PluginError to be raised if the warning is configured as fatal.
+ (*Since 1.4*)
+
+ Raises:
+ (:class:`.PluginError`): When warning_token is considered fatal by the project configuration
+ """
+ if warning_token:
+ warning_token = _prefix_warning(self, warning_token)
+ brief = "[{}]: {}".format(warning_token, brief)
+ project = self._get_project()
+
+ if project._warning_is_fatal(warning_token):
+ detail = detail if detail else ""
+ raise PluginError(message="{}\n{}".format(brief, detail), reason=warning_token)
+
+ self.__message(MessageType.WARN, brief=brief, detail=detail)
+
+ def log(self, brief, *, detail=None):
+ """Log a message into the plugin's log file
+
+ The message will not be shown in the master log at all (so it will not
+ be displayed to the user on the console).
+
+ Args:
+ brief (str): The brief message
+ detail (str): An optional detailed message, can be multiline output
+ """
+ self.__message(MessageType.LOG, brief, detail=detail)
+
+ @contextmanager
+ def timed_activity(self, activity_name, *, detail=None, silent_nested=False):
+ """Context manager for performing timed activities in plugins
+
+ Args:
+ activity_name (str): The name of the activity
+ detail (str): An optional detailed message, can be multiline output
+ silent_nested (bool): If specified, nested messages will be silenced
+
+        This function lets you perform timed tasks in your plugin;
+        the core will take care of timing the duration of your
+        task and printing start / fail / success messages.
+
+ **Example**
+
+ .. code:: python
+
+ # Activity will be logged and timed
+ with self.timed_activity("Mirroring {}".format(self.url)):
+
+ # This will raise SourceError on its own
+ self.call(... command which takes time ...)
+ """
+ with self.__context.timed_activity(activity_name,
+ unique_id=self._unique_id,
+ detail=detail,
+ silent_nested=silent_nested):
+ yield
+
+ def call(self, *popenargs, fail=None, fail_temporarily=False, **kwargs):
+ """A wrapper for subprocess.call()
+
+ Args:
+ popenargs (list): Popen() arguments
+ fail (str): A message to display if the process returns
+ a non zero exit code
+ fail_temporarily (bool): Whether any exceptions should
+ be raised as temporary. (*Since: 1.2*)
+ rest_of_args (kwargs): Remaining arguments to subprocess.call()
+
+ Returns:
+ (int): The process exit code.
+
+ Raises:
+ (:class:`.PluginError`): If a non-zero return code is received and *fail* is specified
+
+ Note: If *fail* is not specified, then the return value of subprocess.call()
+ is returned even on error, and no exception is automatically raised.
+
+ **Example**
+
+ .. code:: python
+
+ # Call some host tool
+ self.tool = utils.get_host_tool('toolname')
+ self.call(
+ [self.tool, '--download-ponies', self.mirror_directory],
+ "Failed to download ponies from {}".format(
+ self.mirror_directory))
+ """
+ exit_code, _ = self.__call(*popenargs, fail=fail, fail_temporarily=fail_temporarily, **kwargs)
+ return exit_code
+
+ def check_output(self, *popenargs, fail=None, fail_temporarily=False, **kwargs):
+ """A wrapper for subprocess.check_output()
+
+ Args:
+ popenargs (list): Popen() arguments
+ fail (str): A message to display if the process returns
+ a non zero exit code
+ fail_temporarily (bool): Whether any exceptions should
+ be raised as temporary. (*Since: 1.2*)
+           rest_of_args (kwargs): Remaining arguments to subprocess.check_output()
+
+ Returns:
+ (int): The process exit code
+ (str): The process standard output
+
+ Raises:
+ (:class:`.PluginError`): If a non-zero return code is received and *fail* is specified
+
+ Note: If *fail* is not specified, then the return value of subprocess.check_output()
+ is returned even on error, and no exception is automatically raised.
+
+ **Example**
+
+ .. code:: python
+
+ # Get the tool at preflight time
+ self.tool = utils.get_host_tool('toolname')
+
+ # Call the tool, automatically raise an error
+ _, output = self.check_output(
+ [self.tool, '--print-ponies'],
+ "Failed to print the ponies in {}".format(
+ self.mirror_directory),
+ cwd=self.mirror_directory)
+
+ # Call the tool, inspect exit code
+ exit_code, output = self.check_output(
+ [self.tool, 'get-ref', tracking],
+ cwd=self.mirror_directory)
+
+ if exit_code == 128:
+ return
+ elif exit_code != 0:
+ fmt = "{plugin}: Failed to get ref for tracking: {track}"
+              raise SourceError(
+                  fmt.format(plugin=self, track=tracking))
+ """
+ return self.__call(*popenargs, collect_stdout=True, fail=fail, fail_temporarily=fail_temporarily, **kwargs)
+
+ #############################################################
+ # Private Methods used in BuildStream #
+ #############################################################
+
+ # _lookup():
+ #
+ # Fetch a plugin in the current process by its
+ # unique identifier
+ #
+ # Args:
+ # unique_id: The unique identifier as returned by
+ # plugin._unique_id
+ #
+ # Returns:
+ # (Plugin): The plugin for the given ID, or None
+ #
+ @classmethod
+ def _lookup(cls, unique_id):
+ assert unique_id != 0, "Looking up invalid plugin ID 0, ID counter starts at 1"
+ try:
+ return cls.__TABLE[unique_id]
+ except KeyError:
+ assert False, "Could not find plugin with ID {}".format(unique_id)
+ raise # In case a user is running with "python -O"
+
+ # _get_context()
+ #
+ # Fetches the invocation context
+ #
+ def _get_context(self):
+ return self.__context
+
+ # _get_project()
+ #
+ # Fetches the project object associated with this plugin
+ #
+ def _get_project(self):
+ return self.__project
+
+ # _get_provenance():
+ #
+ # Fetch bst file, line and column of the entity
+ #
+ def _get_provenance(self):
+ return self.__provenance
+
+ # Context manager for getting the open file handle to this
+ # plugin's log. Used in the child context to add stuff to
+ # a log.
+ #
+ @contextmanager
+ def _output_file(self):
+ log = self.__context.get_log_handle()
+ if log is None:
+ with open(os.devnull, "w") as output:
+ yield output
+ else:
+ yield log
+
+ # _configure():
+ #
+ # Calls configure() for the plugin, this must be called by
+ # the core instead of configure() directly, so that the
+ # _get_configuring() state is up to date.
+ #
+ # Args:
+ # node (dict): The loaded configuration dictionary
+ #
+ def _configure(self, node):
+ self.__configuring = True
+ self.configure(node)
+ self.__configuring = False
+
+ # _get_configuring():
+ #
+ # Checks whether the plugin is in the middle of having
+ # its Plugin.configure() method called
+ #
+ # Returns:
+ # (bool): Whether we are currently configuring
+ def _get_configuring(self):
+ return self.__configuring
+
+ # _preflight():
+ #
+ # Calls preflight() for the plugin, and allows generic preflight
+ # checks to be added
+ #
+ # Raises:
+ # SourceError: If it's a Source implementation
+ # ElementError: If it's an Element implementation
+ # ProgramNotFoundError: If a required host tool is not found
+ #
+ def _preflight(self):
+ self.preflight()
+
+ #############################################################
+ # Local Private Methods #
+ #############################################################
+
+ # Internal subprocess implementation for the call() and check_output() APIs
+ #
+ def __call(self, *popenargs, collect_stdout=False, fail=None, fail_temporarily=False, **kwargs):
+
+ with self._output_file() as output_file:
+ if 'stdout' not in kwargs:
+ kwargs['stdout'] = output_file
+ if 'stderr' not in kwargs:
+ kwargs['stderr'] = output_file
+ if collect_stdout:
+ kwargs['stdout'] = subprocess.PIPE
+
+ self.__note_command(output_file, *popenargs, **kwargs)
+
+ exit_code, output = utils._call(*popenargs, **kwargs)
+
+ if fail and exit_code:
+ raise PluginError("{plugin}: {message}".format(plugin=self, message=fail),
+ temporary=fail_temporarily)
+
+ return (exit_code, output)
+
+ def __message(self, message_type, brief, **kwargs):
+ message = Message(self._unique_id, message_type, brief, **kwargs)
+ self.__context.message(message)
+
+ def __note_command(self, output, *popenargs, **kwargs):
+ workdir = kwargs.get('cwd', os.getcwd())
+ command = " ".join(popenargs[0])
+ output.write('Running host command {}: {}\n'.format(workdir, command))
+ output.flush()
+ self.status('Running host command', detail=command)
+
+ def _get_full_name(self):
+ project = self.__project
+ if project.junction:
+ return '{}:{}'.format(project.junction.name, self.name)
+ else:
+ return self.name
+
+ def __deprecation_warning_silenced(self):
+ if not self.BST_PLUGIN_DEPRECATED:
+ return False
+ else:
+ silenced_warnings = set()
+ project = self.__project
+
+ for key, value in self.node_items(project.element_overrides):
+ if _yaml.node_get(value, bool, 'suppress-deprecation-warnings', default_value=False):
+ silenced_warnings.add(key)
+ for key, value in self.node_items(project.source_overrides):
+ if _yaml.node_get(value, bool, 'suppress-deprecation-warnings', default_value=False):
+ silenced_warnings.add(key)
+
+ return self.get_kind() in silenced_warnings
+
+
+# A local table for _prefix_warning()
+#
+__CORE_WARNINGS = [
+ value
+ for name, value in CoreWarnings.__dict__.items()
+ if not name.startswith("__")
+]
+
+
+# _prefix_warning():
+#
+# Prefix a warning with the plugin kind. CoreWarnings are not prefixed.
+#
+# Args:
+# plugin (Plugin): The plugin which raised the warning
+# warning (str): The warning to prefix
+#
+# Returns:
+# (str): A prefixed warning
+#
+def _prefix_warning(plugin, warning):
+ if any((warning is core_warning for core_warning in __CORE_WARNINGS)):
+ return warning
+ return "{}:{}".format(plugin.get_kind(), warning)
diff --git a/src/buildstream/plugins/elements/__init__.py b/src/buildstream/plugins/elements/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/plugins/elements/__init__.py
diff --git a/src/buildstream/plugins/elements/autotools.py b/src/buildstream/plugins/elements/autotools.py
new file mode 100644
index 000000000..7a05336b7
--- /dev/null
+++ b/src/buildstream/plugins/elements/autotools.py
@@ -0,0 +1,75 @@
+#
+# Copyright (C) 2016, 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+autotools - Autotools build element
+===================================
+This is a :mod:`BuildElement <buildstream.buildelement>` implementation for
+using Autotools build scripts (also known as the `GNU Build System
+<https://en.wikipedia.org/wiki/GNU_Build_System>`_).
+
+You will often want to pass additional arguments to ``configure``. This should
+be done on a per-element basis by setting the ``conf-local`` variable. Here is
+an example:
+
+.. code:: yaml
+
+ variables:
+ conf-local: |
+ --disable-foo --enable-bar
+
+If you want to pass extra options to ``configure`` for every element in your
+project, set the ``conf-global`` variable in your project.conf file. Here is
+an example of that:
+
+.. code:: yaml
+
+ elements:
+ autotools:
+ variables:
+ conf-global: |
+ --disable-gtk-doc --disable-static
+
+Here is the default configuration for the ``autotools`` element in full:
+
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/autotools.yaml
+ :language: yaml
+
+See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
+details on common configuration options for build elements.
+"""
+
+from buildstream import BuildElement, SandboxFlags
+
+
+# Element implementation for the 'autotools' kind.
+class AutotoolsElement(BuildElement):
+ # Supports virtual directories (required for remote execution)
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Enable command batching across prepare() and assemble()
+ def configure_sandbox(self, sandbox):
+ super().configure_sandbox(sandbox)
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+ collect=self.get_variable('install-root'))
+
+
+# Plugin entry point
+def setup():
+ return AutotoolsElement
diff --git a/src/buildstream/plugins/elements/autotools.yaml b/src/buildstream/plugins/elements/autotools.yaml
new file mode 100644
index 000000000..85f7393e7
--- /dev/null
+++ b/src/buildstream/plugins/elements/autotools.yaml
@@ -0,0 +1,129 @@
+# Autotools default configurations
+
+variables:
+
+ autogen: |
+ export NOCONFIGURE=1;
+
+ if [ -x %{conf-cmd} ]; then true;
+ elif [ -x %{conf-root}/autogen ]; then %{conf-root}/autogen;
+ elif [ -x %{conf-root}/autogen.sh ]; then %{conf-root}/autogen.sh;
+ elif [ -x %{conf-root}/bootstrap ]; then %{conf-root}/bootstrap;
+ elif [ -x %{conf-root}/bootstrap.sh ]; then %{conf-root}/bootstrap.sh;
+ else autoreconf -ivf %{conf-root};
+ fi
+
+ # Project-wide extra arguments to be passed to `configure`
+ conf-global: ''
+
+ # Element-specific extra arguments to be passed to `configure`.
+ conf-local: ''
+
+ # For backwards compatibility only, do not use.
+ conf-extra: ''
+
+ conf-cmd: "%{conf-root}/configure"
+
+ conf-args: |
+
+ --prefix=%{prefix} \
+ --exec-prefix=%{exec_prefix} \
+ --bindir=%{bindir} \
+ --sbindir=%{sbindir} \
+ --sysconfdir=%{sysconfdir} \
+ --datadir=%{datadir} \
+ --includedir=%{includedir} \
+ --libdir=%{libdir} \
+ --libexecdir=%{libexecdir} \
+ --localstatedir=%{localstatedir} \
+ --sharedstatedir=%{sharedstatedir} \
+ --mandir=%{mandir} \
+ --infodir=%{infodir} %{conf-extra} %{conf-global} %{conf-local}
+
+ configure: |
+
+ %{conf-cmd} %{conf-args}
+
+ make: make
+ make-install: make -j1 DESTDIR="%{install-root}" install
+
+ # Set this if the sources cannot handle parallelization.
+ #
+ # notparallel: True
+
+
+ # Automatically remove libtool archive files
+ #
+ # Set remove-libtool-modules to "true" to remove .la files for
+ # modules intended to be opened with lt_dlopen()
+ #
+ # Set remove-libtool-libraries to "true" to remove .la files for
+ # libraries
+ #
+ # Value must be "true" or "false"
+ remove-libtool-modules: "false"
+ remove-libtool-libraries: "false"
+
+ delete-libtool-archives: |
+ if %{remove-libtool-modules} || %{remove-libtool-libraries}; then
+ find "%{install-root}" -name "*.la" -print0 | while read -d '' -r file; do
+ if grep '^shouldnotlink=yes$' "${file}" &>/dev/null; then
+ if %{remove-libtool-modules}; then
+ echo "Removing ${file}."
+ rm "${file}"
+ else
+ echo "Not removing ${file}."
+ fi
+ else
+ if %{remove-libtool-libraries}; then
+ echo "Removing ${file}."
+ rm "${file}"
+ else
+ echo "Not removing ${file}."
+ fi
+ fi
+ done
+ fi
+
+config:
+
+ # Commands for configuring the software
+ #
+ configure-commands:
+ - |
+ %{autogen}
+ - |
+ %{configure}
+
+ # Commands for building the software
+ #
+ build-commands:
+ - |
+ %{make}
+
+ # Commands for installing the software into a
+ # destination folder
+ #
+ install-commands:
+ - |
+ %{make-install}
+ - |
+ %{delete-libtool-archives}
+
+ # Commands for stripping debugging information out of
+ # installed binaries
+ #
+ strip-commands:
+ - |
+ %{strip-binaries}
+
+# Use max-jobs CPUs for building and enable verbosity
+environment:
+ MAKEFLAGS: -j%{max-jobs}
+ V: 1
+
+# And don't consider MAKEFLAGS or V as something which may
+# affect build output.
+environment-nocache:
+- MAKEFLAGS
+- V
diff --git a/src/buildstream/plugins/elements/cmake.py b/src/buildstream/plugins/elements/cmake.py
new file mode 100644
index 000000000..74da04899
--- /dev/null
+++ b/src/buildstream/plugins/elements/cmake.py
@@ -0,0 +1,74 @@
+#
+# Copyright (C) 2016, 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+cmake - CMake build element
+===========================
+This is a :mod:`BuildElement <buildstream.buildelement>` implementation for
+using the `CMake <https://cmake.org/>`_ build system.
+
+You will often want to pass additional arguments to the ``cmake`` program for
+specific configuration options. This should be done on a per-element basis by
+setting the ``cmake-local`` variable. Here is an example:
+
+.. code:: yaml
+
+ variables:
+ cmake-local: |
+ -DCMAKE_BUILD_TYPE=Debug
+
+If you want to pass extra options to ``cmake`` for every element in your
+project, set the ``cmake-global`` variable in your project.conf file. Here is
+an example of that:
+
+.. code:: yaml
+
+ elements:
+ cmake:
+ variables:
+ cmake-global: |
+ -DCMAKE_BUILD_TYPE=Release
+
+Here is the default configuration for the ``cmake`` element in full:
+
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/cmake.yaml
+ :language: yaml
+
+See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
+details on common configuration options for build elements.
+"""
+
+from buildstream import BuildElement, SandboxFlags
+
+
+# Element implementation for the 'cmake' kind.
+class CMakeElement(BuildElement):
+ # Supports virtual directories (required for remote execution)
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Enable command batching across prepare() and assemble()
+ def configure_sandbox(self, sandbox):
+ super().configure_sandbox(sandbox)
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+ collect=self.get_variable('install-root'))
+
+
+# Plugin entry point
+def setup():
+ return CMakeElement
diff --git a/src/buildstream/plugins/elements/cmake.yaml b/src/buildstream/plugins/elements/cmake.yaml
new file mode 100644
index 000000000..ba20d7ce6
--- /dev/null
+++ b/src/buildstream/plugins/elements/cmake.yaml
@@ -0,0 +1,72 @@
+# CMake default configuration
+
+variables:
+
+ build-dir: _builddir
+
+ # Project-wide extra arguments to be passed to `cmake`
+ cmake-global: ''
+
+ # Element-specific extra arguments to be passed to `cmake`.
+ cmake-local: ''
+
+ # For backwards compatibility only, do not use.
+ cmake-extra: ''
+
+ # The cmake generator to use
+ generator: Unix Makefiles
+
+ cmake-args: |
+
+ -DCMAKE_INSTALL_PREFIX:PATH="%{prefix}" \
+ -DCMAKE_INSTALL_LIBDIR:PATH="%{lib}" %{cmake-extra} %{cmake-global} %{cmake-local}
+
+ cmake: |
+
+ cmake -B%{build-dir} -H"%{conf-root}" -G"%{generator}" %{cmake-args}
+
+ make: cmake --build %{build-dir} -- ${JOBS}
+ make-install: env DESTDIR="%{install-root}" cmake --build %{build-dir} --target install
+
+ # Set this if the sources cannot handle parallelization.
+ #
+ # notparallel: True
+
+config:
+
+ # Commands for configuring the software
+ #
+ configure-commands:
+ - |
+ %{cmake}
+
+ # Commands for building the software
+ #
+ build-commands:
+ - |
+ %{make}
+
+ # Commands for installing the software into a
+ # destination folder
+ #
+ install-commands:
+ - |
+ %{make-install}
+
+ # Commands for stripping debugging information out of
+ # installed binaries
+ #
+ strip-commands:
+ - |
+ %{strip-binaries}
+
+# Use max-jobs CPUs for building and enable verbosity
+environment:
+ JOBS: -j%{max-jobs}
+ V: 1
+
+# And don't consider JOBS or V as something which may
+# affect build output.
+environment-nocache:
+- JOBS
+- V
diff --git a/src/buildstream/plugins/elements/compose.py b/src/buildstream/plugins/elements/compose.py
new file mode 100644
index 000000000..b672cde0c
--- /dev/null
+++ b/src/buildstream/plugins/elements/compose.py
@@ -0,0 +1,194 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+compose - Compose the output of multiple elements
+=================================================
+This element creates a selective composition of its dependencies.
+
+This is normally used near the end of a pipeline to prepare
+something for later deployment.
+
+Since this element's output includes its dependencies, it may only
+depend on elements as `build` type dependencies.
+
+The default configuration and possible options are as such:
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/compose.yaml
+ :language: yaml
+"""
+
+import os
+from buildstream import Element, Scope
+
+
+# Element implementation for the 'compose' kind.
+class ComposeElement(Element):
+ # pylint: disable=attribute-defined-outside-init
+
+ # The compose element's output is its dependencies, so
+ # we must rebuild if the dependencies change even when
+ # not in strict build plans.
+ #
+ BST_STRICT_REBUILD = True
+
+ # Compose artifacts must never have indirect dependencies,
+ # so runtime dependencies are forbidden.
+ BST_FORBID_RDEPENDS = True
+
+ # This element ignores sources, so we should forbid them from being
+ # added, to reduce the potential for confusion
+ BST_FORBID_SOURCES = True
+
+ # This plugin has been modified to avoid the use of Sandbox.get_directory
+ BST_VIRTUAL_DIRECTORY = True
+
+ def configure(self, node):
+ self.node_validate(node, [
+ 'integrate', 'include', 'exclude', 'include-orphans'
+ ])
+
+ # We name this variable 'integration' only to avoid
+ # collision with the Element.integrate() method.
+ self.integration = self.node_get_member(node, bool, 'integrate')
+ self.include = self.node_get_member(node, list, 'include')
+ self.exclude = self.node_get_member(node, list, 'exclude')
+ self.include_orphans = self.node_get_member(node, bool, 'include-orphans')
+
+ def preflight(self):
+ pass
+
+ def get_unique_key(self):
+ key = {'integrate': self.integration,
+ 'include': sorted(self.include),
+ 'orphans': self.include_orphans}
+
+ if self.exclude:
+ key['exclude'] = sorted(self.exclude)
+
+ return key
+
+ def configure_sandbox(self, sandbox):
+ pass
+
+ def stage(self, sandbox):
+ pass
+
+ def assemble(self, sandbox):
+
+ require_split = self.include or self.exclude or not self.include_orphans
+
+ # Stage deps in the sandbox root
+ with self.timed_activity("Staging dependencies", silent_nested=True):
+ self.stage_dependency_artifacts(sandbox, Scope.BUILD)
+
+ manifest = set()
+ if require_split:
+ with self.timed_activity("Computing split", silent_nested=True):
+ for dep in self.dependencies(Scope.BUILD):
+ files = dep.compute_manifest(include=self.include,
+ exclude=self.exclude,
+ orphans=self.include_orphans)
+ manifest.update(files)
+
+ # Make a snapshot of all the files.
+ vbasedir = sandbox.get_virtual_directory()
+ modified_files = set()
+ removed_files = set()
+ added_files = set()
+
+ # Run any integration commands provided by the dependencies
+ # once they are all staged and ready
+ if self.integration:
+ with self.timed_activity("Integrating sandbox"):
+ if require_split:
+
+ # Make a snapshot of all the files before integration-commands are run.
+ snapshot = set(vbasedir.list_relative_paths())
+ vbasedir.mark_unmodified()
+
+ with sandbox.batch(0):
+ for dep in self.dependencies(Scope.BUILD):
+ dep.integrate(sandbox)
+
+ if require_split:
+ # Calculate added, modified and removed files
+ post_integration_snapshot = vbasedir.list_relative_paths()
+ modified_files = set(vbasedir.list_modified_paths())
+ basedir_contents = set(post_integration_snapshot)
+ for path in manifest:
+ if path in snapshot and path not in basedir_contents:
+ removed_files.add(path)
+
+ for path in basedir_contents:
+ if path not in snapshot:
+ added_files.add(path)
+ self.info("Integration modified {}, added {} and removed {} files"
+ .format(len(modified_files), len(added_files), len(removed_files)))
+
+ # The remainder of this is expensive, make an early exit if
+ # we're not being selective about what is to be included.
+ if not require_split:
+ return '/'
+
+ # Do we want to force include files which were modified by
+ # the integration commands, even if they were not added ?
+ #
+ manifest.update(added_files)
+ manifest.difference_update(removed_files)
+
+ # XXX We should be moving things outside of the build sandbox
+ # instead of into a subdir. The element assemble() method should
+ # support this in some way.
+ #
+ installdir = vbasedir.descend('buildstream', 'install', create=True)
+
+ # We already saved the manifest for created files in the integration phase,
+ # now collect the rest of the manifest.
+ #
+
+ lines = []
+ if self.include:
+ lines.append("Including files from domains: " + ", ".join(self.include))
+ else:
+ lines.append("Including files from all domains")
+
+ if self.exclude:
+ lines.append("Excluding files from domains: " + ", ".join(self.exclude))
+
+ if self.include_orphans:
+ lines.append("Including orphaned files")
+ else:
+ lines.append("Excluding orphaned files")
+
+ detail = "\n".join(lines)
+
+ def import_filter(path):
+ return path in manifest
+
+ with self.timed_activity("Creating composition", detail=detail, silent_nested=True):
+ self.info("Composing {} files".format(len(manifest)))
+ installdir.import_files(vbasedir, filter_callback=import_filter, can_link=True)
+
+ # And we're done
+ return os.path.join(os.sep, 'buildstream', 'install')
+
+
+# Plugin entry point
+def setup():
+ return ComposeElement
diff --git a/src/buildstream/plugins/elements/compose.yaml b/src/buildstream/plugins/elements/compose.yaml
new file mode 100644
index 000000000..fd2eb9358
--- /dev/null
+++ b/src/buildstream/plugins/elements/compose.yaml
@@ -0,0 +1,34 @@
+
+# Compose element configuration
+config:
+
+ # Whether to run the integration commands for the
+ # staged dependencies.
+ #
+ integrate: True
+
+ # A list of domains to include from each artifact, as
+ # they were defined in the element's 'split-rules'.
+ #
+ # Since domains can be added, it is not an error to
+ # specify domains which may not exist for all of the
+ # elements in this composition.
+ #
+ # The default empty list indicates that all domains
+ # from each dependency should be included.
+ #
+ include: []
+
+ # A list of domains to exclude from each artifact, as
+ # they were defined in the element's 'split-rules'.
+ #
+ # In the case that a file is spoken for by a domain
+ # in the 'include' list and another in the 'exclude'
+ # list, then the file will be excluded.
+ exclude: []
+
+ # Whether to include orphan files which are not
+ # included by any of the 'split-rules' present on
+ # a given element.
+ #
+ include-orphans: True
diff --git a/src/buildstream/plugins/elements/distutils.py b/src/buildstream/plugins/elements/distutils.py
new file mode 100644
index 000000000..4b2c1e2f4
--- /dev/null
+++ b/src/buildstream/plugins/elements/distutils.py
@@ -0,0 +1,51 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+distutils - Python distutils element
+====================================
+A :mod:`BuildElement <buildstream.buildelement>` implementation for using
+python distutils.
+
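+As a rough sketch, an element using this plugin typically needs little more
+than a kind and a source (the tarball URL below is purely illustrative):
+
+.. code:: yaml
+
+   kind: distutils
+
+   sources:
+   - kind: tar
+     url: https://example.com/releases/hello-1.0.tar.gz
+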
+The distutils default configuration:
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/distutils.yaml
+ :language: yaml
+
+See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
+details on common configuration options for build elements.
+"""
+
+from buildstream import BuildElement, SandboxFlags
+
+
+# Element implementation for the python 'distutils' kind.
+class DistutilsElement(BuildElement):
+ # Supports virtual directories (required for remote execution)
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Enable command batching across prepare() and assemble()
+ def configure_sandbox(self, sandbox):
+ super().configure_sandbox(sandbox)
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+ collect=self.get_variable('install-root'))
+
+
+# Plugin entry point
+def setup():
+ return DistutilsElement
diff --git a/src/buildstream/plugins/elements/distutils.yaml b/src/buildstream/plugins/elements/distutils.yaml
new file mode 100644
index 000000000..cec7da6e9
--- /dev/null
+++ b/src/buildstream/plugins/elements/distutils.yaml
@@ -0,0 +1,49 @@
+# Default python distutils configuration
+
+variables:
+
+ # When building for python2 distutils, simply
+ # override this in the element declaration
+ python: python3
+
+ python-build: |
+
+ %{python} %{conf-root}/setup.py build
+
+ install-args: |
+
+ --prefix "%{prefix}" \
+ --root "%{install-root}"
+
+ python-install: |
+
+ %{python} %{conf-root}/setup.py install %{install-args}
+
+
+config:
+
+ # Commands for configuring the software
+ #
+ configure-commands: []
+
+ # Commands for building the software
+ #
+ build-commands:
+ - |
+ %{python-build}
+
+ # Commands for installing the software into a
+ # destination folder
+ #
+ install-commands:
+ - |
+ %{python-install}
+
+ # Commands for stripping debugging information out of
+ # installed binaries
+ #
+ strip-commands:
+ - |
+ %{strip-binaries}
+ - |
+ %{fix-pyc-timestamps}
diff --git a/src/buildstream/plugins/elements/filter.py b/src/buildstream/plugins/elements/filter.py
new file mode 100644
index 000000000..45847e685
--- /dev/null
+++ b/src/buildstream/plugins/elements/filter.py
@@ -0,0 +1,256 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jonathan Maw <jonathan.maw@codethink.co.uk>
+
+"""
+filter - Extract a subset of files from another element
+=======================================================
+Filter another element by producing an output that is a subset of
+the parent element's output. Subsets are defined by the parent element's
+:ref:`split rules <public_split_rules>`.
+
+Overview
+--------
+A filter element must have exactly one *build* dependency, where said
+dependency is the 'parent' element which we would like to filter.
+Runtime dependencies may also be specified, which can be useful to propagate
+forward from this filter element onto its reverse dependencies.
+See :ref:`Dependencies <format_dependencies>` to see how we specify dependencies.
+
+When workspaces are opened, closed or reset on a filter element, or this
+element is tracked, the filter element will transparently pass on the command
+to its parent element (the sole build-dependency).
+
+Example
+-------
+Consider a simple import element, ``import.bst`` which imports the local files
+'foo', 'bar' and 'baz' (each stored in ``files/``, relative to the project's root):
+
+.. code:: yaml
+
+ kind: import
+
+ # Specify sources to import
+ sources:
+ - kind: local
+ path: files
+
+ # Specify public domain data, visible to other elements
+ public:
+ bst:
+ split-rules:
+ foo:
+ - /foo
+ bar:
+ - /bar
+
+.. note::
+
+ We can make an element's metadata visible to all reverse dependencies by making use
+ of the ``public:`` field. See the :ref:`public data documentation <format_public>`
+ for more information.
+
+In this example, ``import.bst`` will serve as the 'parent' of the filter element, thus
+its output will be filtered. It is important to understand that the artifact of the
+above element will contain the files: 'foo', 'bar' and 'baz'.
+
+Now, to produce an element whose artifact contains the file 'foo', and exclusively 'foo',
+we can define the following filter, ``filter-foo.bst``:
+
+.. code:: yaml
+
+ kind: filter
+
+ # Declare the sole build-dependency of the filter element
+ depends:
+ - filename: import.bst
+ type: build
+
+ # Declare a list of domains to include in the filter's artifact
+ config:
+ include:
+ - foo
+
+.. note::
+
+ We can also specify build-dependencies with a 'build-depends' field which has been
+ available since :ref:`format version 14 <project_format_version>`. See the
+ :ref:`Build-Depends documentation <format_build_depends>` for more detail.
+
+It should be noted that an 'empty' ``include:`` list would, by default, include all
+split-rules specified in the parent element, which, in this example, would be the
+files 'foo' and 'bar' (the file 'baz' was not covered by any split rules).
+
+Equally, we can use the ``exclude:`` statement to create the same artifact (which
+only contains the file 'foo') by declaring the following element, ``exclude-bar.bst``:
+
+.. code:: yaml
+
+ kind: filter
+
+ # Declare the sole build-dependency of the filter element
+ depends:
+ - filename: import.bst
+ type: build
+
+ # Declare a list of domains to exclude in the filter's artifact
+ config:
+ exclude:
+ - bar
+
+In addition to the ``include:`` and ``exclude:`` fields, there exists an ``include-orphans:``
+(Boolean) field, which defaults to ``False``. This will determine whether to include files
+which are not present in the 'split-rules'. For example, if we wanted to filter out all files
+which are not included as split rules we can define the following element, ``filter-misc.bst``:
+
+.. code:: yaml
+
+ kind: filter
+
+ # Declare the sole build-dependency of the filter element
+ depends:
+ - filename: import.bst
+ type: build
+
+ # Filter out all files which are not declared as split rules
+ config:
+ exclude:
+ - foo
+ - bar
+ include-orphans: True
+
+The artifact of ``filter-misc.bst`` will only contain the file 'baz'.
+
+Below is more information regarding the default configuration and possible options
+of the filter element:
+
+.. literalinclude:: ../../../src/buildstream/plugins/elements/filter.yaml
+ :language: yaml
+"""
+
+from buildstream import Element, ElementError, Scope
+
+
+class FilterElement(Element):
+ # pylint: disable=attribute-defined-outside-init
+
+ BST_ARTIFACT_VERSION = 1
+
+ # The filter element's output is its dependencies, so
+ # we must rebuild if the dependencies change even when
+ # not in strict build plans.
+ BST_STRICT_REBUILD = True
+
+ # This element ignores sources, so we should forbid them from being
+ # added, to reduce the potential for confusion
+ BST_FORBID_SOURCES = True
+
+ # This plugin has been modified to avoid the use of Sandbox.get_directory
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Filter elements do not run any commands
+ BST_RUN_COMMANDS = False
+
+ def configure(self, node):
+ self.node_validate(node, [
+ 'include', 'exclude', 'include-orphans'
+ ])
+
+ self.include = self.node_get_member(node, list, 'include')
+ self.exclude = self.node_get_member(node, list, 'exclude')
+ self.include_orphans = self.node_get_member(node, bool, 'include-orphans')
+ self.include_provenance = self.node_provenance(node, member_name='include')
+ self.exclude_provenance = self.node_provenance(node, member_name='exclude')
+
+ def preflight(self):
+ # Exactly one build-depend is permitted
+ build_deps = list(self.dependencies(Scope.BUILD, recurse=False))
+ if len(build_deps) != 1:
+ detail = "Full list of build-depends:\n"
+ deps_list = " \n".join([x.name for x in build_deps])
+ detail += deps_list
+ raise ElementError("{}: {} element must have exactly 1 build-dependency, actually have {}"
+ .format(self, type(self).__name__, len(build_deps)),
+ detail=detail, reason="filter-bdepend-wrong-count")
+
+ # That build-depend must not also be a runtime-depend
+ runtime_deps = list(self.dependencies(Scope.RUN, recurse=False))
+ if build_deps[0] in runtime_deps:
+ detail = "Full list of runtime depends:\n"
+ deps_list = " \n".join([x.name for x in runtime_deps])
+ detail += deps_list
+ raise ElementError("{}: {} element's build dependency must not also be a runtime dependency"
+ .format(self, type(self).__name__),
+ detail=detail, reason="filter-bdepend-also-rdepend")
+
+ def get_unique_key(self):
+ key = {
+ 'include': sorted(self.include),
+ 'exclude': sorted(self.exclude),
+ 'orphans': self.include_orphans,
+ }
+ return key
+
+ def configure_sandbox(self, sandbox):
+ pass
+
+ def stage(self, sandbox):
+ pass
+
+ def assemble(self, sandbox):
+ with self.timed_activity("Staging artifact", silent_nested=True):
+ for dep in self.dependencies(Scope.BUILD, recurse=False):
+ # Check that all the included/excluded domains exist
+ pub_data = dep.get_public_data('bst')
+ split_rules = self.node_get_member(pub_data, dict, 'split-rules', {})
+ unfound_includes = []
+ for domain in self.include:
+ if domain not in split_rules:
+ unfound_includes.append(domain)
+ unfound_excludes = []
+ for domain in self.exclude:
+ if domain not in split_rules:
+ unfound_excludes.append(domain)
+
+ detail = []
+ if unfound_includes:
+ detail.append("Unknown domains were used in {}".format(self.include_provenance))
+ detail.extend([' - {}'.format(domain) for domain in unfound_includes])
+
+ if unfound_excludes:
+ detail.append("Unknown domains were used in {}".format(self.exclude_provenance))
+ detail.extend([' - {}'.format(domain) for domain in unfound_excludes])
+
+ if detail:
+ detail = '\n'.join(detail)
+ raise ElementError("Unknown domains declared.", detail=detail)
+
+ dep.stage_artifact(sandbox, include=self.include,
+ exclude=self.exclude, orphans=self.include_orphans)
+ return ""
+
+ def _get_source_element(self):
+ # Filter elements act as proxies for their sole build-dependency
+ build_deps = list(self.dependencies(Scope.BUILD, recurse=False))
+ assert len(build_deps) == 1
+ output_elm = build_deps[0]._get_source_element()
+ return output_elm
+
+
+def setup():
+ return FilterElement
diff --git a/src/buildstream/plugins/elements/filter.yaml b/src/buildstream/plugins/elements/filter.yaml
new file mode 100644
index 000000000..9c2bf69f4
--- /dev/null
+++ b/src/buildstream/plugins/elements/filter.yaml
@@ -0,0 +1,29 @@
+
+# Filter element configuration
+config:
+
+ # A list of domains to include in each artifact, as
+ # they were defined as public data in the parent
+ # element's 'split-rules'.
+ #
+ # If a domain is specified that does not exist, the
+ # filter element will fail to build.
+ #
+ # The default empty list indicates that all domains
+ # of the parent's artifact should be included.
+ #
+ include: []
+
+ # A list of domains to exclude from each artifact, as
+ # they were defined in the parent element's 'split-rules'.
+ #
+  # If a file is spoken for both by a domain in the
+  # 'include' list and by another in the 'exclude'
+  # list, the file will be excluded.
+ exclude: []
+
+ # Whether to include orphan files which are not
+ # included by any of the 'split-rules' present in
+ # the parent element.
+ #
+ include-orphans: False
diff --git a/src/buildstream/plugins/elements/import.py b/src/buildstream/plugins/elements/import.py
new file mode 100644
index 000000000..61e353dbc
--- /dev/null
+++ b/src/buildstream/plugins/elements/import.py
@@ -0,0 +1,129 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+import - Import sources directly
+================================
+Import elements produce artifacts directly from their sources
+without any kind of processing. These are typically used to
+import an SDK to build on top of or to overlay your build with
+some configuration data.
+
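+As a rough sketch, an import element which stages some local configuration
+files under ``/etc`` might look like this (the ``local`` source path and
+element layout shown here are purely illustrative):
+
+.. code:: yaml
+
+   kind: import
+
+   # Stage some files from the project directory
+   sources:
+   - kind: local
+     path: files/etc-config
+
+   # Place the staged files under /etc in the resulting artifact
+   config:
+     source: /
+     target: /etc
+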
+The default configuration is as such:
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/import.yaml
+ :language: yaml
+"""
+
+import os
+from buildstream import Element, ElementError
+
+
+# Element implementation for the 'import' kind.
+class ImportElement(Element):
+ # pylint: disable=attribute-defined-outside-init
+
+ # This plugin has been modified to avoid the use of Sandbox.get_directory
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Import elements do not run any commands
+ BST_RUN_COMMANDS = False
+
+ def configure(self, node):
+ self.node_validate(node, [
+ 'source', 'target'
+ ])
+
+ self.source = self.node_subst_member(node, 'source')
+ self.target = self.node_subst_member(node, 'target')
+
+ def preflight(self):
+ # Assert that we have at least one source to fetch.
+
+ sources = list(self.sources())
+ if not sources:
+ raise ElementError("{}: An import element must have at least one source.".format(self))
+
+ def get_unique_key(self):
+ return {
+ 'source': self.source,
+ 'target': self.target
+ }
+
+ def configure_sandbox(self, sandbox):
+ pass
+
+ def stage(self, sandbox):
+ pass
+
+ def assemble(self, sandbox):
+
+ # Stage sources into the input directory
+ # Do not mount workspaces as the files are copied from outside the sandbox
+ self._stage_sources_in_sandbox(sandbox, 'input', mount_workspaces=False)
+
+ rootdir = sandbox.get_virtual_directory()
+ inputdir = rootdir.descend('input')
+ outputdir = rootdir.descend('output', create=True)
+
+ # The directory to grab
+ inputdir = inputdir.descend(*self.source.strip(os.sep).split(os.sep))
+
+ # The output target directory
+ outputdir = outputdir.descend(*self.target.strip(os.sep).split(os.sep), create=True)
+
+ if inputdir.is_empty():
+ raise ElementError("{}: No files were found inside directory '{}'"
+ .format(self, self.source))
+
+ # Move it over
+ outputdir.import_files(inputdir)
+
+ # And we're done
+ return '/output'
+
+ def generate_script(self):
+ build_root = self.get_variable('build-root')
+ install_root = self.get_variable('install-root')
+ commands = []
+
+ # The directory to grab
+ inputdir = os.path.join(build_root, self.normal_name, self.source.lstrip(os.sep))
+ inputdir = inputdir.rstrip(os.sep)
+
+ # The output target directory
+ outputdir = os.path.join(install_root, self.target.lstrip(os.sep))
+ outputdir = outputdir.rstrip(os.sep)
+
+ # Ensure target directory parent exists but target directory doesn't
+ commands.append("mkdir -p {}".format(os.path.dirname(outputdir)))
+ commands.append("[ ! -e {outputdir} ] || rmdir {outputdir}".format(outputdir=outputdir))
+
+ # Move it over
+ commands.append("mv {} {}".format(inputdir, outputdir))
+
+ script = ""
+ for cmd in commands:
+ script += "(set -ex; {}\n) || exit 1\n".format(cmd)
+
+ return script
+
+
+# Plugin entry point
+def setup():
+ return ImportElement
diff --git a/src/buildstream/plugins/elements/import.yaml b/src/buildstream/plugins/elements/import.yaml
new file mode 100644
index 000000000..698111b55
--- /dev/null
+++ b/src/buildstream/plugins/elements/import.yaml
@@ -0,0 +1,14 @@
+# The import element simply stages the given sources
+# directly to the root of the sandbox and then collects
+# the output to create an output artifact.
+#
+config:
+
+  # By default we collect everything staged; specify a
+ # directory here to output only a subset of the staged
+ # input sources.
+ source: /
+
+  # Prefix the output with an optional directory; by default
+ # the input is found at the root of the produced artifact.
+ target: /
diff --git a/src/buildstream/plugins/elements/junction.py b/src/buildstream/plugins/elements/junction.py
new file mode 100644
index 000000000..15ef115d9
--- /dev/null
+++ b/src/buildstream/plugins/elements/junction.py
@@ -0,0 +1,229 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jürg Billeter <juerg.billeter@codethink.co.uk>
+
+"""
+junction - Integrate subprojects
+================================
+This element is a link to another BuildStream project. It allows integration
+of multiple projects into a single pipeline.
+
+Overview
+--------
+
+.. code:: yaml
+
+ kind: junction
+
+ # Specify the BuildStream project source
+ sources:
+ - kind: git
+ url: upstream:projectname.git
+ track: master
+ ref: d0b38561afb8122a3fc6bafc5a733ec502fcaed6
+
+ # Specify the junction configuration
+ config:
+
+ # Override project options
+ options:
+ machine_arch: "%{machine_arch}"
+ debug: True
+
+ # Optionally look in a subpath of the source repository for the project
+ path: projects/hello
+
+ # Optionally specify another junction element to serve as a target for
+ # this element. Target should be defined using the syntax
+ # ``{junction-name}:{element-name}``.
+ #
+ # Note that this option cannot be used in conjunction with sources.
+ target: sub-project.bst:sub-sub-project.bst
+
+.. note::
+
+ The configuration option to allow specifying junction targets is available
+ since :ref:`format version 24 <project_format_version>`.
+
+.. note::
+
+ Junction elements may not specify any dependencies as they are simply
+ links to other projects and are not in the dependency graph on their own.
+
+With a junction element in place, local elements can depend on elements in
+the other BuildStream project using the additional ``junction`` attribute in the
+dependency dictionary:
+
+.. code:: yaml
+
+ depends:
+ - junction: toolchain.bst
+ filename: gcc.bst
+ type: build
+
+While junctions are elements, only a limited set of element operations is
+supported. They can be tracked and fetched like other elements.
+However, junction elements do not produce any artifacts, which means that
+they cannot be built or staged. It also means that another element cannot
+depend on a junction element itself.
+
+.. note::
+
+ BuildStream does not implicitly track junction elements. This means
+   that if we were to invoke `bst build --track-all ELEMENT` on an element
+ which uses a junction element, the ref of the junction element
+ will not automatically be updated if a more recent version exists.
+
+ Therefore, if you require the most up-to-date version of a subproject,
+ you must explicitly track the junction element by invoking:
+ `bst source track JUNCTION_ELEMENT`.
+
+ Furthermore, elements within the subproject are also not tracked by default.
+ For this, we must specify the `--track-cross-junctions` option. This option
+   must be preceded by `--track ELEMENT` or `--track-all`.
+
+
+Sources
+-------
+``bst show`` does not implicitly fetch junction sources if they haven't been
+cached yet. However, they can be fetched explicitly:
+
+.. code::
+
+ bst source fetch junction.bst
+
+Other commands such as ``bst build`` implicitly fetch junction sources.
+
+Options
+-------
+.. code:: yaml
+
+ options:
+ machine_arch: "%{machine_arch}"
+ debug: True
+
+Junctions can configure options of the linked project. Options are never
+implicitly inherited across junctions; however, variables can be used to
+explicitly assign the same value to a subproject option.
+
+.. _core_junction_nested:
+
+Nested Junctions
+----------------
+Junctions can be nested. That is, subprojects are allowed to have junctions on
+their own. Nested junctions in different subprojects may point to the same
+project; however, in most use cases the same project should be loaded only once.
+BuildStream uses the junction element name as key to determine which junctions
+to merge. It is recommended that the name of a junction is set to the same as
+the name of the linked project.
+
+As the junctions may differ in source version and options, BuildStream cannot
+simply use one junction and ignore the others. Due to this, BuildStream requires
+the user to resolve possibly conflicting nested junctions by creating a junction
+with the same name in the top-level project, which then takes precedence.
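+
+For instance, if two subprojects each declare their own junction to a common
+project, the top-level project can declare a junction with that same name to
+pin the version used everywhere. The URL and branch below are illustrative
+placeholders; a pinned ``ref`` would normally be added via ``bst source track``:
+
+.. code:: yaml
+
+   kind: junction
+
+   sources:
+   - kind: git
+     url: upstream:common-project.git
+     track: master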
+
+Targeting other junctions
+~~~~~~~~~~~~~~~~~~~~~~~~~
+When working with nested junctions, you can also create a junction element that
+targets another junction element in the sub-project. This can be useful if you
+need to ensure that both the top-level project and the sub-project are using
+the same version of the sub-sub-project.
+
+This can be done using the ``target`` configuration option. See below for an
+example:
+
+.. code:: yaml
+
+ kind: junction
+
+ config:
+ target: subproject.bst:subsubproject.bst
+
+In the above example, this junction element would be targeting the junction
+element named ``subsubproject.bst`` in the subproject referred to by
+``subproject.bst``.
+
+Note that when targeting another junction, the name of the junction element
+must not be the same as the name of the target.
+"""
+
+from collections.abc import Mapping
+from buildstream import Element, ElementError
+from buildstream._pipeline import PipelineError
+
+
+# Element implementation for the 'junction' kind.
+class JunctionElement(Element):
+ # pylint: disable=attribute-defined-outside-init
+
+ # Junctions are not allowed any dependencies
+ BST_FORBID_BDEPENDS = True
+ BST_FORBID_RDEPENDS = True
+
+ def configure(self, node):
+ self.path = self.node_get_member(node, str, 'path', default='')
+ self.options = self.node_get_member(node, Mapping, 'options', default={})
+ self.target = self.node_get_member(node, str, 'target', default=None)
+ self.target_element = None
+ self.target_junction = None
+
+ def preflight(self):
+ # "target" cannot be used in conjunction with:
+ # 1. sources
+ # 2. config['options']
+ # 3. config['path']
+ if self.target and any(self.sources()):
+ raise ElementError("junction elements cannot define both 'sources' and 'target' config option")
+ if self.target and any(self.node_items(self.options)):
+ raise ElementError("junction elements cannot define both 'options' and 'target'")
+ if self.target and self.path:
+ raise ElementError("junction elements cannot define both 'path' and 'target'")
+
+ # Validate format of target, if defined
+ if self.target:
+ try:
+ self.target_junction, self.target_element = self.target.split(":")
+ except ValueError:
+ raise ElementError("'target' option must be in format '{junction-name}:{element-name}'")
+
+ # We cannot target a junction that has the same name as us, since that
+ # will cause an infinite recursion while trying to load it.
+ if self.name == self.target_element:
+ raise ElementError("junction elements cannot target an element with the same name")
+
+ def get_unique_key(self):
+ # Junctions do not produce artifacts. get_unique_key() implementation
+ # is still required for `bst source fetch`.
+ return 1
+
+ def configure_sandbox(self, sandbox):
+ raise PipelineError("Cannot build junction elements")
+
+ def stage(self, sandbox):
+ raise PipelineError("Cannot stage junction elements")
+
+ def generate_script(self):
+ raise PipelineError("Cannot build junction elements")
+
+ def assemble(self, sandbox):
+ raise PipelineError("Cannot build junction elements")
+
+
+# Plugin entry point
+def setup():
+ return JunctionElement
diff --git a/src/buildstream/plugins/elements/make.py b/src/buildstream/plugins/elements/make.py
new file mode 100644
index 000000000..67a261100
--- /dev/null
+++ b/src/buildstream/plugins/elements/make.py
@@ -0,0 +1,56 @@
+#
+# Copyright Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Ed Baunton <ebaunton1@bloomberg.net>
+
+"""
+make - Make build element
+=========================
+This is a :mod:`BuildElement <buildstream.buildelement>` implementation for
+using GNU make based builds.
+
+.. note::
+
+ The ``make`` element is available since :ref:`format version 9 <project_format_version>`
+
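+A minimal usage sketch, where the element and source names are hypothetical
+and a ``ref`` would normally be pinned with ``bst source track``:
+
+.. code:: yaml
+
+   kind: make
+
+   # Build on top of a base SDK providing make and a toolchain
+   depends:
+   - base/sdk.bst
+
+   # The sources to build
+   sources:
+   - kind: tar
+     url: upstream:hello.tar.gz
+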
+Here is the default configuration for the ``make`` element in full:
+
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/make.yaml
+ :language: yaml
+
+See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
+details on common configuration options for build elements.
+"""
+
+from buildstream import BuildElement, SandboxFlags
+
+
+# Element implementation for the 'make' kind.
+class MakeElement(BuildElement):
+ # Supports virtual directories (required for remote execution)
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Enable command batching across prepare() and assemble()
+ def configure_sandbox(self, sandbox):
+ super().configure_sandbox(sandbox)
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+ collect=self.get_variable('install-root'))
+
+
+# Plugin entry point
+def setup():
+ return MakeElement
diff --git a/src/buildstream/plugins/elements/make.yaml b/src/buildstream/plugins/elements/make.yaml
new file mode 100644
index 000000000..83e5c658f
--- /dev/null
+++ b/src/buildstream/plugins/elements/make.yaml
@@ -0,0 +1,42 @@
+# make default configurations
+
+variables:
+ make: make PREFIX="%{prefix}"
+ make-install: make -j1 PREFIX="%{prefix}" DESTDIR="%{install-root}" install
+
+ # Set this if the sources cannot handle parallelization.
+ #
+ # notparallel: True
+
+config:
+
+ # Commands for building the software
+ #
+ build-commands:
+ - |
+ %{make}
+
+ # Commands for installing the software into a
+ # destination folder
+ #
+ install-commands:
+ - |
+ %{make-install}
+
+ # Commands for stripping debugging information out of
+ # installed binaries
+ #
+ strip-commands:
+ - |
+ %{strip-binaries}
+
+# Use max-jobs CPUs for building and enable verbosity
+environment:
+ MAKEFLAGS: -j%{max-jobs}
+ V: 1
+
+# And don't consider MAKEFLAGS or V as something which may
+# affect build output.
+environment-nocache:
+- MAKEFLAGS
+- V
diff --git a/src/buildstream/plugins/elements/makemaker.py b/src/buildstream/plugins/elements/makemaker.py
new file mode 100644
index 000000000..7da051592
--- /dev/null
+++ b/src/buildstream/plugins/elements/makemaker.py
@@ -0,0 +1,51 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+makemaker - Perl MakeMaker build element
+========================================
+A :mod:`BuildElement <buildstream.buildelement>` implementation for using
+the Perl ExtUtils::MakeMaker build system
+
+The MakeMaker default configuration:
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/makemaker.yaml
+ :language: yaml
+
+See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
+details on common configuration options for build elements.
+"""
+
+from buildstream import BuildElement, SandboxFlags
+
+
+# Element implementation for the 'makemaker' kind.
+class MakeMakerElement(BuildElement):
+ # Supports virtual directories (required for remote execution)
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Enable command batching across prepare() and assemble()
+ def configure_sandbox(self, sandbox):
+ super().configure_sandbox(sandbox)
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+ collect=self.get_variable('install-root'))
+
+
+# Plugin entry point
+def setup():
+ return MakeMakerElement
diff --git a/src/buildstream/plugins/elements/makemaker.yaml b/src/buildstream/plugins/elements/makemaker.yaml
new file mode 100644
index 000000000..c9c4622cb
--- /dev/null
+++ b/src/buildstream/plugins/elements/makemaker.yaml
@@ -0,0 +1,48 @@
+# Default configuration for the Perl ExtUtils::MakeMaker
+# build system
+
+variables:
+
+ # To install perl distributions into the correct location
+ # in our chroot we need to set PREFIX to <destdir>/<prefix>
+ # in the configure-commands.
+ #
+ # The mapping between PREFIX and the final installation
+ # directories is complex and depends upon the configuration
+  # of perl; see
+  # https://metacpan.org/pod/distribution/perl/INSTALL#Installation-Directories
+  # and ExtUtils::MakeMaker's documentation for more details.
+ configure: |
+
+ perl Makefile.PL PREFIX=%{install-root}%{prefix}
+
+ make: make
+ make-install: make install
+
+config:
+
+ # Commands for configuring the software
+ #
+ configure-commands:
+ - |
+ %{configure}
+
+ # Commands for building the software
+ #
+ build-commands:
+ - |
+ %{make}
+
+ # Commands for installing the software into a
+ # destination folder
+ #
+ install-commands:
+ - |
+ %{make-install}
+
+ # Commands for stripping debugging information out of
+ # installed binaries
+ #
+ strip-commands:
+ - |
+ %{strip-binaries}
diff --git a/src/buildstream/plugins/elements/manual.py b/src/buildstream/plugins/elements/manual.py
new file mode 100644
index 000000000..bbda65312
--- /dev/null
+++ b/src/buildstream/plugins/elements/manual.py
@@ -0,0 +1,51 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+manual - Manual build element
+=============================
+The most basic build element does nothing but allow users to
+add custom build commands to the arrays understood by the :mod:`BuildElement <buildstream.buildelement>`.
+
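+For example, a manual element might spell out its commands directly; the
+element and command names below are purely illustrative:
+
+.. code:: yaml
+
+   kind: manual
+
+   depends:
+   - base/sdk.bst
+
+   config:
+     build-commands:
+     - ./build.sh
+
+     install-commands:
+     - ./install.sh "%{install-root}"
+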
+The empty configuration is as such:
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/manual.yaml
+ :language: yaml
+
+See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
+details on common configuration options for build elements.
+"""
+
+from buildstream import BuildElement, SandboxFlags
+
+
+# Element implementation for the 'manual' kind.
+class ManualElement(BuildElement):
+ # Supports virtual directories (required for remote execution)
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Enable command batching across prepare() and assemble()
+ def configure_sandbox(self, sandbox):
+ super().configure_sandbox(sandbox)
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+ collect=self.get_variable('install-root'))
+
+
+# Plugin entry point
+def setup():
+ return ManualElement
diff --git a/src/buildstream/plugins/elements/manual.yaml b/src/buildstream/plugins/elements/manual.yaml
new file mode 100644
index 000000000..38fe7d163
--- /dev/null
+++ b/src/buildstream/plugins/elements/manual.yaml
@@ -0,0 +1,22 @@
+# Manual build element does not provide any default
+# build commands
+config:
+
+ # Commands for configuring the software
+ #
+ configure-commands: []
+
+ # Commands for building the software
+ #
+ build-commands: []
+
+ # Commands for installing the software into a
+ # destination folder
+ #
+ install-commands: []
+
+ # Commands for stripping installed binaries
+ #
+ strip-commands:
+ - |
+ %{strip-binaries}
diff --git a/src/buildstream/plugins/elements/meson.py b/src/buildstream/plugins/elements/meson.py
new file mode 100644
index 000000000..d80f77977
--- /dev/null
+++ b/src/buildstream/plugins/elements/meson.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2017 Patrick Griffis
+# Copyright (C) 2018 Codethink Ltd.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+meson - Meson build element
+===========================
+This is a :mod:`BuildElement <buildstream.buildelement>` implementation for
+using `Meson <http://mesonbuild.com/>`_ build scripts.
+
+You will often want to pass additional arguments to ``meson``. This should
+be done on a per-element basis by setting the ``meson-local`` variable. Here is
+an example:
+
+.. code:: yaml
+
+ variables:
+ meson-local: |
+ -Dmonkeys=yes
+
+If you want to pass extra options to ``meson`` for every element in your
+project, set the ``meson-global`` variable in your project.conf file. Here is
+an example of that:
+
+.. code:: yaml
+
+ elements:
+ meson:
+ variables:
+ meson-global: |
+ -Dmonkeys=always
+
+Here is the default configuration for the ``meson`` element in full:
+
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/meson.yaml
+ :language: yaml
+
+See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
+details on common configuration options for build elements.
+"""
+
+from buildstream import BuildElement, SandboxFlags
+
+
+# Element implementation for the 'meson' kind.
+class MesonElement(BuildElement):
+ # Supports virtual directories (required for remote execution)
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Enable command batching across prepare() and assemble()
+ def configure_sandbox(self, sandbox):
+ super().configure_sandbox(sandbox)
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+ collect=self.get_variable('install-root'))
+
+
+# Plugin entry point
+def setup():
+ return MesonElement
diff --git a/src/buildstream/plugins/elements/meson.yaml b/src/buildstream/plugins/elements/meson.yaml
new file mode 100644
index 000000000..2172cb34c
--- /dev/null
+++ b/src/buildstream/plugins/elements/meson.yaml
@@ -0,0 +1,79 @@
+# Meson default configuration
+
+variables:
+
+ build-dir: _builddir
+
+ # Project-wide extra arguments to be passed to `meson`
+ meson-global: ''
+
+ # Element-specific extra arguments to be passed to `meson`.
+ meson-local: ''
+
+ # For backwards compatibility only, do not use.
+ meson-extra: ''
+
+ meson-args: |
+
+ --prefix=%{prefix} \
+ --bindir=%{bindir} \
+ --sbindir=%{sbindir} \
+ --sysconfdir=%{sysconfdir} \
+ --datadir=%{datadir} \
+ --includedir=%{includedir} \
+ --libdir=%{libdir} \
+ --libexecdir=%{libexecdir} \
+ --localstatedir=%{localstatedir} \
+ --sharedstatedir=%{sharedstatedir} \
+ --mandir=%{mandir} \
+ --infodir=%{infodir} %{meson-extra} %{meson-global} %{meson-local}
+
+ meson: meson %{conf-root} %{build-dir} %{meson-args}
+
+ ninja: |
+ ninja -j ${NINJAJOBS} -C %{build-dir}
+
+ ninja-install: |
+ env DESTDIR="%{install-root}" ninja -C %{build-dir} install
+
+ # Set this if the sources cannot handle parallelization.
+ #
+ # notparallel: True
+
+config:
+
+ # Commands for configuring the software
+ #
+ configure-commands:
+ - |
+ %{meson}
+
+ # Commands for building the software
+ #
+ build-commands:
+ - |
+ %{ninja}
+
+ # Commands for installing the software into a
+ # destination folder
+ #
+ install-commands:
+ - |
+ %{ninja-install}
+
+ # Commands for stripping debugging information out of
+ # installed binaries
+ #
+ strip-commands:
+ - |
+ %{strip-binaries}
+
+# Use max-jobs CPUs for building
+environment:
+ NINJAJOBS: |
+ %{max-jobs}
+
+# And don't consider NINJAJOBS as something which may
+# affect build output.
+environment-nocache:
+- NINJAJOBS
diff --git a/src/buildstream/plugins/elements/modulebuild.py b/src/buildstream/plugins/elements/modulebuild.py
new file mode 100644
index 000000000..63e3840dc
--- /dev/null
+++ b/src/buildstream/plugins/elements/modulebuild.py
@@ -0,0 +1,51 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+modulebuild - Perl Module::Build build element
+==============================================
+A :mod:`BuildElement <buildstream.buildelement>` implementation for using
+the Perl Module::Build build system
+
+The modulebuild default configuration:
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/modulebuild.yaml
+ :language: yaml
+
+See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
+details on common configuration options for build elements.
+"""
+
+from buildstream import BuildElement, SandboxFlags
+
+
+# Element implementation for the 'modulebuild' kind.
+class ModuleBuildElement(BuildElement):
+ # Supports virtual directories (required for remote execution)
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Enable command batching across prepare() and assemble()
+ def configure_sandbox(self, sandbox):
+ super().configure_sandbox(sandbox)
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+ collect=self.get_variable('install-root'))
+
+
+# Plugin entry point
+def setup():
+ return ModuleBuildElement
diff --git a/src/buildstream/plugins/elements/modulebuild.yaml b/src/buildstream/plugins/elements/modulebuild.yaml
new file mode 100644
index 000000000..18f034bab
--- /dev/null
+++ b/src/buildstream/plugins/elements/modulebuild.yaml
@@ -0,0 +1,48 @@
+# Default configuration for the Perl Module::Build
+# build system.
+
+variables:
+
+ # To install perl distributions into the correct location
+ # in our chroot we need to set PREFIX to <destdir>/<prefix>
+ # in the configure-commands.
+ #
+ # The mapping between PREFIX and the final installation
+ # directories is complex and depends upon the configuration
+  # of perl; see
+  # https://metacpan.org/pod/distribution/perl/INSTALL#Installation-Directories
+  # and ExtUtils::MakeMaker's documentation for more details.
+ configure: |
+
+ perl Build.PL --prefix "%{install-root}%{prefix}"
+
+ perl-build: ./Build
+ perl-install: ./Build install
+
+config:
+
+ # Commands for configuring the software
+ #
+ configure-commands:
+ - |
+ %{configure}
+
+ # Commands for building the software
+ #
+ build-commands:
+ - |
+ %{perl-build}
+
+ # Commands for installing the software into a
+ # destination folder
+ #
+ install-commands:
+ - |
+ %{perl-install}
+
+ # Commands for stripping debugging information out of
+ # installed binaries
+ #
+ strip-commands:
+ - |
+ %{strip-binaries}
diff --git a/src/buildstream/plugins/elements/pip.py b/src/buildstream/plugins/elements/pip.py
new file mode 100644
index 000000000..4a9eefde1
--- /dev/null
+++ b/src/buildstream/plugins/elements/pip.py
@@ -0,0 +1,51 @@
+#
+# Copyright (C) 2017 Mathieu Bridon
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Mathieu Bridon <bochecha@daitauha.fr>
+
+"""
+pip - Pip build element
+=======================
+A :mod:`BuildElement <buildstream.buildelement>` implementation for installing
+Python modules with pip
+
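+A typical element sketch simply relies on the default install commands and
+points them at a Python source tree; the element names and source URL here
+are hypothetical:
+
+.. code:: yaml
+
+   kind: pip
+
+   depends:
+   - base/python.bst
+
+   sources:
+   - kind: tar
+     url: pypi:example/example-1.0.tar.gz
+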
+The pip default configuration:
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/pip.yaml
+ :language: yaml
+
+See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
+details on common configuration options for build elements.
+"""
+
+from buildstream import BuildElement, SandboxFlags
+
+
+# Element implementation for the 'pip' kind.
+class PipElement(BuildElement):
+ # Supports virtual directories (required for remote execution)
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Enable command batching across prepare() and assemble()
+ def configure_sandbox(self, sandbox):
+ super().configure_sandbox(sandbox)
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+ collect=self.get_variable('install-root'))
+
+
+# Plugin entry point
+def setup():
+ return PipElement
diff --git a/src/buildstream/plugins/elements/pip.yaml b/src/buildstream/plugins/elements/pip.yaml
new file mode 100644
index 000000000..294d4ad9a
--- /dev/null
+++ b/src/buildstream/plugins/elements/pip.yaml
@@ -0,0 +1,36 @@
+# Pip default configurations
+
+variables:
+
+ pip: pip
+ pip-flags: |
+ %{pip} install --no-deps --root=%{install-root} --prefix=%{prefix}
+ pip-install-package: |
+ %{pip-flags} %{conf-root}
+ pip-download-dir: |
+ .bst_pip_downloads
+ pip-install-dependencies: |
+ if [ -e %{pip-download-dir} ]; then %{pip-flags} %{pip-download-dir}/*; fi
+
+config:
+
+ configure-commands: []
+ build-commands: []
+
+ # Commands for installing the software into a
+ # destination folder
+ #
+ install-commands:
+ - |
+ %{pip-install-package}
+ - |
+ %{pip-install-dependencies}
+
+ # Commands for stripping debugging information out of
+ # installed binaries
+ #
+ strip-commands:
+ - |
+ %{strip-binaries}
+ - |
+ %{fix-pyc-timestamps}
diff --git a/src/buildstream/plugins/elements/qmake.py b/src/buildstream/plugins/elements/qmake.py
new file mode 100644
index 000000000..56a0e641e
--- /dev/null
+++ b/src/buildstream/plugins/elements/qmake.py
@@ -0,0 +1,51 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+qmake - QMake build element
+===========================
+A :mod:`BuildElement <buildstream.buildelement>` implementation for using
+the qmake build system
+
+The qmake default configuration:
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/qmake.yaml
+ :language: yaml
+
+See :ref:`built-in functionality documentation <core_buildelement_builtins>` for
+details on common configuration options for build elements.
+"""
+
+from buildstream import BuildElement, SandboxFlags
+
+
+# Element implementation for the 'qmake' kind.
+class QMakeElement(BuildElement):
+ # Supports virtual directories (required for remote execution)
+ BST_VIRTUAL_DIRECTORY = True
+
+ # Enable command batching across prepare() and assemble()
+ def configure_sandbox(self, sandbox):
+ super().configure_sandbox(sandbox)
+ self.batch_prepare_assemble(SandboxFlags.ROOT_READ_ONLY,
+ collect=self.get_variable('install-root'))
+
+
+# Plugin entry point
+def setup():
+ return QMakeElement
diff --git a/src/buildstream/plugins/elements/qmake.yaml b/src/buildstream/plugins/elements/qmake.yaml
new file mode 100644
index 000000000..4ac31932e
--- /dev/null
+++ b/src/buildstream/plugins/elements/qmake.yaml
@@ -0,0 +1,50 @@
+# QMake default configuration
+
+variables:
+
+ qmake: qmake -makefile %{conf-root}
+ make: make
+ make-install: make -j1 INSTALL_ROOT="%{install-root}" install
+
+ # Set this if the sources cannot handle parallelization.
+ #
+ # notparallel: True
+
+config:
+
+ # Commands for configuring the software
+ #
+ configure-commands:
+ - |
+ %{qmake}
+
+ # Commands for building the software
+ #
+ build-commands:
+ - |
+ %{make}
+
+ # Commands for installing the software into a
+ # destination folder
+ #
+ install-commands:
+ - |
+ %{make-install}
+
+ # Commands for stripping debugging information out of
+ # installed binaries
+ #
+ strip-commands:
+ - |
+ %{strip-binaries}
+
+# Use max-jobs CPUs for building and enable verbosity
+environment:
+ MAKEFLAGS: -j%{max-jobs}
+ V: 1
+
+# And don't consider MAKEFLAGS or V as something which may
+# affect build output.
+environment-nocache:
+- MAKEFLAGS
+- V
diff --git a/src/buildstream/plugins/elements/script.py b/src/buildstream/plugins/elements/script.py
new file mode 100644
index 000000000..0d194dcc1
--- /dev/null
+++ b/src/buildstream/plugins/elements/script.py
@@ -0,0 +1,69 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jonathan Maw <jonathan.maw@codethink.co.uk>
+
+"""
+script - Run scripts to create output
+=====================================
+This element allows one to run some commands to mutate the
+input and create some output.
+
+.. note::
+
+ Script elements may only specify build dependencies. See
+ :ref:`the format documentation <format_dependencies>` for more
+ detail on specifying dependencies.
+
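+As a rough sketch, a script element can stage one element as a tools
+environment at ``/``, stage another under the build root, and then run
+commands against it; the element names below are hypothetical:
+
+.. code:: yaml
+
+   kind: script
+
+   depends:
+   - filename: tools.bst
+     type: build
+   - filename: system.bst
+     type: build
+
+   config:
+     layout:
+     - element: tools.bst
+       destination: /
+     - element: system.bst
+       destination: "%{build-root}"
+
+     commands:
+     - ls "%{build-root}" > "%{install-root}/manifest.txt"
+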
+The default configuration and possible options are as such:
+ .. literalinclude:: ../../../src/buildstream/plugins/elements/script.yaml
+ :language: yaml
+"""
+
+import buildstream
+
+
+# Element implementation for the 'script' kind.
+class ScriptElement(buildstream.ScriptElement):
+ # pylint: disable=attribute-defined-outside-init
+
+ # This plugin has been modified to avoid the use of Sandbox.get_directory
+ BST_VIRTUAL_DIRECTORY = True
+
+ def configure(self, node):
+ for n in self.node_get_member(node, list, 'layout', []):
+ dst = self.node_subst_member(n, 'destination')
+ elm = self.node_subst_member(n, 'element', None)
+ self.layout_add(elm, dst)
+
+ self.node_validate(node, [
+ 'commands', 'root-read-only', 'layout'
+ ])
+
+ cmds = self.node_subst_list(node, "commands")
+ self.add_commands("commands", cmds)
+
+ self.set_work_dir()
+ self.set_install_root()
+ self.set_root_read_only(self.node_get_member(node, bool,
+ 'root-read-only', False))
+
+
+# Plugin entry point
+def setup():
+ return ScriptElement
diff --git a/src/buildstream/plugins/elements/script.yaml b/src/buildstream/plugins/elements/script.yaml
new file mode 100644
index 000000000..b388378da
--- /dev/null
+++ b/src/buildstream/plugins/elements/script.yaml
@@ -0,0 +1,25 @@
+# Common script element variables
+variables:
+ # Defines the directory commands will be run from.
+ cwd: /
+
+# Script element configuration
+config:
+
+ # Defines whether to run the sandbox with '/' read-only.
+ # It is recommended to set root as read-only wherever possible.
+ root-read-only: False
+
+ # Defines where to stage elements which are direct or indirect dependencies.
+ # By default, all direct dependencies are staged to '/'.
+ # This is also commonly used to take one element as an environment
+ # containing the tools used to operate on the other element.
+ # layout:
+ # - element: foo-tools.bst
+ # destination: /
+ # - element: foo-system.bst
+ # destination: %{build-root}
+
+ # List of commands to run in the sandbox.
+ commands: []
+
diff --git a/src/buildstream/plugins/elements/stack.py b/src/buildstream/plugins/elements/stack.py
new file mode 100644
index 000000000..97517ca48
--- /dev/null
+++ b/src/buildstream/plugins/elements/stack.py
@@ -0,0 +1,66 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+stack - Symbolic Element for dependency grouping
+================================================
+Stack elements are symbolic elements used to represent
+a logical group of elements.
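+
+A stack element carries no configuration of its own; it simply depends on
+the elements it groups together, for example (element names hypothetical):
+
+.. code:: yaml
+
+   kind: stack
+
+   depends:
+   - gnu-toolchain.bst
+   - linux-kernel.bst
+   - busybox.bst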
+"""
+
+from buildstream import Element
+
+
+# Element implementation for the 'stack' kind.
+class StackElement(Element):
+
+ # This plugin has been modified to avoid the use of Sandbox.get_directory
+ BST_VIRTUAL_DIRECTORY = True
+
+ def configure(self, node):
+ pass
+
+ def preflight(self):
+ pass
+
+ def get_unique_key(self):
+ # We do not add anything to the build, only our dependencies
+ # do, so our unique key is just a constant.
+ return 1
+
+ def configure_sandbox(self, sandbox):
+ pass
+
+ def stage(self, sandbox):
+ pass
+
+ def assemble(self, sandbox):
+
+        # Just create a dummy empty artifact; its existence is a statement
+ # that all this stack's dependencies are built.
+ vrootdir = sandbox.get_virtual_directory()
+ vrootdir.descend('output', create=True)
+
+ # And we're done
+ return '/output'
+
+
+# Plugin entry point
+def setup():
+ return StackElement
diff --git a/src/buildstream/plugins/sources/__init__.py b/src/buildstream/plugins/sources/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/plugins/sources/__init__.py
diff --git a/src/buildstream/plugins/sources/_downloadablefilesource.py b/src/buildstream/plugins/sources/_downloadablefilesource.py
new file mode 100644
index 000000000..b9b15e268
--- /dev/null
+++ b/src/buildstream/plugins/sources/_downloadablefilesource.py
@@ -0,0 +1,250 @@
+"""A base abstract class for source implementations which download a file"""
+
+import os
+import urllib.request
+import urllib.error
+import contextlib
+import shutil
+import netrc
+
+from buildstream import Source, SourceError, Consistency
+from buildstream import utils
+
+
+class _NetrcFTPOpener(urllib.request.FTPHandler):
+
+ def __init__(self, netrc_config):
+ self.netrc = netrc_config
+
+ def _split(self, netloc):
+ userpass, hostport = urllib.parse.splituser(netloc)
+ host, port = urllib.parse.splitport(hostport)
+ if userpass:
+ user, passwd = urllib.parse.splitpasswd(userpass)
+ else:
+ user = None
+ passwd = None
+ return host, port, user, passwd
+
+ def _unsplit(self, host, port, user, passwd):
+ if port:
+ host = '{}:{}'.format(host, port)
+ if user:
+ if passwd:
+ user = '{}:{}'.format(user, passwd)
+ host = '{}@{}'.format(user, host)
+
+ return host
+
+ def ftp_open(self, req):
+ host, port, user, passwd = self._split(req.host)
+
+ if user is None and self.netrc:
+ entry = self.netrc.authenticators(host)
+ if entry:
+ user, _, passwd = entry
+
+ req.host = self._unsplit(host, port, user, passwd)
+
+ return super().ftp_open(req)
+
+
+class _NetrcPasswordManager:
+
+ def __init__(self, netrc_config):
+ self.netrc = netrc_config
+
+ def add_password(self, realm, uri, user, passwd):
+ pass
+
+ def find_user_password(self, realm, authuri):
+ if not self.netrc:
+ return None, None
+ parts = urllib.parse.urlsplit(authuri)
+ entry = self.netrc.authenticators(parts.hostname)
+ if not entry:
+ return None, None
+ else:
+ login, _, password = entry
+ return login, password
+
+
+class DownloadableFileSource(Source):
+ # pylint: disable=attribute-defined-outside-init
+
+ COMMON_CONFIG_KEYS = Source.COMMON_CONFIG_KEYS + ['url', 'ref', 'etag']
+
+ __urlopener = None
+
+ def configure(self, node):
+ self.original_url = self.node_get_member(node, str, 'url')
+ self.ref = self.node_get_member(node, str, 'ref', None)
+ self.url = self.translate_url(self.original_url)
+ self._warn_deprecated_etag(node)
+
+ def preflight(self):
+ return
+
+ def get_unique_key(self):
+ return [self.original_url, self.ref]
+
+ def get_consistency(self):
+ if self.ref is None:
+ return Consistency.INCONSISTENT
+
+ if os.path.isfile(self._get_mirror_file()):
+ return Consistency.CACHED
+
+ else:
+ return Consistency.RESOLVED
+
+ def load_ref(self, node):
+ self.ref = self.node_get_member(node, str, 'ref', None)
+ self._warn_deprecated_etag(node)
+
+ def get_ref(self):
+ return self.ref
+
+ def set_ref(self, ref, node):
+ node['ref'] = self.ref = ref
+
+ def track(self):
+        # there is no 'track' field in the source to determine whether
+ # or not to update refs, because tracking a ref is always a conscious
+ # decision by the user.
+ with self.timed_activity("Tracking {}".format(self.url),
+ silent_nested=True):
+ new_ref = self._ensure_mirror()
+
+ if self.ref and self.ref != new_ref:
+ detail = "When tracking, new ref differs from current ref:\n" \
+ + " Tracked URL: {}\n".format(self.url) \
+ + " Current ref: {}\n".format(self.ref) \
+ + " New ref: {}\n".format(new_ref)
+ self.warn("Potential man-in-the-middle attack!", detail=detail)
+
+ return new_ref
+
+ def fetch(self):
+
+ # Just a defensive check, it is impossible for the
+ # file to be already cached because Source.fetch() will
+ # not be called if the source is already Consistency.CACHED.
+ #
+ if os.path.isfile(self._get_mirror_file()):
+ return # pragma: nocover
+
+ # Download the file, raise hell if the sha256sums don't match,
+ # and mirror the file otherwise.
+ with self.timed_activity("Fetching {}".format(self.url), silent_nested=True):
+ sha256 = self._ensure_mirror()
+ if sha256 != self.ref:
+ raise SourceError("File downloaded from {} has sha256sum '{}', not '{}'!"
+ .format(self.url, sha256, self.ref))
+
+ def _warn_deprecated_etag(self, node):
+ etag = self.node_get_member(node, str, 'etag', None)
+ if etag:
+ provenance = self.node_provenance(node, member_name='etag')
+ self.warn('{} "etag" is deprecated and ignored.'.format(provenance))
+
+ def _get_etag(self, ref):
+ etagfilename = os.path.join(self._get_mirror_dir(), '{}.etag'.format(ref))
+ if os.path.exists(etagfilename):
+ with open(etagfilename, 'r') as etagfile:
+ return etagfile.read()
+
+ return None
+
+ def _store_etag(self, ref, etag):
+ etagfilename = os.path.join(self._get_mirror_dir(), '{}.etag'.format(ref))
+ with utils.save_file_atomic(etagfilename) as etagfile:
+ etagfile.write(etag)
+
+ def _ensure_mirror(self):
+ # Downloads from the url and caches it according to its sha256sum.
+ try:
+ with self.tempdir() as td:
+ default_name = os.path.basename(self.url)
+ request = urllib.request.Request(self.url)
+ request.add_header('Accept', '*/*')
+
+                # We do not use the etag if what we have in the cache does
+                # not match the ref, so that we can recover from a
+                # corrupted download.
+ if self.ref:
+ etag = self._get_etag(self.ref)
+
+ # Do not re-download the file if the ETag matches.
+ if etag and self.get_consistency() == Consistency.CACHED:
+ request.add_header('If-None-Match', etag)
+
+ opener = self.__get_urlopener()
+ with contextlib.closing(opener.open(request)) as response:
+ info = response.info()
+
+ etag = info['ETag'] if 'ETag' in info else None
+
+ filename = info.get_filename(default_name)
+ filename = os.path.basename(filename)
+ local_file = os.path.join(td, filename)
+ with open(local_file, 'wb') as dest:
+ shutil.copyfileobj(response, dest)
+
+ # Make sure url-specific mirror dir exists.
+ if not os.path.isdir(self._get_mirror_dir()):
+ os.makedirs(self._get_mirror_dir())
+
+ # Store by sha256sum
+ sha256 = utils.sha256sum(local_file)
+            # Even if the file already exists, move the new file over
+            # in case the old file was corrupted somehow.
+ os.rename(local_file, self._get_mirror_file(sha256))
+
+ if etag:
+ self._store_etag(sha256, etag)
+ return sha256
+
+ except urllib.error.HTTPError as e:
+ if e.code == 304:
+ # 304 Not Modified.
+                # Because we use the etag only when it matches the ref, the
+                # currently specified ref is what we would have downloaded.
+ return self.ref
+ raise SourceError("{}: Error mirroring {}: {}"
+ .format(self, self.url, e), temporary=True) from e
+
+ except (urllib.error.URLError, urllib.error.ContentTooShortError, OSError, ValueError) as e:
+ # Note that urllib.request.Request in the try block may throw a
+ # ValueError for unknown url types, so we handle it here.
+ raise SourceError("{}: Error mirroring {}: {}"
+ .format(self, self.url, e), temporary=True) from e
+
+ def _get_mirror_dir(self):
+ return os.path.join(self.get_mirror_directory(),
+ utils.url_directory_name(self.original_url))
+
+ def _get_mirror_file(self, sha=None):
+ return os.path.join(self._get_mirror_dir(), sha or self.ref)
+
+ def __get_urlopener(self):
+ if not DownloadableFileSource.__urlopener:
+ try:
+ netrc_config = netrc.netrc()
+ except OSError:
+ # If the .netrc file was not found, FileNotFoundError will be
+ # raised, but OSError will be raised directly by the netrc package
+ # in the case that $HOME is not set.
+ #
+ # This will catch both cases.
+ #
+ DownloadableFileSource.__urlopener = urllib.request.build_opener()
+ except netrc.NetrcParseError as e:
+ self.warn('{}: While reading .netrc: {}'.format(self, e))
+ return urllib.request.build_opener()
+ else:
+ netrc_pw_mgr = _NetrcPasswordManager(netrc_config)
+ http_auth = urllib.request.HTTPBasicAuthHandler(netrc_pw_mgr)
+ ftp_handler = _NetrcFTPOpener(netrc_config)
+ DownloadableFileSource.__urlopener = urllib.request.build_opener(http_auth, ftp_handler)
+ return DownloadableFileSource.__urlopener
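+
+
+# A rough sketch of how a concrete source plugin might build on this base
+# class; the class below is illustrative only and not part of this module:
+#
+#     class ExampleFileSource(DownloadableFileSource):
+#
+#         def stage(self, directory):
+#             # Copy the mirrored file into the staging directory
+#             dest = os.path.join(directory, os.path.basename(self.url))
+#             utils.safe_copy(self._get_mirror_file(), dest)
+#
+#     def setup():
+#         return ExampleFileSource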
diff --git a/src/buildstream/plugins/sources/bzr.py b/src/buildstream/plugins/sources/bzr.py
new file mode 100644
index 000000000..e59986da6
--- /dev/null
+++ b/src/buildstream/plugins/sources/bzr.py
@@ -0,0 +1,210 @@
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jonathan Maw <jonathan.maw@codethink.co.uk>
+
+"""
+bzr - stage files from a bazaar repository
+==========================================
+
+**Host dependencies:**
+
+ * bzr
+
+**Usage:**
+
+.. code:: yaml
+
+ # Specify the bzr source kind
+ kind: bzr
+
+   # Specify the bzr url. Bazaar URLs come in many forms; see
+ # `bzr help urlspec` for more information. Using an alias defined
+ # in your project configuration is encouraged.
+ url: https://launchpad.net/bzr
+
+ # Specify the tracking branch. This is mandatory, as bzr cannot identify
+ # an individual revision outside its branch. bzr URLs that omit the branch
+ # name implicitly specify the trunk branch, but bst requires this to be
+ # explicit.
+ track: trunk
+
+ # Specify the ref. This is a revision number. This is usually a decimal,
+ # but revisions on a branch are of the form
+ # <revision-branched-from>.<branch-number>.<revision-since-branching>
+ # e.g. 6622.1.6.
+ # The ref must be specified to build, and 'bst source track' will update the
+ # revision number to the one on the tip of the branch specified in 'track'.
+ ref: 6622
+
+See :ref:`built-in functionality documentation <core_source_builtins>` for
+details on common configuration options for sources.
+"""
+
+import os
+import shutil
+import fcntl
+from contextlib import contextmanager
+
+from buildstream import Source, SourceError, Consistency
+from buildstream import utils
+
+
+class BzrSource(Source):
+ # pylint: disable=attribute-defined-outside-init
+
+ def configure(self, node):
+ self.node_validate(node, ['url', 'track', 'ref', *Source.COMMON_CONFIG_KEYS])
+
+ self.original_url = self.node_get_member(node, str, 'url')
+ self.tracking = self.node_get_member(node, str, 'track')
+ self.ref = self.node_get_member(node, str, 'ref', None)
+ self.url = self.translate_url(self.original_url)
+
+ def preflight(self):
+ # Check if bzr is installed, get the binary at the same time.
+ self.host_bzr = utils.get_host_tool('bzr')
+
+ def get_unique_key(self):
+ return [self.original_url, self.tracking, self.ref]
+
+ def get_consistency(self):
+ if self.ref is None or self.tracking is None:
+ return Consistency.INCONSISTENT
+
+ # Lock for the _check_ref()
+ with self._locked():
+ if self._check_ref():
+ return Consistency.CACHED
+ else:
+ return Consistency.RESOLVED
+
+ def load_ref(self, node):
+ self.ref = self.node_get_member(node, str, 'ref', None)
+
+ def get_ref(self):
+ return self.ref
+
+ def set_ref(self, ref, node):
+ node['ref'] = self.ref = ref
+
+ def track(self):
+ with self.timed_activity("Tracking {}".format(self.url),
+ silent_nested=True), self._locked():
+ self._ensure_mirror(skip_ref_check=True)
+ ret, out = self.check_output([self.host_bzr, "version-info",
+ "--custom", "--template={revno}",
+ self._get_branch_dir()],
+ fail="Failed to read the revision number at '{}'"
+ .format(self._get_branch_dir()))
+ if ret != 0:
+ raise SourceError("{}: Failed to get ref for tracking {}".format(self, self.tracking))
+
+ return out
+
+ def fetch(self):
+ with self.timed_activity("Fetching {}".format(self.url),
+ silent_nested=True), self._locked():
+ self._ensure_mirror()
+
+ def stage(self, directory):
+ self.call([self.host_bzr, "checkout", "--lightweight",
+ "--revision=revno:{}".format(self.ref),
+ self._get_branch_dir(), directory],
+ fail="Failed to checkout revision {} from branch {} to {}"
+ .format(self.ref, self._get_branch_dir(), directory))
+ # Remove .bzr dir
+ shutil.rmtree(os.path.join(directory, ".bzr"))
+
+ def init_workspace(self, directory):
+ url = os.path.join(self.url, self.tracking)
+ with self.timed_activity('Setting up workspace "{}"'.format(directory), silent_nested=True):
+ # Checkout from the cache
+ self.call([self.host_bzr, "branch",
+ "--use-existing-dir",
+ "--revision=revno:{}".format(self.ref),
+ self._get_branch_dir(), directory],
+ fail="Failed to branch revision {} from branch {} to {}"
+ .format(self.ref, self._get_branch_dir(), directory))
+ # Switch the parent branch to the source's origin
+ self.call([self.host_bzr, "switch",
+ "--directory={}".format(directory), url],
+ fail="Failed to switch workspace's parent branch to {}".format(url))
+
+ # _locked()
+ #
+ # This context manager ensures exclusive access to the
+ # bzr repository.
+ #
+ @contextmanager
+ def _locked(self):
+ lockdir = os.path.join(self.get_mirror_directory(), 'locks')
+ lockfile = os.path.join(
+ lockdir,
+ utils.url_directory_name(self.original_url) + '.lock'
+ )
+ os.makedirs(lockdir, exist_ok=True)
+ with open(lockfile, 'w') as lock:
+ fcntl.flock(lock, fcntl.LOCK_EX)
+ try:
+ yield
+ finally:
+ fcntl.flock(lock, fcntl.LOCK_UN)
+
+ def _check_ref(self):
+ # If the mirror doesn't exist yet, then we don't have the ref
+ if not os.path.exists(self._get_branch_dir()):
+ return False
+
+ return self.call([self.host_bzr, "revno",
+ "--revision=revno:{}".format(self.ref),
+ self._get_branch_dir()]) == 0
+
+ def _get_branch_dir(self):
+ return os.path.join(self._get_mirror_dir(), self.tracking)
+
+ def _get_mirror_dir(self):
+ return os.path.join(self.get_mirror_directory(),
+ utils.url_directory_name(self.original_url))
+
+ def _ensure_mirror(self, skip_ref_check=False):
+ mirror_dir = self._get_mirror_dir()
+ bzr_metadata_dir = os.path.join(mirror_dir, ".bzr")
+ if not os.path.exists(bzr_metadata_dir):
+ self.call([self.host_bzr, "init-repo", "--no-trees", mirror_dir],
+ fail="Failed to initialize bzr repository")
+
+ branch_dir = os.path.join(mirror_dir, self.tracking)
+ branch_url = self.url + "/" + self.tracking
+ if not os.path.exists(branch_dir):
+ # `bzr branch` the branch if it doesn't exist
+ # to get the upstream code
+ self.call([self.host_bzr, "branch", branch_url, branch_dir],
+ fail="Failed to branch from {} to {}".format(branch_url, branch_dir))
+
+ else:
+ # `bzr pull` the branch if it does exist
+ # to get any changes to the upstream code
+ self.call([self.host_bzr, "pull", "--directory={}".format(branch_dir), branch_url],
+ fail="Failed to pull new changes for {}".format(branch_dir))
+
+ if not skip_ref_check and not self._check_ref():
+ raise SourceError("Failed to ensure ref '{}' was mirrored".format(self.ref),
+ reason="ref-not-mirrored")
+
+
+def setup():
+ return BzrSource
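Note: the ``_locked()`` helper above relies on POSIX advisory locking via ``fcntl.flock``. The same pattern is shown below as a minimal standalone sketch (not part of the plugin itself; the lock path is hypothetical). Concurrent ``bst`` processes that fetch or track the same bzr URL serialize on a per-URL lock file rather than racing on the shared mirror.

.. code:: python

   import fcntl
   import os
   from contextlib import contextmanager

   @contextmanager
   def exclusive_lock(lockfile):
       # Hold an exclusive advisory lock for the duration of the context
       os.makedirs(os.path.dirname(lockfile), exist_ok=True)
       with open(lockfile, 'w') as lock:
           fcntl.flock(lock, fcntl.LOCK_EX)
           try:
               yield
           finally:
               fcntl.flock(lock, fcntl.LOCK_UN)

   # Hypothetical usage: serialize access to a shared mirror directory
   with exclusive_lock('/tmp/bzr-mirror-locks/example.lock'):
       pass  # exclusive access here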
diff --git a/src/buildstream/plugins/sources/deb.py b/src/buildstream/plugins/sources/deb.py
new file mode 100644
index 000000000..e45994951
--- /dev/null
+++ b/src/buildstream/plugins/sources/deb.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Phillip Smyth <phillip.smyth@codethink.co.uk>
+# Jonathan Maw <jonathan.maw@codethink.co.uk>
+# Richard Maw <richard.maw@codethink.co.uk>
+
+"""
+deb - stage files from .deb packages
+====================================
+
+**Host dependencies:**
+
+ * arpy (python package)
+
+**Usage:**
+
+.. code:: yaml
+
+ # Specify the deb source kind
+ kind: deb
+
+ # Specify the deb url. Using an alias defined in your project
+ # configuration is encouraged. 'bst source track' will update the
+ # sha256sum in 'ref' to the downloaded file's sha256sum.
+ url: upstream:foo.deb
+
+ # Specify the ref. It's a sha256sum of the file you download.
+ ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b
+
+ # Specify the basedir to return only the specified dir and its children
+ base-dir: ''
+
+See :ref:`built-in functionality documentation <core_source_builtins>` for
+details on common configuration options for sources.
+"""
+
+import tarfile
+from contextlib import contextmanager
+import arpy # pylint: disable=import-error
+
+from .tar import TarSource
+
+
+class DebSource(TarSource):
+ # pylint: disable=attribute-defined-outside-init
+
+ def configure(self, node):
+ super().configure(node)
+
+ self.base_dir = self.node_get_member(node, str, 'base-dir', None)
+
+ def preflight(self):
+ return
+
+ @contextmanager
+ def _get_tar(self):
+ with open(self._get_mirror_file(), 'rb') as deb_file:
+ arpy_archive = arpy.Archive(fileobj=deb_file)
+ arpy_archive.read_all_headers()
+ data_tar_arpy = [v for k, v in arpy_archive.archived_files.items() if b"data.tar" in k][0]
+ # ArchiveFileData is not enough like a file object for tarfile to use.
+ # Monkey-patching a seekable method makes it close enough for TarFile to open.
+ data_tar_arpy.seekable = lambda *args: True
+ tar = tarfile.open(fileobj=data_tar_arpy, mode="r:*")
+ yield tar
+
+
+def setup():
+ return DebSource
diff --git a/src/buildstream/plugins/sources/git.py b/src/buildstream/plugins/sources/git.py
new file mode 100644
index 000000000..5e6834979
--- /dev/null
+++ b/src/buildstream/plugins/sources/git.py
@@ -0,0 +1,168 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+"""
+git - stage files from a git repository
+=======================================
+
+**Host dependencies:**
+
+ * git
+
+.. attention::
+
+ Note that this plugin **will check out git submodules by default**, even if
+ they are not specified in the `.bst` file.
+
+**Usage:**
+
+.. code:: yaml
+
+ # Specify the git source kind
+ kind: git
+
+ # Specify the repository url, using an alias defined
+ # in your project configuration is recommended.
+ url: upstream:foo.git
+
+ # Optionally specify a symbolic tracking branch or tag, this
+ # will be used to update the 'ref' when refreshing the pipeline.
+ track: master
+
+ # Optionally specify the ref format used for tracking.
+ # The default is 'sha1' for the raw commit hash.
+ # If you specify 'git-describe', the commit hash will be prefixed
+ # with the closest tag.
+ ref-format: sha1
+
+ # Specify the commit ref, this must be specified in order to
+ # checkout sources and build, but can be automatically updated
+ # if the 'track' attribute was specified.
+ ref: d63cbb6fdc0bbdadc4a1b92284826a6d63a7ebcd
+
+ # Optionally specify whether submodules should be checked-out.
+ # If not set, this will default to 'True'
+ checkout-submodules: True
+
+ # If your repository has submodules, explicitly specifying the
+ # url from which they are to be fetched allows you to easily
+ # rebuild the same sources from a different location. This is
+ # especially handy when used with project defined aliases which
+ # can be redefined at a later time.
+ # You may also explicitly specify whether to check out this
+ # submodule. If 'checkout' is set, it will override
+ # 'checkout-submodules' with the value set below.
+ submodules:
+ plugins/bar:
+ url: upstream:bar.git
+ checkout: True
+ plugins/baz:
+ url: upstream:baz.git
+ checkout: False
+
+ # Enable tag tracking.
+ #
+ # This causes the `tags` metadata to be populated automatically
+ # as a result of tracking the git source.
+ #
+ # By default this is 'False'.
+ #
+ track-tags: True
+
+ # If the list of tags below is set, then a lightweight dummy
+ # git repository will be staged along with the content at
+ # build time.
+ #
+ # This is useful for a growing number of modules which use
+ # `git describe` at build time in order to determine the version
+ # which will be encoded into the built software.
+ #
+ # The 'tags' below is considered as a part of the git source
+ # reference and will be stored in the 'project.refs' file if
+ # that has been selected as your project's ref-storage.
+ #
+ # Migration notes:
+ #
+ # If you are upgrading from BuildStream 1.2, which used to
+ # stage the entire repository by default, you will notice that
+ # some modules which use `git describe` are broken, and will
+ # need to enable this feature in order to fix them.
+ #
+ # If you need to enable this feature without changing the
+ # specific commit that you are building, then we recommend
+ # the following migration steps for any git sources where
+ # `git describe` is required:
+ #
+ # o Enable `track-tags` feature
+ # o Set the `track` parameter to the desired commit sha which
+ # the current `ref` points to
+ # o Run `bst source track` for these elements, this will result in
+ # populating the `tags` portion of the refs without changing
+ # the refs
+ # o Restore the `track` parameter to the branches which you have
+ # previously been tracking afterwards.
+ #
+ tags:
+ - tag: lightweight-example
+ commit: 04ad0dc656cb7cc6feb781aa13bdbf1d67d0af78
+ annotated: false
+ - tag: annotated-example
+ commit: 10abe77fe8d77385d86f225b503d9185f4ef7f3a
+ annotated: true
+
+See :ref:`built-in functionality documentation <core_source_builtins>` for
+details on common configuration options for sources.
+
+**Configurable Warnings:**
+
+This plugin provides the following :ref:`configurable warnings <configurable_warnings>`:
+
+- ``git:inconsistent-submodule`` - A submodule present in the git repository's .gitmodules was never
+ added with `git submodule add`.
+
+- ``git:unlisted-submodule`` - A submodule is present in the git repository but was not specified in
+ the source configuration and was not disabled for checkout.
+
+ .. note::
+
+ The ``git:unlisted-submodule`` warning is available since :ref:`format version 20 <project_format_version>`
+
+- ``git:invalid-submodule`` - A submodule is specified in the source configuration but does not exist
+ in the repository.
+
+ .. note::
+
+ The ``git:invalid-submodule`` warning is available since :ref:`format version 20 <project_format_version>`
+
+This plugin also utilises the following configurable :class:`core warnings <buildstream.types.CoreWarnings>`:
+
+- :attr:`ref-not-in-track <buildstream.types.CoreWarnings.REF_NOT_IN_TRACK>` - The provided ref was not
+ found in the provided track in the element's git repository.
+"""
+
+from buildstream import _GitSourceBase
+
+
+class GitSource(_GitSourceBase):
+ pass
+
+
+# Plugin entry point
+def setup():
+ return GitSource
diff --git a/src/buildstream/plugins/sources/local.py b/src/buildstream/plugins/sources/local.py
new file mode 100644
index 000000000..50df85427
--- /dev/null
+++ b/src/buildstream/plugins/sources/local.py
@@ -0,0 +1,147 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Tiago Gomes <tiago.gomes@codethink.co.uk>
+
+"""
+local - stage local files and directories
+=========================================
+
+**Usage:**
+
+.. code:: yaml
+
+ # Specify the local source kind
+ kind: local
+
+ # Specify the project relative path to a file or directory
+ path: files/somefile.txt
+
+See :ref:`built-in functionality documentation <core_source_builtins>` for
+details on common configuration options for sources.
+"""
+
+import os
+import stat
+from buildstream import Source, Consistency
+from buildstream import utils
+
+
+class LocalSource(Source):
+ # pylint: disable=attribute-defined-outside-init
+
+ def __init__(self, context, project, meta):
+ super().__init__(context, project, meta)
+
+ # Cached unique key to avoid multiple file system traversal if the unique key is requested multiple times.
+ self.__unique_key = None
+
+ def configure(self, node):
+ self.node_validate(node, ['path', *Source.COMMON_CONFIG_KEYS])
+ self.path = self.node_get_project_path(node, 'path')
+ self.fullpath = os.path.join(self.get_project_directory(), self.path)
+
+ def preflight(self):
+ pass
+
+ def get_unique_key(self):
+ if self.__unique_key is None:
+ # Get a list of tuples of the project relative paths and fullpaths
+ if os.path.isdir(self.fullpath):
+ filelist = utils.list_relative_paths(self.fullpath)
+ filelist = [(relpath, os.path.join(self.fullpath, relpath)) for relpath in filelist]
+ else:
+ filelist = [(self.path, self.fullpath)]
+
+ # Return a list of (relative filename, sha256 digest) tuples, a sorted list
+ # has already been returned by list_relative_paths()
+ self.__unique_key = [(relpath, unique_key(fullpath)) for relpath, fullpath in filelist]
+ return self.__unique_key
+
+ def get_consistency(self):
+ return Consistency.CACHED
+
+ # We don't have a ref, we're a local file...
+ def load_ref(self, node):
+ pass
+
+ def get_ref(self):
+ return None # pragma: nocover
+
+ def set_ref(self, ref, node):
+ pass # pragma: nocover
+
+ def fetch(self):
+ # Nothing to do here for a local source
+ pass # pragma: nocover
+
+ def stage(self, directory):
+
+ # Don't use hardlinks to stage sources, they are not write-protected
+ # in the sandbox.
+ with self.timed_activity("Staging local files at {}".format(self.path)):
+
+ if os.path.isdir(self.fullpath):
+ files = list(utils.list_relative_paths(self.fullpath))
+ utils.copy_files(self.fullpath, directory)
+ else:
+ destfile = os.path.join(directory, os.path.basename(self.path))
+ files = [os.path.basename(self.path)]
+ utils.safe_copy(self.fullpath, destfile)
+
+ for f in files:
+ # Non empty directories are not listed by list_relative_paths
+ dirs = f.split(os.sep)
+ for i in range(1, len(dirs)):
+ d = os.path.join(directory, *(dirs[:i]))
+ assert os.path.isdir(d) and not os.path.islink(d)
+ os.chmod(d, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+
+ path = os.path.join(directory, f)
+ if os.path.islink(path):
+ pass
+ elif os.path.isdir(path):
+ os.chmod(path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+ else:
+ st = os.stat(path)
+ if st.st_mode & stat.S_IXUSR:
+ os.chmod(path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+ else:
+ os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+
+ def _get_local_path(self):
+ return self.fullpath
+
+
+# Create a unique key for a file
+def unique_key(filename):
+
+ # Return some hard coded things for files which
+ # have no content to calculate a key for
+ if os.path.islink(filename):
+ # For a symbolic link, use the link target as its unique identifier
+ return os.readlink(filename)
+ elif os.path.isdir(filename):
+ return "0"
+
+ return utils.sha256sum(filename)
+
+
+# Plugin entry point
+def setup():
+ return LocalSource
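The unique key of a local source is derived purely from file paths and contents. A rough standalone sketch of the same idea follows (it uses ``hashlib`` directly rather than BuildStream's ``utils.sha256sum``, and the directory argument is hypothetical):

.. code:: python

   import hashlib
   import os

   def file_digest(path):
       # Symlinks are keyed on their target, directories on a constant
       if os.path.islink(path):
           return os.readlink(path)
       elif os.path.isdir(path):
           return "0"
       with open(path, 'rb') as f:
           return hashlib.sha256(f.read()).hexdigest()

   def directory_key(root):
       # Build a sorted list of (relative path, digest) tuples
       key = []
       for dirpath, _, filenames in os.walk(root):
           for name in filenames:
               fullpath = os.path.join(dirpath, name)
               key.append((os.path.relpath(fullpath, root), file_digest(fullpath)))
       return sorted(key)

   # Hypothetical usage:
   # print(directory_key('files'))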
diff --git a/src/buildstream/plugins/sources/patch.py b/src/buildstream/plugins/sources/patch.py
new file mode 100644
index 000000000..e42868264
--- /dev/null
+++ b/src/buildstream/plugins/sources/patch.py
@@ -0,0 +1,101 @@
+#
+# Copyright Bloomberg Finance LP
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Chandan Singh <csingh43@bloomberg.net>
+# Tiago Gomes <tiago.gomes@codethink.co.uk>
+
+"""
+patch - apply locally stored patches
+====================================
+
+**Host dependencies:**
+
+ * patch
+
+**Usage:**
+
+.. code:: yaml
+
+ # Specify the local source kind
+ kind: patch
+
+ # Specify the project relative path to a patch file
+ path: files/somefile.diff
+
+ # Optionally specify the strip level, defaults to 1
+ strip-level: 1
+
+See :ref:`built-in functionality documentation <core_source_builtins>` for
+details on common configuration options for sources.
+"""
+
+import os
+from buildstream import Source, SourceError, Consistency
+from buildstream import utils
+
+
+class PatchSource(Source):
+ # pylint: disable=attribute-defined-outside-init
+
+ BST_REQUIRES_PREVIOUS_SOURCES_STAGE = True
+
+ def configure(self, node):
+ self.path = self.node_get_project_path(node, 'path',
+ check_is_file=True)
+ self.strip_level = self.node_get_member(node, int, "strip-level", 1)
+ self.fullpath = os.path.join(self.get_project_directory(), self.path)
+
+ def preflight(self):
+ # Check if patch is installed, get the binary at the same time
+ self.host_patch = utils.get_host_tool("patch")
+
+ def get_unique_key(self):
+ return [self.path, utils.sha256sum(self.fullpath), self.strip_level]
+
+ def get_consistency(self):
+ return Consistency.CACHED
+
+ def load_ref(self, node):
+ pass
+
+ def get_ref(self):
+ return None # pragma: nocover
+
+ def set_ref(self, ref, node):
+ pass # pragma: nocover
+
+ def fetch(self):
+ # Nothing to do here for a local source
+ pass # pragma: nocover
+
+ def stage(self, directory):
+ with self.timed_activity("Applying local patch: {}".format(self.path)):
+
+ # Bail out with a comprehensive message if the target directory is empty
+ if not os.listdir(directory):
+ raise SourceError("Nothing to patch in directory '{}'".format(directory),
+ reason="patch-no-files")
+
+ strip_level_option = "-p{}".format(self.strip_level)
+ self.call([self.host_patch, strip_level_option, "-i", self.fullpath, "-d", directory],
+ fail="Failed to apply patch {}".format(self.path))
+
+
+# Plugin entry point
+def setup():
+ return PatchSource
diff --git a/src/buildstream/plugins/sources/pip.py b/src/buildstream/plugins/sources/pip.py
new file mode 100644
index 000000000..9d6c40d74
--- /dev/null
+++ b/src/buildstream/plugins/sources/pip.py
@@ -0,0 +1,254 @@
+#
+# Copyright 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Chandan Singh <csingh43@bloomberg.net>
+
+"""
+pip - stage python packages using pip
+=====================================
+
+**Host dependencies:**
+
+ * ``pip`` python module
+
+This plugin will download source distributions for specified packages using
+``pip`` but will not install them. It is expected that the elements using this
+source will install the downloaded packages.
+
+Downloaded tarballs will be stored in a directory called ".bst_pip_downloads".
+
+**Usage:**
+
+.. code:: yaml
+
+ # Specify the pip source kind
+ kind: pip
+
+ # Optionally specify index url, defaults to PyPI
+ # This url is used to discover new versions of packages and download them
+ # Projects intending to mirror their sources to a permanent location should
+ # use an aliased url, and declare the alias in the project configuration
+ url: https://mypypi.example.com/simple
+
+ # Optionally specify the path to requirements files
+ # Note that either 'requirements-files' or 'packages' must be defined
+ requirements-files:
+ - requirements.txt
+
+ # Optionally specify a list of additional packages
+ # Note that either 'requirements-files' or 'packages' must be defined
+ packages:
+ - flake8
+
+ # Specify the ref. It is a list of strings of format
+ # "<package-name>==<version>", separated by "\\n".
+ # Usually this will be contents of a requirements.txt file where all
+ # package versions have been frozen.
+ ref: "flake8==3.5.0\\nmccabe==0.6.1\\npkg-resources==0.0.0\\npycodestyle==2.3.1\\npyflakes==1.6.0"
+
+See :ref:`built-in functionality documentation <core_source_builtins>` for
+details on common configuration options for sources.
+
+.. note::
+
+ The ``pip`` plugin is available since :ref:`format version 16 <project_format_version>`
+"""
+
+import hashlib
+import os
+import re
+
+from buildstream import Consistency, Source, SourceError, utils
+
+_OUTPUT_DIRNAME = '.bst_pip_downloads'
+_PYPI_INDEX_URL = 'https://pypi.org/simple/'
+
+# Used only for finding pip command
+_PYTHON_VERSIONS = [
+ 'python', # when running in a venv, we might not have the exact version
+ 'python2.7',
+ 'python3.0',
+ 'python3.1',
+ 'python3.2',
+ 'python3.3',
+ 'python3.4',
+ 'python3.5',
+ 'python3.6',
+ 'python3.7',
+]
+
+# List of allowed extensions taken from
+# https://docs.python.org/3/distutils/sourcedist.html.
+# Names of source distribution archives must be of the form
+# '%{package-name}-%{version}.%{extension}'.
+_SDIST_RE = re.compile(
+ r'^([\w.-]+?)-((?:[\d.]+){2,})\.(?:tar|tar.bz2|tar.gz|tar.xz|tar.Z|zip)$',
+ re.IGNORECASE)
+
+
+class PipSource(Source):
+ # pylint: disable=attribute-defined-outside-init
+
+ # We need access to previous sources at track time to use requirements.txt
+ # but not at fetch time as self.ref should contain sufficient information
+ # for this plugin
+ BST_REQUIRES_PREVIOUS_SOURCES_TRACK = True
+
+ def configure(self, node):
+ self.node_validate(node, ['url', 'packages', 'ref', 'requirements-files'] +
+ Source.COMMON_CONFIG_KEYS)
+ self.ref = self.node_get_member(node, str, 'ref', None)
+ self.original_url = self.node_get_member(node, str, 'url', _PYPI_INDEX_URL)
+ self.index_url = self.translate_url(self.original_url)
+ self.packages = self.node_get_member(node, list, 'packages', [])
+ self.requirements_files = self.node_get_member(node, list, 'requirements-files', [])
+
+ if not (self.packages or self.requirements_files):
+ raise SourceError("{}: Either 'packages' or 'requirements-files' must be specified".format(self))
+
+ def preflight(self):
+ # Try to find a pip version that supports download command
+ self.host_pip = None
+ for python in reversed(_PYTHON_VERSIONS):
+ try:
+ host_python = utils.get_host_tool(python)
+ rc = self.call([host_python, '-m', 'pip', 'download', '--help'])
+ if rc == 0:
+ self.host_pip = [host_python, '-m', 'pip']
+ break
+ except utils.ProgramNotFoundError:
+ pass
+
+ if self.host_pip is None:
+ raise SourceError("{}: Unable to find a suitable pip command".format(self))
+
+ def get_unique_key(self):
+ return [self.original_url, self.ref]
+
+ def get_consistency(self):
+ if not self.ref:
+ return Consistency.INCONSISTENT
+ if os.path.exists(self._mirror) and os.listdir(self._mirror):
+ return Consistency.CACHED
+ return Consistency.RESOLVED
+
+ def get_ref(self):
+ return self.ref
+
+ def load_ref(self, node):
+ self.ref = self.node_get_member(node, str, 'ref', None)
+
+ def set_ref(self, ref, node):
+ node['ref'] = self.ref = ref
+
+ def track(self, previous_sources_dir):
+ # XXX pip does not offer any public API other than the CLI tool so it
+ # is not feasible to correctly parse the requirements file or to check
+ # which package versions pip is going to install.
+ # See https://pip.pypa.io/en/stable/user_guide/#using-pip-from-your-program
+ # for details.
+ # As a result, we have to wastefully install the packages during track.
+ with self.tempdir() as tmpdir:
+ install_args = self.host_pip + ['download',
+ '--no-binary', ':all:',
+ '--index-url', self.index_url,
+ '--dest', tmpdir]
+ for requirement_file in self.requirements_files:
+ fpath = os.path.join(previous_sources_dir, requirement_file)
+ install_args += ['-r', fpath]
+ install_args += self.packages
+
+ self.call(install_args, fail="Failed to install python packages")
+ reqs = self._parse_sdist_names(tmpdir)
+
+ return '\n'.join(["{}=={}".format(pkg, ver) for pkg, ver in reqs])
+
+ def fetch(self):
+ with self.tempdir() as tmpdir:
+ packages = self.ref.strip().split('\n')
+ package_dir = os.path.join(tmpdir, 'packages')
+ os.makedirs(package_dir)
+ self.call([*self.host_pip,
+ 'download',
+ '--no-binary', ':all:',
+ '--index-url', self.index_url,
+ '--dest', package_dir,
+ *packages],
+ fail="Failed to install python packages: {}".format(packages))
+
+ # If the mirror directory already exists, assume that some other
+ # process has fetched the sources before us and ensure that we do
+ # not raise an error in that case.
+ try:
+ utils.move_atomic(package_dir, self._mirror)
+ except utils.DirectoryExistsError:
+ # Another process has beaten us and has fetched the sources
+ # before us.
+ pass
+ except OSError as e:
+ raise SourceError("{}: Failed to move downloaded pip packages from '{}' to '{}': {}"
+ .format(self, package_dir, self._mirror, e)) from e
+
+ def stage(self, directory):
+ with self.timed_activity("Staging Python packages", silent_nested=True):
+ utils.copy_files(self._mirror, os.path.join(directory, _OUTPUT_DIRNAME))
+
+ # Directory where this source should stage its files
+ #
+ @property
+ def _mirror(self):
+ if not self.ref:
+ return None
+ return os.path.join(self.get_mirror_directory(),
+ utils.url_directory_name(self.original_url),
+ hashlib.sha256(self.ref.encode()).hexdigest())
+
+ # Parse names of downloaded source distributions
+ #
+ # Args:
+ # basedir (str): Directory containing source distribution archives
+ #
+ # Returns:
+ # (list): List of (package_name, version) tuples in sorted order
+ #
+ def _parse_sdist_names(self, basedir):
+ reqs = []
+ for f in os.listdir(basedir):
+ pkg = _match_package_name(f)
+ if pkg is not None:
+ reqs.append(pkg)
+
+ return sorted(reqs)
+
+
+# Extract the package name and version of a source distribution
+#
+# Args:
+# filename (str): Filename of the source distribution
+#
+# Returns:
+# (tuple): A tuple of (package_name, version)
+#
+def _match_package_name(filename):
+ pkg_match = _SDIST_RE.match(filename)
+ if pkg_match is None:
+ return None
+ return pkg_match.groups()
+
+
+def setup():
+ return PipSource
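As a quick standalone check of the sdist filename pattern used by ``_match_package_name()`` above (the filenames here are invented for illustration):

.. code:: python

   import re

   _SDIST_RE = re.compile(
       r'^([\w.-]+?)-((?:[\d.]+){2,})\.(?:tar|tar.bz2|tar.gz|tar.xz|tar.Z|zip)$',
       re.IGNORECASE)

   for filename in ['flake8-3.5.0.tar.gz', 'pkg-resources-0.0.0.tar.gz', 'not-an-sdist.txt']:
       match = _SDIST_RE.match(filename)
       print(filename, '->', match.groups() if match else None)

   # flake8-3.5.0.tar.gz -> ('flake8', '3.5.0')
   # pkg-resources-0.0.0.tar.gz -> ('pkg-resources', '0.0.0')
   # not-an-sdist.txt -> None

The sorted (name, version) tuples are then joined back into the newline-separated ref format shown in the docstring.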
diff --git a/src/buildstream/plugins/sources/remote.py b/src/buildstream/plugins/sources/remote.py
new file mode 100644
index 000000000..562a8f226
--- /dev/null
+++ b/src/buildstream/plugins/sources/remote.py
@@ -0,0 +1,93 @@
+#
+# Copyright Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Ed Baunton <ebaunton1@bloomberg.net>
+
+"""
+remote - stage files from remote urls
+=====================================
+
+**Usage:**
+
+.. code:: yaml
+
+ # Specify the remote source kind
+ kind: remote
+
+ # Optionally specify a relative staging filename.
+ # If not specified, the basename of the url will be used.
+ # filename: customfilename
+
+ # Optionally specify whether the downloaded file should be
+ # marked executable.
+ # executable: true
+
+ # Specify the url. Using an alias defined in your project
+ # configuration is encouraged. 'bst source track' will update the
+ # sha256sum in 'ref' to the downloaded file's sha256sum.
+ url: upstream:foo
+
+ # Specify the ref. It's a sha256sum of the file you download.
+ ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b
+
+See :ref:`built-in functionality documentation <core_source_builtins>` for
+details on common configuration options for sources.
+
+.. note::
+
+ The ``remote`` plugin is available since :ref:`format version 10 <project_format_version>`
+"""
+import os
+from buildstream import SourceError, utils
+from ._downloadablefilesource import DownloadableFileSource
+
+
+class RemoteSource(DownloadableFileSource):
+ # pylint: disable=attribute-defined-outside-init
+
+ def configure(self, node):
+ super().configure(node)
+
+ self.filename = self.node_get_member(node, str, 'filename', os.path.basename(self.url))
+ self.executable = self.node_get_member(node, bool, 'executable', False)
+
+ if os.sep in self.filename:
+ raise SourceError('{}: filename parameter cannot contain directories'.format(self),
+ reason="filename-contains-directory")
+ self.node_validate(node, DownloadableFileSource.COMMON_CONFIG_KEYS + ['filename', 'executable'])
+
+ def get_unique_key(self):
+ return super().get_unique_key() + [self.filename, self.executable]
+
+ def stage(self, directory):
+ # Same as in local plugin, don't use hardlinks to stage sources, they
+ # are not write protected in the sandbox.
+ dest = os.path.join(directory, self.filename)
+ with self.timed_activity("Staging remote file to {}".format(dest)):
+
+ utils.safe_copy(self._get_mirror_file(), dest)
+
+ # To prevent user's umask introducing variability here, explicitly set
+ # file modes.
+ if self.executable:
+ os.chmod(dest, 0o755)
+ else:
+ os.chmod(dest, 0o644)
+
+
+def setup():
+ return RemoteSource
diff --git a/src/buildstream/plugins/sources/tar.py b/src/buildstream/plugins/sources/tar.py
new file mode 100644
index 000000000..31dc17497
--- /dev/null
+++ b/src/buildstream/plugins/sources/tar.py
@@ -0,0 +1,202 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jonathan Maw <jonathan.maw@codethink.co.uk>
+
+"""
+tar - stage files from tar archives
+===================================
+
+**Host dependencies:**
+
+ * lzip (for .tar.lz files)
+
+**Usage:**
+
+.. code:: yaml
+
+ # Specify the tar source kind
+ kind: tar
+
+ # Specify the tar url. Using an alias defined in your project
+ # configuration is encouraged. 'bst source track' will update the
+ # sha256sum in 'ref' to the downloaded file's sha256sum.
+ url: upstream:foo.tar
+
+ # Specify the ref. It's a sha256sum of the file you download.
+ ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b
+
+ # Specify a glob pattern to indicate the base directory to extract
+ # from the tarball. The first matching directory will be used.
+ #
+ # Note that this is '*' by default since most standard release
+ # tarballs contain a self named subdirectory at the root which
+ # contains the files one normally wants to extract to build.
+ #
+ # To extract the root of the tarball directly, this can be set
+ # to an empty string.
+ base-dir: '*'
+
+See :ref:`built-in functionality documentation <core_source_builtins>` for
+details on common configuration options for sources.
+"""
+
+import os
+import tarfile
+from contextlib import contextmanager
+from tempfile import TemporaryFile
+
+from buildstream import SourceError
+from buildstream import utils
+
+from ._downloadablefilesource import DownloadableFileSource
+
+
+class TarSource(DownloadableFileSource):
+ # pylint: disable=attribute-defined-outside-init
+
+ def configure(self, node):
+ super().configure(node)
+
+ self.base_dir = self.node_get_member(node, str, 'base-dir', '*') or None
+
+ self.node_validate(node, DownloadableFileSource.COMMON_CONFIG_KEYS + ['base-dir'])
+
+ def preflight(self):
+ self.host_lzip = None
+ if self.url.endswith('.lz'):
+ self.host_lzip = utils.get_host_tool('lzip')
+
+ def get_unique_key(self):
+ return super().get_unique_key() + [self.base_dir]
+
+ @contextmanager
+ def _run_lzip(self):
+ assert self.host_lzip
+ with TemporaryFile() as lzip_stdout:
+ with open(self._get_mirror_file(), 'r') as lzip_file:
+ self.call([self.host_lzip, '-d'],
+ stdin=lzip_file,
+ stdout=lzip_stdout)
+
+ lzip_stdout.seek(0, 0)
+ yield lzip_stdout
+
+ @contextmanager
+ def _get_tar(self):
+ if self.url.endswith('.lz'):
+ with self._run_lzip() as lzip_dec:
+ with tarfile.open(fileobj=lzip_dec, mode='r:') as tar:
+ yield tar
+ else:
+ with tarfile.open(self._get_mirror_file()) as tar:
+ yield tar
+
+ def stage(self, directory):
+ try:
+ with self._get_tar() as tar:
+ base_dir = None
+ if self.base_dir:
+ base_dir = self._find_base_dir(tar, self.base_dir)
+
+ if base_dir:
+ tar.extractall(path=directory, members=self._extract_members(tar, base_dir))
+ else:
+ tar.extractall(path=directory)
+
+ except (tarfile.TarError, OSError) as e:
+ raise SourceError("{}: Error staging source: {}".format(self, e)) from e
+
+ # Override and translate which filenames to extract
+ def _extract_members(self, tar, base_dir):
+ if not base_dir.endswith(os.sep):
+ base_dir = base_dir + os.sep
+
+ L = len(base_dir)
+ for member in tar.getmembers():
+
+ # First, ensure that a member never starts with `./`
+ if member.path.startswith('./'):
+ member.path = member.path[2:]
+
+ # Now extract only the paths which match the normalized path
+ if member.path.startswith(base_dir):
+
+ # If it's got a link name, give it the same treatment, we
+ # need the link targets to match up with what we are staging
+ #
+ # NOTE: It's possible this is not perfect, we may need to
+ # consider links which point outside of the chosen
+ # base directory.
+ #
+ if member.type == tarfile.LNKTYPE:
+ member.linkname = member.linkname[L:]
+
+ member.path = member.path[L:]
+ yield member
+
+ # We want to iterate over all paths of a tarball, but getmembers()
+ # is not enough because some tarballs simply do not contain the leading
+ # directory paths for the archived files.
+ def _list_tar_paths(self, tar):
+
+ visited = set()
+ for member in tar.getmembers():
+
+ # Remove any possible leading './' to offer more consistent behavior
+ # across tarballs encoded with or without a leading '.'
+ member_name = member.name.lstrip('./')
+
+ if not member.isdir():
+
+ # Loop over the components of a path, for a path of a/b/c/d
+ # we will first visit 'a', then 'a/b' and then 'a/b/c', excluding
+ # the final component
+ components = member_name.split('/')
+ for i in range(len(components) - 1):
+ dir_component = '/'.join([components[j] for j in range(i + 1)])
+ if dir_component not in visited:
+ visited.add(dir_component)
+ try:
+ # Don't yield directory members which actually do
+ # exist in the archive
+ _ = tar.getmember(dir_component)
+ except KeyError:
+ if dir_component != '.':
+ yield dir_component
+
+ continue
+
+ # Avoid considering the '.' directory, if any is included in the archive
+ # this is to avoid the default 'base-dir: *' value behaving differently
+ # depending on whether the tarball was encoded with a leading '.' or not
+ elif member_name == '.':
+ continue
+
+ yield member_name
+
+ def _find_base_dir(self, tar, pattern):
+ paths = self._list_tar_paths(tar)
+ matches = sorted(list(utils.glob(paths, pattern)))
+ if not matches:
+ raise SourceError("{}: Could not find base directory matching pattern: {}".format(self, pattern))
+
+ return matches[0]
+
+
+def setup():
+ return TarSource
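The parent-directory synthesis in ``_list_tar_paths()`` can be shown in isolation: for a member ``a/b/c/d`` the intermediate directories ``a``, ``a/b`` and ``a/b/c`` are produced once each. A simplified sketch (ignoring members that already exist in the archive):

.. code:: python

   def synthesize_parent_dirs(member_paths):
       # Yield each missing parent directory exactly once, in path order
       visited = set()
       for path in member_paths:
           components = path.split('/')
           for i in range(len(components) - 1):
               dir_component = '/'.join(components[:i + 1])
               if dir_component not in visited:
                   visited.add(dir_component)
                   yield dir_component

   print(list(synthesize_parent_dirs(['a/b/c/d', 'a/b/e'])))
   # ['a', 'a/b', 'a/b/c']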
diff --git a/src/buildstream/plugins/sources/zip.py b/src/buildstream/plugins/sources/zip.py
new file mode 100644
index 000000000..03efcef79
--- /dev/null
+++ b/src/buildstream/plugins/sources/zip.py
@@ -0,0 +1,181 @@
+#
+# Copyright (C) 2017 Mathieu Bridon
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Mathieu Bridon <bochecha@daitauha.fr>
+
+"""
+zip - stage files from zip archives
+===================================
+
+**Usage:**
+
+.. code:: yaml
+
+ # Specify the zip source kind
+ kind: zip
+
+ # Specify the zip url. Using an alias defined in your project
+ # configuration is encouraged. 'bst source track' will update the
+ # sha256sum in 'ref' to the downloaded file's sha256sum.
+ url: upstream:foo.zip
+
+ # Specify the ref. It's a sha256sum of the file you download.
+ ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b
+
+ # Specify a glob pattern to indicate the base directory to extract
+ # from the archive. The first matching directory will be used.
+ #
+ # Note that this is '*' by default since most standard release
+ # archives contain a self named subdirectory at the root which
+ # contains the files one normally wants to extract to build.
+ #
+ # To extract the root of the archive directly, this can be set
+ # to an empty string.
+ base-dir: '*'
+
+See :ref:`built-in functionality documentation <core_source_builtins>` for
+details on common configuration options for sources.
+
+.. attention::
+
+ File permissions are not preserved. All extracted directories have
+ permissions 0755 and all extracted files have permissions 0644.
+"""
+
+import os
+import zipfile
+import stat
+
+from buildstream import SourceError
+from buildstream import utils
+
+from ._downloadablefilesource import DownloadableFileSource
+
+
+class ZipSource(DownloadableFileSource):
+ # pylint: disable=attribute-defined-outside-init
+
+ def configure(self, node):
+ super().configure(node)
+
+ self.base_dir = self.node_get_member(node, str, 'base-dir', '*') or None
+
+ self.node_validate(node, DownloadableFileSource.COMMON_CONFIG_KEYS + ['base-dir'])
+
+ def get_unique_key(self):
+ return super().get_unique_key() + [self.base_dir]
+
+ def stage(self, directory):
+ exec_rights = (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) & ~(stat.S_IWGRP | stat.S_IWOTH)
+ noexec_rights = exec_rights & ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
+
+ try:
+ with zipfile.ZipFile(self._get_mirror_file()) as archive:
+ base_dir = None
+ if self.base_dir:
+ base_dir = self._find_base_dir(archive, self.base_dir)
+
+ if base_dir:
+ members = self._extract_members(archive, base_dir)
+ else:
+ members = archive.namelist()
+
+ for member in members:
+ written = archive.extract(member, path=directory)
+
+ # zipfile.extract might create missing directories
+ rel = os.path.relpath(written, start=directory)
+ assert not os.path.isabs(rel)
+ rel = os.path.dirname(rel)
+ while rel:
+ os.chmod(os.path.join(directory, rel), exec_rights)
+ rel = os.path.dirname(rel)
+
+ if os.path.islink(written):
+ pass
+ elif os.path.isdir(written):
+ os.chmod(written, exec_rights)
+ else:
+ os.chmod(written, noexec_rights)
+
+ except (zipfile.BadZipFile, zipfile.LargeZipFile, OSError) as e:
+ raise SourceError("{}: Error staging source: {}".format(self, e)) from e
+
+ # Override and translate which filenames to extract
+ def _extract_members(self, archive, base_dir):
+ if not base_dir.endswith(os.sep):
+ base_dir = base_dir + os.sep
+
+ L = len(base_dir)
+ for member in archive.infolist():
+ if member.filename == base_dir:
+ continue
+
+ if member.filename.startswith(base_dir):
+ member.filename = member.filename[L:]
+ yield member
+
+ # We want to iterate over all paths of an archive, but namelist()
+ # is not enough because some archives simply do not contain the leading
+ # directory paths for the archived files.
+ def _list_archive_paths(self, archive):
+
+ visited = {}
+ for member in archive.infolist():
+
+ # ZipInfo.is_dir() is only available in python >= 3.6, but all
+ # it does is check for a trailing '/' in the name
+ #
+ if not member.filename.endswith('/'):
+
+ # Loop over the components of a path, for a path of a/b/c/d
+ # we will first visit 'a', then 'a/b' and then 'a/b/c', excluding
+ # the final component
+ components = member.filename.split('/')
+ for i in range(len(components) - 1):
+ dir_component = '/'.join([components[j] for j in range(i + 1)])
+ if dir_component not in visited:
+ visited[dir_component] = True
+ try:
+ # Don't yield directory members which actually do
+ # exist in the archive
+ _ = archive.getinfo(dir_component)
+ except KeyError:
+ if dir_component != '.':
+ yield dir_component
+
+ continue
+
+ # Avoid considering the '.' directory, if any is included in the archive
+ # this is to avoid the default 'base-dir: *' value behaving differently
+ # depending on whether the archive was encoded with a leading '.' or not
+ elif member.filename == '.' or member.filename == './':
+ continue
+
+ yield member.filename
+
+ def _find_base_dir(self, archive, pattern):
+ paths = self._list_archive_paths(archive)
+ matches = sorted(list(utils.glob(paths, pattern)))
+ if not matches:
+ raise SourceError("{}: Could not find base directory matching pattern: {}".format(self, pattern))
+
+ return matches[0]
+
+
+def setup():
+ return ZipSource
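The permission masks computed in ``stage()`` above work out to exactly the 0755/0644 modes mentioned in the attention note; a short sketch verifying the arithmetic:

.. code:: python

   import stat

   exec_rights = (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) & ~(stat.S_IWGRP | stat.S_IWOTH)
   noexec_rights = exec_rights & ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

   print(oct(exec_rights))    # 0o755 -> applied to directories
   print(oct(noexec_rights))  # 0o644 -> applied to regular files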
diff --git a/src/buildstream/sandbox/__init__.py b/src/buildstream/sandbox/__init__.py
new file mode 100644
index 000000000..5966d194f
--- /dev/null
+++ b/src/buildstream/sandbox/__init__.py
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+from .sandbox import Sandbox, SandboxFlags, SandboxCommandError
+from ._sandboxremote import SandboxRemote
+from ._sandboxdummy import SandboxDummy
diff --git a/src/buildstream/sandbox/_config.py b/src/buildstream/sandbox/_config.py
new file mode 100644
index 000000000..457f92b3c
--- /dev/null
+++ b/src/buildstream/sandbox/_config.py
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jim MacArthur <jim.macarthur@codethink.co.uk>
+
+
+# SandboxConfig
+#
+# A container for sandbox configuration data. We want the internals
+# of this to be opaque, hence putting it in its own private file.
+class SandboxConfig():
+ def __init__(self, build_uid, build_gid, build_os=None, build_arch=None):
+ self.build_uid = build_uid
+ self.build_gid = build_gid
+ self.build_os = build_os
+ self.build_arch = build_arch
+
+ # get_unique_key():
+ #
+ # This returns the SandboxConfig's contribution
+ # to an element's cache key.
+ #
+ # Returns:
+ # (dict): A dictionary to add to an element's cache key
+ #
+ def get_unique_key(self):
+
+ # Currently operating system and machine architecture
+ # are not configurable and we have no sandbox implementation
+ # which can conform to such configurations.
+ #
+ # However this should be the right place to support
+ # such configurations in the future.
+ #
+ unique_key = {
+ 'os': self.build_os,
+ 'arch': self.build_arch
+ }
+
+ # Avoid breaking cache key calculation with
+ # the addition of configurable build uid/gid
+ if self.build_uid != 0:
+ unique_key['build-uid'] = self.build_uid
+
+ if self.build_gid != 0:
+ unique_key['build-gid'] = self.build_gid
+
+ return unique_key
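A hypothetical usage of ``SandboxConfig.get_unique_key()``, assuming the class above is importable; the os/arch values are invented for illustration. Note that a uid/gid of 0 is deliberately omitted so that existing cache keys remain stable.

.. code:: python

   config = SandboxConfig(build_uid=0, build_gid=0, build_os='linux', build_arch='x86_64')
   print(config.get_unique_key())
   # {'os': 'linux', 'arch': 'x86_64'}

   config = SandboxConfig(build_uid=1000, build_gid=1000, build_os='linux', build_arch='x86_64')
   print(config.get_unique_key())
   # {'os': 'linux', 'arch': 'x86_64', 'build-uid': 1000, 'build-gid': 1000}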
diff --git a/src/buildstream/sandbox/_mount.py b/src/buildstream/sandbox/_mount.py
new file mode 100644
index 000000000..c0f26c8d7
--- /dev/null
+++ b/src/buildstream/sandbox/_mount.py
@@ -0,0 +1,149 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+import os
+from collections import OrderedDict
+from contextlib import contextmanager, ExitStack
+
+from .. import utils
+from .._fuse import SafeHardlinks
+
+
+# Mount()
+#
+# Helper data object representing a single mount point in the mount map
+#
+class Mount():
+ def __init__(self, sandbox, mount_point, safe_hardlinks, fuse_mount_options=None):
+ # Getting _get_underlying_directory() here is acceptable as
+ # we're part of the sandbox code. This will fail if our
+ # directory is CAS-based.
+ root_directory = sandbox.get_virtual_directory()._get_underlying_directory()
+
+ self.mount_point = mount_point
+ self.safe_hardlinks = safe_hardlinks
+ self._fuse_mount_options = {} if fuse_mount_options is None else fuse_mount_options
+
+ # FIXME: When the criteria for mounting something and its parent
+ # mount is identical, then there is no need to mount an additional
+ # fuse layer (i.e. if the root is read-write and there is a directory
+ # marked for staged artifacts directly within the rootfs, they can
+ # safely share the same fuse layer).
+ #
+ # In these cases it would be saner to redirect the sub-mount to
+ # a regular mount point within the parent's redirected mount.
+ #
+ if self.safe_hardlinks:
+ scratch_directory = sandbox._get_scratch_directory()
+ # Redirected mount
+ self.mount_origin = os.path.join(root_directory, mount_point.lstrip(os.sep))
+ self.mount_base = os.path.join(scratch_directory, utils.url_directory_name(mount_point))
+ self.mount_source = os.path.join(self.mount_base, 'mount')
+ self.mount_tempdir = os.path.join(self.mount_base, 'temp')
+ os.makedirs(self.mount_origin, exist_ok=True)
+ os.makedirs(self.mount_tempdir, exist_ok=True)
+ else:
+ # No redirection needed
+ self.mount_source = os.path.join(root_directory, mount_point.lstrip(os.sep))
+
+ external_mount_sources = sandbox._get_mount_sources()
+ external_mount_source = external_mount_sources.get(mount_point)
+
+ if external_mount_source is None:
+ os.makedirs(self.mount_source, exist_ok=True)
+ else:
+ if os.path.isdir(external_mount_source):
+ os.makedirs(self.mount_source, exist_ok=True)
+ else:
+ # When mounting a regular file, ensure the parent
+ # directory exists in the sandbox; and that an empty
+ # file is created at the mount location.
+ parent_dir = os.path.dirname(self.mount_source.rstrip('/'))
+ os.makedirs(parent_dir, exist_ok=True)
+ if not os.path.exists(self.mount_source):
+ with open(self.mount_source, 'w'):
+ pass
+
+ @contextmanager
+ def mounted(self, sandbox):
+ if self.safe_hardlinks:
+ mount = SafeHardlinks(self.mount_origin, self.mount_tempdir, self._fuse_mount_options)
+ with mount.mounted(self.mount_source):
+ yield
+ else:
+ # Nothing to mount here
+ yield
+
+
+# MountMap()
+#
+# Helper object for mapping of the sandbox mountpoints
+#
+# Args:
+# sandbox (Sandbox): The sandbox object
+# root_readonly (bool): Whether the sandbox root is readonly
+#
+class MountMap():
+
+ def __init__(self, sandbox, root_readonly, fuse_mount_options=None):
+ # We will be doing the mounts in the order in which they were declared.
+ self.mounts = OrderedDict()
+
+ if fuse_mount_options is None:
+ fuse_mount_options = {}
+
+ # We want safe hardlinks on rootfs whenever root is not readonly
+ self.mounts['/'] = Mount(sandbox, '/', not root_readonly, fuse_mount_options)
+
+ for mark in sandbox._get_marked_directories():
+ directory = mark['directory']
+ artifact = mark['artifact']
+
+ # We want safe hardlinks for any non-root directory where
+ # artifacts will be staged
+ self.mounts[directory] = Mount(sandbox, directory, artifact, fuse_mount_options)
+
+ # get_mount_source()
+ #
+ # Gets the host directory where the mountpoint in the
+ # sandbox should be bind mounted from
+ #
+ # Args:
+ # mountpoint (str): The absolute mountpoint path inside the sandbox
+ #
+ # Returns:
+ # The host path to be mounted at the mount point
+ #
+ def get_mount_source(self, mountpoint):
+ return self.mounts[mountpoint].mount_source
+
+ # mounted()
+ #
+ # A context manager which ensures all the mount sources
+ # are mounted, with any fuse layers that may be needed.
+ #
+ # Args:
+ # sandbox (Sandbox): The sandbox
+ #
+ @contextmanager
+ def mounted(self, sandbox):
+ with ExitStack() as stack:
+ for _, mount in self.mounts.items():
+ stack.enter_context(mount.mounted(sandbox))
+ yield
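``MountMap.mounted()`` layers one context manager per mount point using ``ExitStack``; the same pattern is shown standalone below with dummy mounts (the mount point names here are invented):

.. code:: python

   from contextlib import ExitStack, contextmanager

   @contextmanager
   def dummy_mount(mount_point):
       print('mount', mount_point)
       try:
           yield
       finally:
           print('umount', mount_point)

   with ExitStack() as stack:
       for mount_point in ['/', '/buildstream-install']:
           stack.enter_context(dummy_mount(mount_point))
       print('all mounts are active')
   # The mounts are torn down in reverse order when the with-block exits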
diff --git a/src/buildstream/sandbox/_mounter.py b/src/buildstream/sandbox/_mounter.py
new file mode 100644
index 000000000..e6054c20d
--- /dev/null
+++ b/src/buildstream/sandbox/_mounter.py
@@ -0,0 +1,147 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+import sys
+from contextlib import contextmanager
+
+from .._exceptions import SandboxError
+from .. import utils, _signals
+
+
+# A class to wrap the `mount` and `umount` system commands
+class Mounter():
+ @classmethod
+ def _mount(cls, dest, src=None, mount_type=None,
+ stdout=sys.stdout, stderr=sys.stderr, options=None,
+ flags=None):
+
+ argv = [utils.get_host_tool('mount')]
+ if mount_type:
+ argv.extend(['-t', mount_type])
+ if options:
+ argv.extend(['-o', options])
+ if flags:
+ argv.extend(flags)
+
+ if src is not None:
+ argv += [src]
+ argv += [dest]
+
+ status, _ = utils._call(
+ argv,
+ terminate=True,
+ stdout=stdout,
+ stderr=stderr
+ )
+
+ if status != 0:
+ raise SandboxError('`{}` failed with exit code {}'
+ .format(' '.join(argv), status))
+
+ return dest
+
+ @classmethod
+ def _umount(cls, path, stdout=sys.stdout, stderr=sys.stderr):
+
+ cmd = [utils.get_host_tool('umount'), '-R', path]
+ status, _ = utils._call(
+ cmd,
+ terminate=True,
+ stdout=stdout,
+ stderr=stderr
+ )
+
+ if status != 0:
+ raise SandboxError('`{}` failed with exit code {}'
+ .format(' '.join(cmd), status))
+
+ # mount()
+ #
+ # A wrapper for the `mount` command. The device is unmounted when
+ # the context is left.
+ #
+ # Args:
+ # dest (str) - The directory to mount to
+ # src (str) - The directory to mount
+ # stdout (file) - stdout
+ # stderr (file) - stderr
+ # mount_type (str|None) - The mount type (can be omitted or None)
+ # kwargs - Arguments to pass to the mount command, such as `ro=True`
+ #
+ # Yields:
+ # (str) The path to the destination
+ #
+ @classmethod
+ @contextmanager
+ def mount(cls, dest, src=None, stdout=sys.stdout,
+ stderr=sys.stderr, mount_type=None, **kwargs):
+
+ def kill_proc():
+ cls._umount(dest, stdout, stderr)
+
+ options = ','.join([key for key, val in kwargs.items() if val])
+
+ path = cls._mount(dest, src, mount_type, stdout=stdout, stderr=stderr, options=options)
+ try:
+ with _signals.terminator(kill_proc):
+ yield path
+ finally:
+ cls._umount(dest, stdout, stderr)
+
+ # bind_mount()
+ #
+ # Mount a directory to a different location (a hardlink for all
+ # intents and purposes). The directory is unmounted when the
+ # context is left.
+ #
+ # Args:
+ # dest (str) - The directory to mount to
+ # src (str) - The directory to mount
+ # stdout (file) - stdout
+ # stderr (file) - stderr
+ # kwargs - Arguments to pass to the mount command, such as `ro=True`
+ #
+ # Yields:
+ # (str) The path to the destination
+ #
+ # While this is equivalent to `mount --rbind`, this option may not
+ # exist and can be dangerous, requiring careful cleanup. It is
+ # recommended to use this function over a manual mount invocation.
+ #
+ @classmethod
+ @contextmanager
+ def bind_mount(cls, dest, src=None, stdout=sys.stdout,
+ stderr=sys.stderr, **kwargs):
+
+ def kill_proc():
+ cls._umount(dest, stdout, stderr)
+
+ kwargs['rbind'] = True
+ options = ','.join([key for key, val in kwargs.items() if val])
+
+ path = cls._mount(dest, src, None, stdout, stderr, options)
+
+ try:
+ with _signals.terminator(kill_proc):
+ # Make the rbind a slave to avoid unmounting vital devices in
+ # /proc
+ cls._mount(dest, flags=['--make-rslave'])
+ yield path
+ finally:
+ cls._umount(dest, stdout, stderr)
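The keyword arguments accepted by ``mount()`` and ``bind_mount()`` are folded into a comma-separated option string passed to ``mount -o``; a small sketch of that conversion (the option names here are only examples):

.. code:: python

   kwargs = {'ro': True, 'noexec': False, 'rbind': True}
   options = ','.join([key for key, val in kwargs.items() if val])
   print(options)  # ro,rbind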
diff --git a/src/buildstream/sandbox/_sandboxbwrap.py b/src/buildstream/sandbox/_sandboxbwrap.py
new file mode 100644
index 000000000..d2abc33d0
--- /dev/null
+++ b/src/buildstream/sandbox/_sandboxbwrap.py
@@ -0,0 +1,433 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Andrew Leeming <andrew.leeming@codethink.co.uk>
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+import collections
+import json
+import os
+import sys
+import time
+import errno
+import signal
+import subprocess
+import shutil
+from contextlib import ExitStack, suppress
+from tempfile import TemporaryFile
+
+import psutil
+
+from .._exceptions import SandboxError
+from .. import utils, _signals
+from ._mount import MountMap
+from . import Sandbox, SandboxFlags
+
+
+# SandboxBwrap()
+#
+# Default bubblewrap based sandbox implementation.
+#
+class SandboxBwrap(Sandbox):
+
+ # Minimal set of devices for the sandbox
+ DEVICES = [
+ '/dev/full',
+ '/dev/null',
+ '/dev/urandom',
+ '/dev/random',
+ '/dev/zero'
+ ]
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.user_ns_available = kwargs['user_ns_available']
+ self.die_with_parent_available = kwargs['die_with_parent_available']
+ self.json_status_available = kwargs['json_status_available']
+ self.linux32 = kwargs['linux32']
+
+ def _run(self, command, flags, *, cwd, env):
+ stdout, stderr = self._get_output()
+
+ # Allowable access to underlying storage as we're part of the sandbox
+ root_directory = self.get_virtual_directory()._get_underlying_directory()
+
+ if not self._has_command(command[0], env):
+ raise SandboxError("Staged artifacts do not provide command "
+ "'{}'".format(command[0]),
+ reason='missing-command')
+
+ # Create the mount map, this will tell us where
+ # each mount point needs to be mounted from and to
+ mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY)
+ root_mount_source = mount_map.get_mount_source('/')
+
+ # start command with linux32 if needed
+ if self.linux32:
+ bwrap_command = [utils.get_host_tool('linux32')]
+ else:
+ bwrap_command = []
+
+ # Grab the full path of the bwrap binary
+ bwrap_command += [utils.get_host_tool('bwrap')]
+
+ for k, v in env.items():
+ bwrap_command += ['--setenv', k, v]
+ for k in os.environ.keys() - env.keys():
+ bwrap_command += ['--unsetenv', k]
+
+ # Create a new pid namespace, this also ensures that any subprocesses
+ # are cleaned up when the bwrap process exits.
+ bwrap_command += ['--unshare-pid']
+
+ # Ensure subprocesses are cleaned up when the bwrap parent dies.
+ if self.die_with_parent_available:
+ bwrap_command += ['--die-with-parent']
+
+ # Add in the root filesystem stuff first.
+ #
+ # The rootfs is mounted as RW initially so that further mounts can be
+ # placed on top. If a RO root is required, after all other mounts are
+ # complete, root is remounted as RO
+ bwrap_command += ["--bind", root_mount_source, "/"]
+
+ if not flags & SandboxFlags.NETWORK_ENABLED:
+ bwrap_command += ['--unshare-net']
+ bwrap_command += ['--unshare-uts', '--hostname', 'buildstream']
+ bwrap_command += ['--unshare-ipc']
+
+ # Give it a proc and tmpfs
+ bwrap_command += [
+ '--proc', '/proc',
+ '--tmpfs', '/tmp'
+ ]
+
+ # In interactive mode, we want a complete devpts inside
+ # the container, so there is a /dev/console and such. In
+ # the regular non-interactive sandbox, we want to hand pick
+ # a minimal set of devices to expose to the sandbox.
+ #
+ if flags & SandboxFlags.INTERACTIVE:
+ bwrap_command += ['--dev', '/dev']
+ else:
+ for device in self.DEVICES:
+ bwrap_command += ['--dev-bind', device, device]
+
+ # Add bind mounts to any marked directories
+ marked_directories = self._get_marked_directories()
+ mount_source_overrides = self._get_mount_sources()
+ for mark in marked_directories:
+ mount_point = mark['directory']
+ if mount_point in mount_source_overrides: # pylint: disable=consider-using-get
+ mount_source = mount_source_overrides[mount_point]
+ else:
+ mount_source = mount_map.get_mount_source(mount_point)
+
+            # Use --dev-bind for all mounts; this is simply a bind mount which is
+            # not restrictive about devices.
+ #
+ # While it's important for users to be able to mount devices
+ # into the sandbox for `bst shell` testing purposes, it is
+ # harmless to do in a build environment where the directories
+ # we mount just never contain device files.
+ #
+ bwrap_command += ['--dev-bind', mount_source, mount_point]
+
+ if flags & SandboxFlags.ROOT_READ_ONLY:
+ bwrap_command += ["--remount-ro", "/"]
+
+ if cwd is not None:
+ bwrap_command += ['--dir', cwd]
+ bwrap_command += ['--chdir', cwd]
+
+        # Set UID and GID
+ if self.user_ns_available:
+ bwrap_command += ['--unshare-user']
+ if not flags & SandboxFlags.INHERIT_UID:
+ uid = self._get_config().build_uid
+ gid = self._get_config().build_gid
+ bwrap_command += ['--uid', str(uid), '--gid', str(gid)]
+
+ with ExitStack() as stack:
+ pass_fds = ()
+ # Improve error reporting with json-status if available
+ if self.json_status_available:
+ json_status_file = stack.enter_context(TemporaryFile())
+ pass_fds = (json_status_file.fileno(),)
+ bwrap_command += ['--json-status-fd', str(json_status_file.fileno())]
+
+ # Add the command
+ bwrap_command += command
+
+            # bwrap might create some directories while running setuid
+            # and may give them root gid ownership. If it does, we'll want
+            # to clean them up afterwards, so record what already existed
+            # there so that we can safely clean up the debris.
+ #
+ existing_basedirs = {
+ directory: os.path.exists(os.path.join(root_directory, directory))
+ for directory in ['tmp', 'dev', 'proc']
+ }
+
+ # Use the MountMap context manager to ensure that any redirected
+ # mounts through fuse layers are in context and ready for bwrap
+ # to mount them from.
+ #
+ stack.enter_context(mount_map.mounted(self))
+
+            # If we're interactive, we want to inherit our stdin,
+            # otherwise redirect to /dev/null, ensuring the process
+            # is disconnected from the terminal.
+ if flags & SandboxFlags.INTERACTIVE:
+ stdin = sys.stdin
+ else:
+ stdin = stack.enter_context(open(os.devnull, "r"))
+
+ # Run bubblewrap !
+ exit_code = self.run_bwrap(bwrap_command, stdin, stdout, stderr,
+ (flags & SandboxFlags.INTERACTIVE), pass_fds)
+
+            # Clean up things which bwrap might have left behind, while
+            # everything is still mounted, because bwrap can create the
+            # devices on the fuse mount; we should remove them there.
+ if not flags & SandboxFlags.INTERACTIVE:
+ for device in self.DEVICES:
+ device_path = os.path.join(root_mount_source, device.lstrip('/'))
+
+ # This will remove the device in a loop, allowing some
+ # retries in case the device file leaked by bubblewrap is still busy
+ self.try_remove_device(device_path)
+
+                # Remove /tmp, /dev and /proc; these are bwrap owned things
+                # we want to be sure never end up in an artifact
+ for basedir in ['tmp', 'dev', 'proc']:
+
+ # Skip removal of directories which already existed before
+ # launching bwrap
+ if existing_basedirs[basedir]:
+ continue
+
+ base_directory = os.path.join(root_mount_source, basedir)
+
+ if flags & SandboxFlags.INTERACTIVE:
+ # Be more lenient in interactive mode here.
+ #
+                    # In interactive mode, it's possible that the project shell
+                    # configuration has mounted some things below the base
+                    # directories, such as /dev/dri, and in this case it's less
+                    # important to consider cleanup, as we won't be collecting
+                    # this build result and creating an artifact.
+                    #
+                    # Note: Ideally, we should instead fix upstream bubblewrap to
+                    #       clean up any debris it creates at startup time, and do
+                    #       the same ourselves for any directories we explicitly create.
+ #
+ shutil.rmtree(base_directory, ignore_errors=True)
+ else:
+ try:
+ os.rmdir(base_directory)
+ except FileNotFoundError:
+ # ignore this, if bwrap cleaned up properly then it's not a problem.
+ #
+                        # If, on the other hand, the directory was not empty, then this is clearly
+                        # a bug: bwrap mounted a tmpfs here and when it exits, that had better be empty.
+ pass
+
+ if self.json_status_available:
+ json_status_file.seek(0, 0)
+ child_exit_code = None
+ # The JSON status file's output is a JSON object per line
+ # with the keys present identifying the type of message.
+ # The only message relevant to us now is the exit-code of the subprocess.
+ for line in json_status_file:
+ with suppress(json.decoder.JSONDecodeError):
+ o = json.loads(line)
+ if isinstance(o, collections.abc.Mapping) and 'exit-code' in o:
+ child_exit_code = o['exit-code']
+ break
+ if child_exit_code is None:
+ raise SandboxError("`bwrap' terminated during sandbox setup with exitcode {}".format(exit_code),
+ reason="bwrap-sandbox-fail")
+ exit_code = child_exit_code
+
+ self._vdir._mark_changed()
+ return exit_code
+
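
The `--json-status-fd` handling above boils down to scanning a stream of newline-separated JSON objects for the one carrying `exit-code`. A minimal standalone sketch of that parsing follows; the in-memory byte stream and the `child-pid` key are illustrative stand-ins for what bubblewrap writes, while `exit-code` is the key the code above actually relies on.

    import collections.abc
    import json
    from contextlib import suppress
    from io import BytesIO

    # Stand-in for the temporary file handed to bwrap via --json-status-fd
    json_status_file = BytesIO(b'{"child-pid": 12345}\n{"exit-code": 42}\n')

    child_exit_code = None
    for line in json_status_file:
        with suppress(json.decoder.JSONDecodeError):
            o = json.loads(line)
            if isinstance(o, collections.abc.Mapping) and 'exit-code' in o:
                child_exit_code = o['exit-code']
                break

    print(child_exit_code)  # 42
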
+ def run_bwrap(self, argv, stdin, stdout, stderr, interactive, pass_fds):
+ # Wrapper around subprocess.Popen() with common settings.
+ #
+ # This function blocks until the subprocess has terminated.
+ #
+ # It then returns a tuple of (exit code, stdout output, stderr output).
+ # If stdout was not equal to subprocess.PIPE, stdout will be None. Same for
+ # stderr.
+
+        # Fetch the process actually launched inside the bwrap sandbox, or the
+        # intermediate control bwrap processes.
+ #
+ # NOTE:
+ # The main bwrap process itself is setuid root and as such we cannot
+        #   send it any signals. Since we launch bwrap with --unshare-pid, its
+        #   direct child is another bwrap process which retains ownership of the
+ # pid namespace. This is the right process to kill when terminating.
+ #
+ # The grandchild is the binary which we asked bwrap to launch on our
+ # behalf, whatever this binary is, it is the right process to use
+ # for suspending and resuming. In the case that this is a shell, the
+ # shell will be group leader and all build scripts will stop/resume
+ # with that shell.
+ #
+ def get_user_proc(bwrap_pid, grand_child=False):
+ bwrap_proc = psutil.Process(bwrap_pid)
+ bwrap_children = bwrap_proc.children()
+ if bwrap_children:
+ if grand_child:
+ bwrap_grand_children = bwrap_children[0].children()
+ if bwrap_grand_children:
+ return bwrap_grand_children[0]
+ else:
+ return bwrap_children[0]
+ return None
+
+ def terminate_bwrap():
+ if process:
+ user_proc = get_user_proc(process.pid)
+ if user_proc:
+ user_proc.kill()
+
+ def suspend_bwrap():
+ if process:
+ user_proc = get_user_proc(process.pid, grand_child=True)
+ if user_proc:
+ group_id = os.getpgid(user_proc.pid)
+ os.killpg(group_id, signal.SIGSTOP)
+
+ def resume_bwrap():
+ if process:
+ user_proc = get_user_proc(process.pid, grand_child=True)
+ if user_proc:
+ group_id = os.getpgid(user_proc.pid)
+ os.killpg(group_id, signal.SIGCONT)
+
+ with ExitStack() as stack:
+
+ # We want to launch bwrap in a new session in non-interactive
+ # mode so that we handle the SIGTERM and SIGTSTP signals separately
+ # from the nested bwrap process, but in interactive mode this
+            # causes launched shells to lack job control (we don't really
+ # know why that is).
+ #
+ if interactive:
+ new_session = False
+ else:
+ new_session = True
+ stack.enter_context(_signals.suspendable(suspend_bwrap, resume_bwrap))
+ stack.enter_context(_signals.terminator(terminate_bwrap))
+
+ process = subprocess.Popen(
+ argv,
+ # The default is to share file descriptors from the parent process
+ # to the subprocess, which is rarely good for sandboxing.
+ close_fds=True,
+ pass_fds=pass_fds,
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ start_new_session=new_session
+ )
+
+ # Wait for the child process to finish, ensuring that
+ # a SIGINT has exactly the effect the user probably
+ # expects (i.e. let the child process handle it).
+ try:
+ while True:
+ try:
+ _, status = os.waitpid(process.pid, 0)
+ # If the process exits due to a signal, we
+ # brutally murder it to avoid zombies
+ if not os.WIFEXITED(status):
+ user_proc = get_user_proc(process.pid)
+ if user_proc:
+ utils._kill_process_tree(user_proc.pid)
+
+ # If we receive a KeyboardInterrupt we continue
+ # waiting for the process since we are in the same
+ # process group and it should also have received
+ # the SIGINT.
+ except KeyboardInterrupt:
+ continue
+
+ break
+ # If we can't find the process, it has already died of its
+ # own accord, and therefore we don't need to check or kill
+ # anything.
+ except psutil.NoSuchProcess:
+ pass
+
+ # Return the exit code - see the documentation for
+ # os.WEXITSTATUS to see why this is required.
+ if os.WIFEXITED(status):
+ exit_code = os.WEXITSTATUS(status)
+ else:
+ exit_code = -1
+
+ if interactive and stdin.isatty():
+ # Make this process the foreground process again, otherwise the
+ # next read() on stdin will trigger SIGTTIN and stop the process.
+ # This is required because the sandboxed process does not have
+ # permission to do this on its own (running in separate PID namespace).
+ #
+ # tcsetpgrp() will trigger SIGTTOU when called from a background
+ # process, so ignore it temporarily.
+ handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
+ os.tcsetpgrp(0, os.getpid())
+ signal.signal(signal.SIGTTOU, handler)
+
+ return exit_code
+
+ def try_remove_device(self, device_path):
+
+ # Put some upper limit on the tries here
+ max_tries = 1000
+ tries = 0
+
+ while True:
+ try:
+ os.unlink(device_path)
+ except OSError as e:
+ if e.errno == errno.EBUSY:
+                    # This happens on some machines; it seems there is sometimes a race
+                    # after bubblewrap returns, where the device files it bind-mounted have
+                    # not finished unmounting.
+ #
+ if tries < max_tries:
+ tries += 1
+ time.sleep(1 / 100)
+ continue
+ else:
+ # We've reached the upper limit of tries, bail out now
+                        # because something must have gone wrong
+ #
+ raise
+ elif e.errno == errno.ENOENT:
+                    # Bubblewrap cleaned it up for us, no problem if we can't remove it
+ break
+ else:
+ # Something unexpected, reraise this error
+ raise
+ else:
+                # Successfully removed the device file
+ break
diff --git a/src/buildstream/sandbox/_sandboxchroot.py b/src/buildstream/sandbox/_sandboxchroot.py
new file mode 100644
index 000000000..7266a00e3
--- /dev/null
+++ b/src/buildstream/sandbox/_sandboxchroot.py
@@ -0,0 +1,325 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+import os
+import sys
+import stat
+import signal
+import subprocess
+from contextlib import contextmanager, ExitStack
+import psutil
+
+from .._exceptions import SandboxError
+from .. import utils
+from .. import _signals
+from ._mounter import Mounter
+from ._mount import MountMap
+from . import Sandbox, SandboxFlags
+
+
+class SandboxChroot(Sandbox):
+
+ _FUSE_MOUNT_OPTIONS = {'dev': True}
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ uid = self._get_config().build_uid
+ gid = self._get_config().build_gid
+ if uid != 0 or gid != 0:
+ raise SandboxError("Chroot sandboxes cannot specify a non-root uid/gid "
+ "({},{} were supplied via config)".format(uid, gid))
+
+ self.mount_map = None
+
+ def _run(self, command, flags, *, cwd, env):
+
+ if not self._has_command(command[0], env):
+ raise SandboxError("Staged artifacts do not provide command "
+ "'{}'".format(command[0]),
+ reason='missing-command')
+
+ stdout, stderr = self._get_output()
+
+ # Create the mount map, this will tell us where
+ # each mount point needs to be mounted from and to
+ self.mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY,
+ self._FUSE_MOUNT_OPTIONS)
+
+ # Create a sysroot and run the command inside it
+ with ExitStack() as stack:
+ os.makedirs('/var/run/buildstream', exist_ok=True)
+
+ # FIXME: While we do not currently do anything to prevent
+ # network access, we also don't copy /etc/resolv.conf to
+ # the new rootfs.
+ #
+            # This effectively disables network access, since DNS will
+ # never resolve, so anything a normal process wants to do
+ # will fail. Malicious processes could gain rights to
+ # anything anyway.
+ #
+ # Nonetheless a better solution could perhaps be found.
+
+ rootfs = stack.enter_context(utils._tempdir(dir='/var/run/buildstream'))
+ stack.enter_context(self.create_devices(self._root, flags))
+ stack.enter_context(self.mount_dirs(rootfs, flags, stdout, stderr))
+
+ if flags & SandboxFlags.INTERACTIVE:
+ stdin = sys.stdin
+ else:
+ stdin = stack.enter_context(open(os.devnull, 'r'))
+
+ # Ensure the cwd exists
+ if cwd is not None:
+ workdir = os.path.join(rootfs, cwd.lstrip(os.sep))
+ os.makedirs(workdir, exist_ok=True)
+ status = self.chroot(rootfs, command, stdin, stdout,
+ stderr, cwd, env, flags)
+
+ self._vdir._mark_changed()
+ return status
+
+ # chroot()
+ #
+ # A helper function to chroot into the rootfs.
+ #
+ # Args:
+ # rootfs (str): The path of the sysroot to chroot into
+ # command (list): The command to execute in the chroot env
+ # stdin (file): The stdin
+ # stdout (file): The stdout
+ # stderr (file): The stderr
+ # cwd (str): The current working directory
+ # env (dict): The environment variables to use while executing the command
+ # flags (:class:`SandboxFlags`): The flags to enable on the sandbox
+ #
+ # Returns:
+ # (int): The exit code of the executed command
+ #
+ def chroot(self, rootfs, command, stdin, stdout, stderr, cwd, env, flags):
+ def kill_proc():
+ if process:
+ # First attempt to gracefully terminate
+ proc = psutil.Process(process.pid)
+ proc.terminate()
+
+ try:
+ proc.wait(20)
+ except psutil.TimeoutExpired:
+ utils._kill_process_tree(process.pid)
+
+ def suspend_proc():
+ group_id = os.getpgid(process.pid)
+ os.killpg(group_id, signal.SIGSTOP)
+
+ def resume_proc():
+ group_id = os.getpgid(process.pid)
+ os.killpg(group_id, signal.SIGCONT)
+
+ try:
+ with _signals.suspendable(suspend_proc, resume_proc), _signals.terminator(kill_proc):
+ process = subprocess.Popen( # pylint: disable=subprocess-popen-preexec-fn
+ command,
+ close_fds=True,
+ cwd=os.path.join(rootfs, cwd.lstrip(os.sep)),
+ env=env,
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ # If you try to put gtk dialogs here Tristan (either)
+ # will personally scald you
+ preexec_fn=lambda: (os.chroot(rootfs), os.chdir(cwd)),
+ start_new_session=flags & SandboxFlags.INTERACTIVE
+ )
+
+ # Wait for the child process to finish, ensuring that
+ # a SIGINT has exactly the effect the user probably
+ # expects (i.e. let the child process handle it).
+ try:
+ while True:
+ try:
+ _, status = os.waitpid(process.pid, 0)
+ # If the process exits due to a signal, we
+ # brutally murder it to avoid zombies
+ if not os.WIFEXITED(status):
+ utils._kill_process_tree(process.pid)
+
+ # Unlike in the bwrap case, here only the main
+ # process seems to receive the SIGINT. We pass
+ # on the signal to the child and then continue
+ # to wait.
+ except KeyboardInterrupt:
+ process.send_signal(signal.SIGINT)
+ continue
+
+ break
+ # If we can't find the process, it has already died of
+ # its own accord, and therefore we don't need to check
+ # or kill anything.
+ except psutil.NoSuchProcess:
+ pass
+
+ # Return the exit code - see the documentation for
+ # os.WEXITSTATUS to see why this is required.
+ if os.WIFEXITED(status):
+ code = os.WEXITSTATUS(status)
+ else:
+ code = -1
+
+ except subprocess.SubprocessError as e:
+ # Exceptions in preexec_fn are simply reported as
+ # 'Exception occurred in preexec_fn', turn these into
+ # a more readable message.
+ if str(e) == 'Exception occurred in preexec_fn.':
+ raise SandboxError('Could not chroot into {} or chdir into {}. '
+ 'Ensure you are root and that the relevant directory exists.'
+ .format(rootfs, cwd)) from e
+ else:
+ raise SandboxError('Could not run command {}: {}'.format(command, e)) from e
+
+ return code
+
+ # create_devices()
+ #
+    # Create the nodes in /dev/ usually required for builds (null,
+    # zero, etc.)
+ #
+ # Args:
+ # rootfs (str): The path of the sysroot to prepare
+ # flags (:class:`.SandboxFlags`): The sandbox flags
+ #
+ @contextmanager
+ def create_devices(self, rootfs, flags):
+
+ devices = []
+ # When we are interactive, we'd rather mount /dev due to the
+ # sheer number of devices
+ if not flags & SandboxFlags.INTERACTIVE:
+
+ for device in Sandbox.DEVICES:
+ location = os.path.join(rootfs, device.lstrip(os.sep))
+ os.makedirs(os.path.dirname(location), exist_ok=True)
+ try:
+ if os.path.exists(location):
+ os.remove(location)
+
+ devices.append(self.mknod(device, location))
+ except OSError as err:
+ if err.errno == 1:
+ raise SandboxError("Permission denied while creating device node: {}.".format(err) +
+ "BuildStream reqiures root permissions for these setttings.")
+ else:
+ raise
+
+ yield
+
+ for device in devices:
+ os.remove(device)
+
+ # mount_dirs()
+ #
+ # Mount paths required for the command.
+ #
+ # Args:
+ # rootfs (str): The path of the sysroot to prepare
+ # flags (:class:`.SandboxFlags`): The sandbox flags
+ # stdout (file): The stdout
+ # stderr (file): The stderr
+ #
+ @contextmanager
+ def mount_dirs(self, rootfs, flags, stdout, stderr):
+
+ # FIXME: This should probably keep track of potentially
+ # already existing files a la _sandboxwrap.py:239
+
+ @contextmanager
+ def mount_point(point, **kwargs):
+ mount_source_overrides = self._get_mount_sources()
+ if point in mount_source_overrides: # pylint: disable=consider-using-get
+ mount_source = mount_source_overrides[point]
+ else:
+ mount_source = self.mount_map.get_mount_source(point)
+ mount_point = os.path.join(rootfs, point.lstrip(os.sep))
+
+ with Mounter.bind_mount(mount_point, src=mount_source, stdout=stdout, stderr=stderr, **kwargs):
+ yield
+
+ @contextmanager
+ def mount_src(src, **kwargs):
+ mount_point = os.path.join(rootfs, src.lstrip(os.sep))
+ os.makedirs(mount_point, exist_ok=True)
+
+ with Mounter.bind_mount(mount_point, src=src, stdout=stdout, stderr=stderr, **kwargs):
+ yield
+
+ with ExitStack() as stack:
+ stack.enter_context(self.mount_map.mounted(self))
+
+ stack.enter_context(mount_point('/'))
+
+ if flags & SandboxFlags.INTERACTIVE:
+ stack.enter_context(mount_src('/dev'))
+
+ stack.enter_context(mount_src('/tmp'))
+ stack.enter_context(mount_src('/proc'))
+
+ for mark in self._get_marked_directories():
+ stack.enter_context(mount_point(mark['directory']))
+
+ # Remount root RO if necessary
+            if flags & SandboxFlags.ROOT_READ_ONLY:
+ root_mount = Mounter.mount(rootfs, stdout=stdout, stderr=stderr, remount=True, ro=True, bind=True)
+ # Since the exit stack has already registered a mount
+ # for this path, we do not need to register another
+ # umount call.
+ root_mount.__enter__()
+
+ yield
+
+ # mknod()
+ #
+ # Create a device node equivalent to the given source node
+ #
+ # Args:
+ # source (str): Path of the device to mimic (e.g. '/dev/null')
+ # target (str): Location to create the new device in
+ #
+ # Returns:
+ # target (str): The location of the created node
+ #
+ def mknod(self, source, target):
+ try:
+ dev = os.stat(source)
+ major = os.major(dev.st_rdev)
+ minor = os.minor(dev.st_rdev)
+
+ target_dev = os.makedev(major, minor)
+
+ os.mknod(target, mode=stat.S_IFCHR | dev.st_mode, device=target_dev)
+
+        except PermissionError as e:
+            raise SandboxError('Could not create device {}, ensure that you have root permissions: {}'
+                               .format(target, e)) from e
+
+ except OSError as e:
+ raise SandboxError('Could not create device {}: {}'
+ .format(target, e)) from e
+
+ return target
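
As a standalone sketch of the device-node cloning performed by mknod() above (requires root; the target path is hypothetical), the host device's major/minor numbers are copied into a fresh character device node inside the staged rootfs:

    import os
    import stat

    source = '/dev/null'
    target = '/var/run/buildstream/rootfs/dev/null'   # hypothetical location

    # Clone the major/minor numbers and mode bits of the host device
    dev = os.stat(source)
    os.makedirs(os.path.dirname(target), exist_ok=True)
    os.mknod(target,
             mode=stat.S_IFCHR | dev.st_mode,
             device=os.makedev(os.major(dev.st_rdev), os.minor(dev.st_rdev)))
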
diff --git a/src/buildstream/sandbox/_sandboxdummy.py b/src/buildstream/sandbox/_sandboxdummy.py
new file mode 100644
index 000000000..750ddb05d
--- /dev/null
+++ b/src/buildstream/sandbox/_sandboxdummy.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+
+from .._exceptions import SandboxError
+from .sandbox import Sandbox
+
+
+class SandboxDummy(Sandbox):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._reason = kwargs.get("dummy_reason", "no reason given")
+
+ def _run(self, command, flags, *, cwd, env):
+
+ if not self._has_command(command[0], env):
+ raise SandboxError("Staged artifacts do not provide command "
+ "'{}'".format(command[0]),
+ reason='missing-command')
+
+ raise SandboxError("This platform does not support local builds: {}".format(self._reason),
+ reason="unavailable-local-sandbox")
diff --git a/src/buildstream/sandbox/_sandboxremote.py b/src/buildstream/sandbox/_sandboxremote.py
new file mode 100644
index 000000000..2cb7e2538
--- /dev/null
+++ b/src/buildstream/sandbox/_sandboxremote.py
@@ -0,0 +1,577 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2018 Bloomberg LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jim MacArthur <jim.macarthur@codethink.co.uk>
+
+import os
+import shlex
+from collections import namedtuple
+from urllib.parse import urlparse
+from functools import partial
+
+import grpc
+
+from .. import utils
+from .._message import Message, MessageType
+from .sandbox import Sandbox, SandboxCommandError, _SandboxBatch
+from ..storage.directory import VirtualDirectoryError
+from ..storage._casbaseddirectory import CasBasedDirectory
+from .. import _signals
+from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
+from .._protos.google.rpc import code_pb2
+from .._exceptions import BstError, SandboxError
+from .. import _yaml
+from .._protos.google.longrunning import operations_pb2, operations_pb2_grpc
+from .._cas import CASRemote, CASRemoteSpec
+
+
+class RemoteExecutionSpec(namedtuple('RemoteExecutionSpec', 'exec_service storage_service action_service')):
+ pass
+
+
+# SandboxRemote()
+#
+# This isn't really a sandbox, it's a stub which sends all the sources and build
+# commands to a remote server and retrieves the results from it.
+#
+class SandboxRemote(Sandbox):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ self._output_files_required = kwargs.get('output_files_required', True)
+
+ config = kwargs['specs'] # This should be a RemoteExecutionSpec
+ if config is None:
+ return
+
+ self.storage_url = config.storage_service['url']
+ self.exec_url = config.exec_service['url']
+
+ exec_certs = {}
+ for key in ['client-cert', 'client-key', 'server-cert']:
+ if key in config.exec_service:
+ with open(config.exec_service[key], 'rb') as f:
+ exec_certs[key] = f.read()
+
+ self.exec_credentials = grpc.ssl_channel_credentials(
+ root_certificates=exec_certs.get('server-cert'),
+ private_key=exec_certs.get('client-key'),
+ certificate_chain=exec_certs.get('client-cert'))
+
+ action_certs = {}
+ for key in ['client-cert', 'client-key', 'server-cert']:
+ if key in config.action_service:
+ with open(config.action_service[key], 'rb') as f:
+ action_certs[key] = f.read()
+
+ if config.action_service:
+ self.action_url = config.action_service['url']
+ self.action_instance = config.action_service.get('instance-name', None)
+ self.action_credentials = grpc.ssl_channel_credentials(
+ root_certificates=action_certs.get('server-cert'),
+ private_key=action_certs.get('client-key'),
+ certificate_chain=action_certs.get('client-cert'))
+ else:
+ self.action_url = None
+ self.action_instance = None
+ self.action_credentials = None
+
+ self.exec_instance = config.exec_service.get('instance-name', None)
+ self.storage_instance = config.storage_service.get('instance-name', None)
+
+ self.storage_remote_spec = CASRemoteSpec(self.storage_url, push=True,
+ server_cert=config.storage_service.get('server-cert'),
+ client_key=config.storage_service.get('client-key'),
+ client_cert=config.storage_service.get('client-cert'),
+ instance_name=self.storage_instance)
+ self.operation_name = None
+
+ def info(self, msg):
+ self._get_context().message(Message(None, MessageType.INFO, msg))
+
+ @staticmethod
+ def specs_from_config_node(config_node, basedir=None):
+
+ def require_node(config, keyname):
+ val = _yaml.node_get(config, dict, keyname, default_value=None)
+ if val is None:
+ provenance = _yaml.node_get_provenance(remote_config, key=keyname)
+ raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA,
+ "{}: '{}' was not present in the remote "
+ "execution configuration (remote-execution). "
+ .format(str(provenance), keyname))
+ return val
+
+ remote_config = _yaml.node_get(config_node, dict, 'remote-execution', default_value=None)
+ if remote_config is None:
+ return None
+
+ service_keys = ['execution-service', 'storage-service', 'action-cache-service']
+
+ _yaml.node_validate(remote_config, ['url', *service_keys])
+
+ exec_config = require_node(remote_config, 'execution-service')
+ storage_config = require_node(remote_config, 'storage-service')
+ action_config = _yaml.node_get(remote_config, dict, 'action-cache-service', default_value={})
+
+ tls_keys = ['client-key', 'client-cert', 'server-cert']
+
+ _yaml.node_validate(exec_config, ['url', 'instance-name', *tls_keys])
+ _yaml.node_validate(storage_config, ['url', 'instance-name', *tls_keys])
+ if action_config:
+ _yaml.node_validate(action_config, ['url', 'instance-name', *tls_keys])
+
+ # Maintain some backwards compatibility with older configs, in which
+ # 'url' was the only valid key for remote-execution:
+ if 'url' in remote_config:
+ if 'execution-service' not in remote_config:
+ exec_config = _yaml.new_node_from_dict({'url': remote_config['url']})
+ else:
+ provenance = _yaml.node_get_provenance(remote_config, key='url')
+ raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA,
+ "{}: 'url' and 'execution-service' keys were found in the remote "
+ "execution configuration (remote-execution). "
+ "You can only specify one of these."
+ .format(str(provenance)))
+
+ service_configs = [exec_config, storage_config, action_config]
+
+ def resolve_path(path):
+ if basedir and path:
+ return os.path.join(basedir, path)
+ else:
+ return path
+
+ for config_key, config in zip(service_keys, service_configs):
+ # Either both or none of the TLS client key/cert pair must be specified:
+ if ('client-key' in config) != ('client-cert' in config):
+ provenance = _yaml.node_get_provenance(remote_config, key=config_key)
+ raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA,
+ "{}: TLS client key/cert pair is incomplete. "
+ "You must specify both 'client-key' and 'client-cert' "
+ "for authenticated HTTPS connections."
+ .format(str(provenance)))
+
+ for tls_key in tls_keys:
+ if tls_key in config:
+ _yaml.node_set(config, tls_key, resolve_path(_yaml.node_get(config, str, tls_key)))
+
+ return RemoteExecutionSpec(*[_yaml.node_sanitize(conf) for conf in service_configs])
+
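
To make the validation above concrete, here is a hedged illustration of the mapping that a project's `remote-execution` YAML section would load into before being handed to specs_from_config_node(); the URLs, instance names and certificate paths are invented. Note that 'client-key' and 'client-cert' must be given together or not at all, and the action-cache-service section is optional.

    remote_execution_config = {
        'execution-service': {
            'url': 'http://buildgrid.example.com:50051',
            'instance-name': 'main',
        },
        'storage-service': {
            'url': 'https://cas.example.com:11002',
            'server-cert': 'server.crt',
            'client-key': 'client.key',
            'client-cert': 'client.crt',
            'instance-name': 'main',
        },
        # Optional; omit this section entirely if no action cache is available
        'action-cache-service': {
            'url': 'http://buildgrid.example.com:50052',
        },
    }
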
+ def run_remote_command(self, channel, action_digest):
+ # Sends an execution request to the remote execution server.
+ #
+ # This function blocks until it gets a response from the server.
+
+ # Try to create a communication channel to the BuildGrid server.
+ stub = remote_execution_pb2_grpc.ExecutionStub(channel)
+ request = remote_execution_pb2.ExecuteRequest(instance_name=self.exec_instance,
+ action_digest=action_digest,
+ skip_cache_lookup=False)
+
+ def __run_remote_command(stub, execute_request=None, running_operation=None):
+ try:
+ last_operation = None
+ if execute_request is not None:
+ operation_iterator = stub.Execute(execute_request)
+ else:
+ request = remote_execution_pb2.WaitExecutionRequest(name=running_operation.name)
+ operation_iterator = stub.WaitExecution(request)
+
+ for operation in operation_iterator:
+ if not self.operation_name:
+ self.operation_name = operation.name
+ if operation.done:
+ return operation
+ else:
+ last_operation = operation
+
+ except grpc.RpcError as e:
+ status_code = e.code()
+ if status_code == grpc.StatusCode.UNAVAILABLE:
+ raise SandboxError("Failed contacting remote execution server at {}."
+ .format(self.exec_url))
+
+ elif status_code in (grpc.StatusCode.INVALID_ARGUMENT,
+ grpc.StatusCode.FAILED_PRECONDITION,
+ grpc.StatusCode.RESOURCE_EXHAUSTED,
+ grpc.StatusCode.INTERNAL,
+ grpc.StatusCode.DEADLINE_EXCEEDED):
+ raise SandboxError("{} ({}).".format(e.details(), status_code.name))
+
+ elif running_operation and status_code == grpc.StatusCode.UNIMPLEMENTED:
+ raise SandboxError("Failed trying to recover from connection loss: "
+ "server does not support operation status polling recovery.")
+
+ return last_operation
+
+ # Set up signal handler to trigger cancel_operation on SIGTERM
+ operation = None
+ with self._get_context().timed_activity("Waiting for the remote build to complete"), \
+ _signals.terminator(partial(self.cancel_operation, channel)):
+ operation = __run_remote_command(stub, execute_request=request)
+ if operation is None:
+ return None
+ elif operation.done:
+ return operation
+ while operation is not None and not operation.done:
+ operation = __run_remote_command(stub, running_operation=operation)
+
+ return operation
+
+ def cancel_operation(self, channel):
+        # If we don't have the operation name, we can't send the request.
+ if self.operation_name is None:
+ return
+
+ stub = operations_pb2_grpc.OperationsStub(channel)
+ request = operations_pb2.CancelOperationRequest(
+ name=str(self.operation_name))
+
+ try:
+ stub.CancelOperation(request)
+ except grpc.RpcError as e:
+ if (e.code() == grpc.StatusCode.UNIMPLEMENTED or
+ e.code() == grpc.StatusCode.INVALID_ARGUMENT):
+ pass
+ else:
+ raise SandboxError("Failed trying to send CancelOperation request: "
+ "{} ({})".format(e.details(), e.code().name))
+
+ def process_job_output(self, output_directories, output_files, *, failure):
+ # Reads the remote execution server response to an execution request.
+ #
+ # output_directories is an array of OutputDirectory objects.
+ # output_files is an array of OutputFile objects.
+ #
+ # We only specify one output_directory, so it's an error
+ # for there to be any output files or more than one directory at the moment.
+ #
+ if output_files:
+ raise SandboxError("Output files were returned when we didn't request any.")
+ elif not output_directories:
+ error_text = "No output directory was returned from the build server."
+ raise SandboxError(error_text)
+ elif len(output_directories) > 1:
+ error_text = "More than one output directory was returned from the build server: {}."
+ raise SandboxError(error_text.format(output_directories))
+
+ tree_digest = output_directories[0].tree_digest
+ if tree_digest is None or not tree_digest.hash:
+ raise SandboxError("Output directory structure had no digest attached.")
+
+ context = self._get_context()
+ project = self._get_project()
+ cascache = context.get_cascache()
+ artifactcache = context.artifactcache
+ casremote = CASRemote(self.storage_remote_spec)
+
+ # Now do a pull to ensure we have the full directory structure.
+ dir_digest = cascache.pull_tree(casremote, tree_digest)
+ if dir_digest is None or not dir_digest.hash or not dir_digest.size_bytes:
+ raise SandboxError("Output directory structure pulling from remote failed.")
+
+ # At the moment, we will get the whole directory back in the first directory argument and we need
+ # to replace the sandbox's virtual directory with that. Creating a new virtual directory object
+ # from another hash will be interesting, though...
+
+ new_dir = CasBasedDirectory(context.artifactcache.cas, digest=dir_digest)
+ self._set_virtual_directory(new_dir)
+
+ # Fetch the file blobs if needed
+ if self._output_files_required or artifactcache.has_push_remotes():
+ required_blobs = []
+ directories = []
+
+ directories.append(self._output_directory)
+ if self._build_directory and (self._build_directory_always or failure):
+ directories.append(self._build_directory)
+
+ for directory in directories:
+ try:
+ vdir = new_dir.descend(*directory.strip(os.sep).split(os.sep))
+ dir_digest = vdir._get_digest()
+ required_blobs += cascache.required_blobs_for_directory(dir_digest)
+ except VirtualDirectoryError:
+ # If the directory does not exist, there is no need to
+ # download file blobs.
+ pass
+
+ local_missing_blobs = cascache.local_missing_blobs(required_blobs)
+ if local_missing_blobs:
+ if self._output_files_required:
+ # Fetch all blobs from Remote Execution CAS server
+ blobs_to_fetch = local_missing_blobs
+ else:
+ # Output files are not required in the local cache,
+ # however, artifact push remotes will need them.
+ # Only fetch blobs that are missing on one or multiple
+ # artifact servers.
+ blobs_to_fetch = artifactcache.find_missing_blobs(project, local_missing_blobs)
+
+ remote_missing_blobs = cascache.fetch_blobs(casremote, blobs_to_fetch)
+ if remote_missing_blobs:
+ raise SandboxError("{} output files are missing on the CAS server"
+ .format(len(remote_missing_blobs)))
+
+ def _run(self, command, flags, *, cwd, env):
+ stdout, stderr = self._get_output()
+
+ context = self._get_context()
+ project = self._get_project()
+ cascache = context.get_cascache()
+ artifactcache = context.artifactcache
+
+        # Set up the virtual directory
+ upload_vdir = self.get_virtual_directory()
+
+ # Create directories for all marked directories. This emulates
+ # some of the behaviour of other sandboxes, which create these
+ # to use as mount points.
+ for mark in self._get_marked_directories():
+ directory = mark['directory']
+ # Create each marked directory
+ upload_vdir.descend(*directory.split(os.path.sep), create=True)
+
+ # Generate action_digest first
+ input_root_digest = upload_vdir._get_digest()
+ command_proto = self._create_command(command, cwd, env)
+ command_digest = utils._message_digest(command_proto.SerializeToString())
+ action = remote_execution_pb2.Action(command_digest=command_digest,
+ input_root_digest=input_root_digest)
+ action_digest = utils._message_digest(action.SerializeToString())
+
+ # Next, try to create a communication channel to the BuildGrid server.
+ url = urlparse(self.exec_url)
+ if not url.port:
+ raise SandboxError("You must supply a protocol and port number in the execution-service url, "
+ "for example: http://buildservice:50051.")
+ if url.scheme == 'http':
+ channel = grpc.insecure_channel('{}:{}'.format(url.hostname, url.port))
+ elif url.scheme == 'https':
+ channel = grpc.secure_channel('{}:{}'.format(url.hostname, url.port), self.exec_credentials)
+ else:
+ raise SandboxError("Remote execution currently only supports the 'http' protocol "
+ "and '{}' was supplied.".format(url.scheme))
+
+ # check action cache download and download if there
+ action_result = self._check_action_cache(action_digest)
+
+ if not action_result:
+ casremote = CASRemote(self.storage_remote_spec)
+ try:
+ casremote.init()
+ except grpc.RpcError as e:
+ raise SandboxError("Failed to contact remote execution CAS endpoint at {}: {}"
+ .format(self.storage_url, e)) from e
+
+ # Determine blobs missing on remote
+ try:
+ missing_blobs = cascache.remote_missing_blobs_for_directory(casremote, input_root_digest)
+ except grpc.RpcError as e:
+ raise SandboxError("Failed to determine missing blobs: {}".format(e)) from e
+
+ # Check if any blobs are also missing locally (partial artifact)
+ # and pull them from the artifact cache.
+ try:
+ local_missing_blobs = cascache.local_missing_blobs(missing_blobs)
+ if local_missing_blobs:
+ artifactcache.fetch_missing_blobs(project, local_missing_blobs)
+ except (grpc.RpcError, BstError) as e:
+ raise SandboxError("Failed to pull missing blobs from artifact cache: {}".format(e)) from e
+
+ # Now, push the missing blobs to the remote.
+ try:
+ cascache.send_blobs(casremote, missing_blobs)
+ except grpc.RpcError as e:
+ raise SandboxError("Failed to push source directory to remote: {}".format(e)) from e
+
+ # Push command and action
+ try:
+ casremote.push_message(command_proto)
+ except grpc.RpcError as e:
+ raise SandboxError("Failed to push command to remote: {}".format(e))
+
+ try:
+ casremote.push_message(action)
+ except grpc.RpcError as e:
+ raise SandboxError("Failed to push action to remote: {}".format(e))
+
+ # Now request to execute the action
+ operation = self.run_remote_command(channel, action_digest)
+ action_result = self._extract_action_result(operation)
+
+ # Get output of build
+ self.process_job_output(action_result.output_directories, action_result.output_files,
+ failure=action_result.exit_code != 0)
+
+ if stdout:
+ if action_result.stdout_raw:
+ stdout.write(str(action_result.stdout_raw, 'utf-8', errors='ignore'))
+ if stderr:
+ if action_result.stderr_raw:
+ stderr.write(str(action_result.stderr_raw, 'utf-8', errors='ignore'))
+
+ if action_result.exit_code != 0:
+ # A normal error during the build: the remote execution system
+ # has worked correctly but the command failed.
+ return action_result.exit_code
+
+ return 0
+
+ def _check_action_cache(self, action_digest):
+        # Checks the action cache to see if this artifact has already been built
+        #
+        # Returns either the action result or None if not found; raises
+        # SandboxError if another grpc error was raised
+ if not self.action_url:
+ return None
+ url = urlparse(self.action_url)
+ if not url.port:
+ raise SandboxError("You must supply a protocol and port number in the action-cache-service url, "
+ "for example: http://buildservice:50051.")
+ if url.scheme == 'http':
+ channel = grpc.insecure_channel('{}:{}'.format(url.hostname, url.port))
+ elif url.scheme == 'https':
+ channel = grpc.secure_channel('{}:{}'.format(url.hostname, url.port), self.action_credentials)
+
+ request = remote_execution_pb2.GetActionResultRequest(instance_name=self.action_instance,
+ action_digest=action_digest)
+ stub = remote_execution_pb2_grpc.ActionCacheStub(channel)
+ try:
+ result = stub.GetActionResult(request)
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ raise SandboxError("Failed to query action cache: {} ({})"
+ .format(e.code(), e.details()))
+ else:
+ return None
+ else:
+ self.info("Action result found in action cache")
+ return result
+
+ def _create_command(self, command, working_directory, environment):
+ # Creates a command proto
+ environment_variables = [remote_execution_pb2.Command.
+ EnvironmentVariable(name=k, value=v)
+ for (k, v) in environment.items()]
+
+ # Request the whole directory tree as output
+ output_directory = os.path.relpath(os.path.sep, start=working_directory)
+
+ return remote_execution_pb2.Command(arguments=command,
+ working_directory=working_directory,
+ environment_variables=environment_variables,
+ output_files=[],
+ output_directories=[output_directory],
+ platform=None)
+
+ @staticmethod
+ def _extract_action_result(operation):
+ if operation is None:
+ # Failure of remote execution, usually due to an error in BuildStream
+ raise SandboxError("No response returned from server")
+
+ assert not operation.HasField('error') and operation.HasField('response')
+
+ execution_response = remote_execution_pb2.ExecuteResponse()
+ # The response is expected to be an ExecutionResponse message
+ assert operation.response.Is(execution_response.DESCRIPTOR)
+
+ operation.response.Unpack(execution_response)
+
+ if execution_response.status.code != code_pb2.OK:
+ # An unexpected error during execution: the remote execution
+ # system failed at processing the execution request.
+ if execution_response.status.message:
+ raise SandboxError(execution_response.status.message)
+ else:
+ raise SandboxError("Remote server failed at executing the build request.")
+
+ return execution_response.result
+
+ def _create_batch(self, main_group, flags, *, collect=None):
+ return _SandboxRemoteBatch(self, main_group, flags, collect=collect)
+
+ def _use_cas_based_directory(self):
+ # Always use CasBasedDirectory for remote execution
+ return True
+
+
+# _SandboxRemoteBatch()
+#
+# Command batching by shell script generation.
+#
+class _SandboxRemoteBatch(_SandboxBatch):
+
+ def __init__(self, sandbox, main_group, flags, *, collect=None):
+ super().__init__(sandbox, main_group, flags, collect=collect)
+
+ self.script = None
+ self.first_command = None
+ self.cwd = None
+ self.env = None
+
+ def execute(self):
+ self.script = ""
+
+ self.main_group.execute(self)
+
+ first = self.first_command
+ if first and self.sandbox.run(['sh', '-c', '-e', self.script], self.flags, cwd=first.cwd, env=first.env) != 0:
+ raise SandboxCommandError("Command execution failed", collect=self.collect)
+
+ def execute_group(self, group):
+ group.execute_children(self)
+
+ def execute_command(self, command):
+ if self.first_command is None:
+ # First command in batch
+ # Initial working directory and environment of script already matches
+ # the command configuration.
+ self.first_command = command
+ else:
+ # Change working directory for this command
+ if command.cwd != self.cwd:
+ self.script += "mkdir -p {}\n".format(command.cwd)
+ self.script += "cd {}\n".format(command.cwd)
+
+ # Update environment for this command
+ for key in self.env.keys():
+ if key not in command.env:
+ self.script += "unset {}\n".format(key)
+ for key, value in command.env.items():
+ if key not in self.env or self.env[key] != value:
+ self.script += "export {}={}\n".format(key, shlex.quote(value))
+
+ # Keep track of current working directory and environment
+ self.cwd = command.cwd
+ self.env = command.env
+
+ # Actual command execution
+ cmdline = ' '.join(shlex.quote(cmd) for cmd in command.command)
+ self.script += "(set -ex; {})".format(cmdline)
+
+ # Error handling
+ label = command.label or cmdline
+ quoted_label = shlex.quote("'{}'".format(label))
+ self.script += " || (echo Command {} failed with exitcode $? >&2 ; exit 1)\n".format(quoted_label)
+
+ def execute_call(self, call):
+ raise SandboxError("SandboxRemote does not support callbacks in command batches")
diff --git a/src/buildstream/sandbox/sandbox.py b/src/buildstream/sandbox/sandbox.py
new file mode 100644
index 000000000..c96ccb57b
--- /dev/null
+++ b/src/buildstream/sandbox/sandbox.py
@@ -0,0 +1,717 @@
+#
+# Copyright (C) 2017 Codethink Limited
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Andrew Leeming <andrew.leeming@codethink.co.uk>
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+"""
+Sandbox - The build sandbox
+===========================
+:class:`.Element` plugins which want to interface with the sandbox
+need only understand this interface; while a plugin may be given a
+different sandbox implementation, any sandbox implementation it is
+given will conform to this interface.
+
+See also: :ref:`sandboxing`.
+"""
+
+import os
+import shlex
+import contextlib
+from contextlib import contextmanager
+
+from .._exceptions import ImplError, BstError, SandboxError
+from .._message import Message, MessageType
+from ..storage._filebaseddirectory import FileBasedDirectory
+from ..storage._casbaseddirectory import CasBasedDirectory
+
+
+class SandboxFlags():
+ """Flags indicating how the sandbox should be run.
+ """
+
+ NONE = 0
+ """Use default sandbox configuration.
+ """
+
+ ROOT_READ_ONLY = 0x01
+ """The root filesystem is read only.
+
+ This is normally true except when running integration commands
+ on staged dependencies, where we have to update caches and run
+ things such as ldconfig.
+ """
+
+ NETWORK_ENABLED = 0x02
+ """Whether to expose host network.
+
+ This should not be set when running builds, but can
+ be allowed for running a shell in a sandbox.
+ """
+
+ INTERACTIVE = 0x04
+ """Whether to run the sandbox interactively
+
+ This determines if the sandbox should attempt to connect
+ the terminal through to the calling process, or detach
+ the terminal entirely.
+ """
+
+ INHERIT_UID = 0x08
+ """Whether to use the user id and group id from the host environment
+
+ This determines if processes in the sandbox should run with the
+ same user id and group id as BuildStream itself. By default,
+ processes run with user id and group id 0, protected by a user
+ namespace where available.
+ """
+
+
+class SandboxCommandError(SandboxError):
+ """Raised by :class:`.Sandbox` implementations when a command fails.
+
+ Args:
+ message (str): The error message to report to the user
+ detail (str): The detailed error string
+ collect (str): An optional directory containing partial install contents
+ """
+ def __init__(self, message, *, detail=None, collect=None):
+ super().__init__(message, detail=detail, reason='command-failed')
+
+ self.collect = collect
+
+
+class Sandbox():
+ """Sandbox()
+
+ Sandbox programming interface for :class:`.Element` plugins.
+ """
+
+ # Minimal set of devices for the sandbox
+ DEVICES = [
+ '/dev/urandom',
+ '/dev/random',
+ '/dev/zero',
+ '/dev/null'
+ ]
+
+ def __init__(self, context, project, directory, **kwargs):
+ self.__context = context
+ self.__project = project
+ self.__directories = []
+ self.__cwd = None
+ self.__env = None
+ self.__mount_sources = {}
+ self.__allow_real_directory = kwargs['allow_real_directory']
+ self.__allow_run = True
+
+ # Plugin ID for logging
+ plugin = kwargs.get('plugin', None)
+ if plugin:
+ self.__plugin_id = plugin._unique_id
+ else:
+ self.__plugin_id = None
+
+ # Configuration from kwargs common to all subclasses
+ self.__config = kwargs['config']
+ self.__stdout = kwargs['stdout']
+ self.__stderr = kwargs['stderr']
+ self.__bare_directory = kwargs['bare_directory']
+
+ # Setup the directories. Root and output_directory should be
+ # available to subclasses, hence being single-underscore. The
+ # others are private to this class.
+ # If the directory is bare, it probably doesn't need scratch
+ if self.__bare_directory:
+ self._root = directory
+ self.__scratch = None
+ os.makedirs(self._root, exist_ok=True)
+ else:
+ self._root = os.path.join(directory, 'root')
+ self.__scratch = os.path.join(directory, 'scratch')
+ for directory_ in [self._root, self.__scratch]:
+ os.makedirs(directory_, exist_ok=True)
+
+ self._output_directory = None
+ self._build_directory = None
+ self._build_directory_always = None
+ self._vdir = None
+ self._usebuildtree = False
+
+ # This is set if anyone requests access to the underlying
+ # directory via get_directory.
+ self._never_cache_vdirs = False
+
+ # Pending command batch
+ self.__batch = None
+
+ def get_directory(self):
+ """Fetches the sandbox root directory
+
+ The root directory is where artifacts for the base
+ runtime environment should be staged. Only works if
+ BST_VIRTUAL_DIRECTORY is not set.
+
+ Returns:
+ (str): The sandbox root directory
+
+ """
+ if self.__allow_real_directory:
+ self._never_cache_vdirs = True
+ return self._root
+ else:
+ raise BstError("You can't use get_directory")
+
+ def get_virtual_directory(self):
+ """Fetches the sandbox root directory as a virtual Directory.
+
+ The root directory is where artifacts for the base
+ runtime environment should be staged.
+
+ Use caution if you use get_directory and
+ get_virtual_directory. If you alter the contents of the
+ directory returned by get_directory, all objects returned by
+ get_virtual_directory or derived from them are invalid and you
+ must call get_virtual_directory again to get a new copy.
+
+ Returns:
+ (Directory): The sandbox root directory
+
+ """
+ if self._vdir is None or self._never_cache_vdirs:
+ if self._use_cas_based_directory():
+ cascache = self.__context.get_cascache()
+ self._vdir = CasBasedDirectory(cascache)
+ else:
+ self._vdir = FileBasedDirectory(self._root)
+ return self._vdir
+
+ def _set_virtual_directory(self, virtual_directory):
+ """ Sets virtual directory. Useful after remote execution
+ has rewritten the working directory.
+ """
+ self._vdir = virtual_directory
+
+ def set_environment(self, environment):
+ """Sets the environment variables for the sandbox
+
+ Args:
+ environment (dict): The environment variables to use in the sandbox
+ """
+ self.__env = environment
+
+ def set_work_directory(self, directory):
+ """Sets the work directory for commands run in the sandbox
+
+ Args:
+ directory (str): An absolute path within the sandbox
+ """
+ self.__cwd = directory
+
+ def set_output_directory(self, directory):
+ """Sets the output directory - the directory which is preserved
+ as an artifact after assembly.
+
+ Args:
+ directory (str): An absolute path within the sandbox
+ """
+ self._output_directory = directory
+
+ def mark_directory(self, directory, *, artifact=False):
+ """Marks a sandbox directory and ensures it will exist
+
+ Args:
+ directory (str): An absolute path within the sandbox to mark
+ artifact (bool): Whether the content staged at this location
+ contains artifacts
+
+ .. note::
+           Any marked directories will be read-write in the sandboxed
+           environment; only the root directory is allowed to be readonly.
+ """
+ self.__directories.append({
+ 'directory': directory,
+ 'artifact': artifact
+ })
+
+ def run(self, command, flags, *, cwd=None, env=None, label=None):
+ """Run a command in the sandbox.
+
+ If this is called outside a batch context, the command is immediately
+ executed.
+
+ If this is called in a batch context, the command is added to the batch
+ for later execution. If the command fails, later commands will not be
+ executed. Command flags must match batch flags.
+
+ Args:
+ command (list): The command to run in the sandboxed environment, as a list
+ of strings starting with the binary to run.
+ flags (:class:`.SandboxFlags`): The flags for running this command.
+ cwd (str): The sandbox relative working directory in which to run the command.
+ env (dict): A dictionary of string key, value pairs to set as environment
+ variables inside the sandbox environment.
+ label (str): An optional label for the command, used for logging. (*Since: 1.4*)
+
+ Returns:
+ (int|None): The program exit code, or None if running in batch context.
+
+ Raises:
+ (:class:`.ProgramNotFoundError`): If a host tool which the given sandbox
+ implementation requires is not found.
+
+ .. note::
+
+ The optional *cwd* argument will default to the value set with
+ :func:`~buildstream.sandbox.Sandbox.set_work_directory` and this
+ function must make sure the directory will be created if it does
+ not exist yet, even if a workspace is being used.
+ """
+
+ if not self.__allow_run:
+ raise SandboxError("Sandbox.run() has been disabled")
+
+ # Fallback to the sandbox default settings for
+ # the cwd and env.
+ #
+ cwd = self._get_work_directory(cwd=cwd)
+ env = self._get_environment(cwd=cwd, env=env)
+
+ # Convert single-string argument to a list
+ if isinstance(command, str):
+ command = [command]
+
+ if self.__batch:
+ assert flags == self.__batch.flags, \
+ "Inconsistent sandbox flags in single command batch"
+
+ batch_command = _SandboxBatchCommand(command, cwd=cwd, env=env, label=label)
+
+ current_group = self.__batch.current_group
+ current_group.append(batch_command)
+ return None
+ else:
+ return self._run(command, flags, cwd=cwd, env=env)
+
+ @contextmanager
+ def batch(self, flags, *, label=None, collect=None):
+ """Context manager for command batching
+
+ This provides a batch context that defers execution of commands until
+ the end of the context. If a command fails, the batch will be aborted
+ and subsequent commands will not be executed.
+
+ Command batches may be nested. Execution will start only when the top
+ level batch context ends.
+
+ Args:
+ flags (:class:`.SandboxFlags`): The flags for this command batch.
+ label (str): An optional label for the batch group, used for logging.
+ collect (str): An optional directory containing partial install contents
+ on command failure.
+
+ Raises:
+ (:class:`.SandboxCommandError`): If a command fails.
+
+ *Since: 1.4*
+ """
+
+ group = _SandboxBatchGroup(label=label)
+
+ if self.__batch:
+ # Nested batch
+ assert flags == self.__batch.flags, \
+ "Inconsistent sandbox flags in nested batch"
+
+ parent_group = self.__batch.current_group
+ parent_group.append(group)
+ self.__batch.current_group = group
+ try:
+ yield
+ finally:
+ self.__batch.current_group = parent_group
+ else:
+ # Top-level batch
+ batch = self._create_batch(group, flags, collect=collect)
+
+ self.__batch = batch
+ try:
+ yield
+ finally:
+ self.__batch = None
+
+ batch.execute()
+
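+ # A minimal usage sketch (illustration only, never called by BuildStream):
+ # this is roughly how an Element plugin combines batch() and run(). It
+ # assumes that SandboxFlags is available in this module and that the
+ # sandbox has already been configured by the owning element.
+ def _example_run_and_batch(self):
+     # Commands in a batch are deferred until the context exits; a failing
+     # command aborts the remaining commands in the batch.
+     with self.batch(SandboxFlags.NONE, label="Example batch"):
+         self.run(['sh', '-c', 'echo configuring'], SandboxFlags.NONE, label='echo configuring')
+         self.run(['sh', '-c', 'echo building'], SandboxFlags.NONE, label='echo building')
+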
+ #####################################################
+ # Abstract Methods for Sandbox implementations #
+ #####################################################
+
+ # _run()
+ #
+ # Abstract method for running a single command
+ #
+ # Args:
+ # command (list): The command to run in the sandboxed environment, as a list
+ # of strings starting with the binary to run.
+ # flags (:class:`.SandboxFlags`): The flags for running this command.
+ # cwd (str): The sandbox relative working directory in which to run the command.
+ # env (dict): A dictionary of string key, value pairs to set as environment
+ # variables inside the sandbox environment.
+ #
+ # Returns:
+ # (int): The program exit code.
+ #
+ def _run(self, command, flags, *, cwd, env):
+ raise ImplError("Sandbox of type '{}' does not implement _run()"
+ .format(type(self).__name__))
+
+ # _create_batch()
+ #
+ # Abstract method for creating a batch object. Subclasses can override
+ # this method to instantiate a subclass of _SandboxBatch.
+ #
+ # Args:
+ # main_group (:class:`_SandboxBatchGroup`): The top level batch group.
+ # flags (:class:`.SandboxFlags`): The flags for commands in this batch.
+ # collect (str): An optional directory containing partial install contents
+ # on command failure.
+ #
+ def _create_batch(self, main_group, flags, *, collect=None):
+ return _SandboxBatch(self, main_group, flags, collect=collect)
+
+ # _use_cas_based_directory()
+ #
+ # Whether to use CasBasedDirectory as sandbox root. If this returns `False`,
+ # FileBasedDirectory will be used.
+ #
+ # Returns:
+ # (bool): Whether to use CasBasedDirectory
+ #
+ def _use_cas_based_directory(self):
+ # Use CasBasedDirectory as sandbox root if neither Sandbox.get_directory()
+ # nor Sandbox.run() are required. This allows faster staging.
+ if not self.__allow_real_directory and not self.__allow_run:
+ return True
+
+ return 'BST_CAS_DIRECTORIES' in os.environ
+
+ ################################################
+ # Private methods #
+ ################################################
+ # _get_context()
+ #
+ # Fetches the context BuildStream was launched with.
+ #
+ # Returns:
+ # (Context): The context of this BuildStream invocation
+ def _get_context(self):
+ return self.__context
+
+ # _get_project()
+ #
+ # Fetches the Project this sandbox was created to build for.
+ #
+ # Returns:
+ # (Project): The project this sandbox was created for.
+ def _get_project(self):
+ return self.__project
+
+ # _get_marked_directories()
+ #
+ # Fetches the marked directories in the sandbox
+ #
+ # Returns:
+ # (list): A list of directory mark objects.
+ #
+ # The returned objects are dictionaries with the following attributes:
+ # directory: The absolute path within the sandbox
+ # artifact: Whether the path will contain artifacts or not
+ #
+ def _get_marked_directories(self):
+ return self.__directories
+
+ # _get_mount_sources()
+ #
+ # Fetches the list of mount sources
+ #
+ # Returns:
+ # (dict): A dictionary where keys are mount points and values are the mount sources
+ def _get_mount_sources(self):
+ return self.__mount_sources
+
+ # _set_mount_source()
+ #
+ # Sets the mount source for a given mountpoint
+ #
+ # Args:
+ # mountpoint (str): The absolute mountpoint path inside the sandbox
+ # mount_source (str): the host path to be mounted at the mount point
+ def _set_mount_source(self, mountpoint, mount_source):
+ self.__mount_sources[mountpoint] = mount_source
+
+ # _get_environment()
+ #
+ # Fetches the environment variables for running commands
+ # in the sandbox.
+ #
+ # Args:
+ # cwd (str): The working directory the command has been requested to run in, if any.
+ # env (dict): The environment the command has been requested to run in, if any.
+ #
+ # Returns:
+ # (dict): The environment variables to use when running the command
+ def _get_environment(self, *, cwd=None, env=None):
+ cwd = self._get_work_directory(cwd=cwd)
+ if env is None:
+ env = self.__env
+
+ # Naive getcwd implementations can break when bind-mounts to different
+ # paths on the same filesystem are present. Letting the command know
+ # what directory it is in makes it unnecessary to call the faulty
+ # getcwd.
+ env = dict(env)
+ env['PWD'] = cwd
+
+ return env
+
+ # _get_work_directory()
+ #
+ # Fetches the working directory for running commands
+ # in the sandbox.
+ #
+ # Args:
+ # cwd (str): The working directory the command has been requested to run in, if any.
+ #
+ # Returns:
+ # (str): The sandbox work directory
+ def _get_work_directory(self, *, cwd=None):
+ return cwd or self.__cwd or '/'
+
+ # _get_scratch_directory()
+ #
+ # Fetches the sandbox scratch directory, this directory can
+ # be used by the sandbox implementation to cache things or
+ # redirect temporary fuse mounts.
+ #
+ # The scratch directory is guaranteed to be on the same
+ # filesystem as the root directory.
+ #
+ # Returns:
+ # (str): The sandbox scratch directory
+ def _get_scratch_directory(self):
+ assert not self.__bare_directory, "Scratch is not going to work with bare directories"
+ return self.__scratch
+
+ # _get_output()
+ #
+ # Fetches the stdout & stderr
+ #
+ # Returns:
+ # (file): The stdout, or None to inherit
+ # (file): The stderr, or None to inherit
+ def _get_output(self):
+ return (self.__stdout, self.__stderr)
+
+ # _get_config()
+ #
+ # Fetches the sandbox configuration object.
+ #
+ # Returns:
+ # (SandboxConfig): An object containing the configuration
+ # data passed in during construction.
+ def _get_config(self):
+ return self.__config
+
+ # _has_command()
+ #
+ # Tests whether a command exists inside the sandbox
+ #
+ # Args:
+ # command (str): The command to test.
+ # env (dict): A dictionary of string key, value pairs to set as environment
+ # variables inside the sandbox environment.
+ # Returns:
+ # (bool): Whether a command exists inside the sandbox.
+ def _has_command(self, command, env=None):
+ if os.path.isabs(command):
+ return os.path.lexists(os.path.join(
+ self._root, command.lstrip(os.sep)))
+
+ for path in env.get('PATH').split(':'):
+ if os.path.lexists(os.path.join(
+ self._root, path.lstrip(os.sep), command)):
+ return True
+
+ return False
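+
+ # A standalone sketch of the PATH lookup performed above (the PATH value
+ # and the command are made up; the sandbox root at self._root is assumed
+ # as used above). Illustration only, never called.
+ def _example_command_candidates(self, command):
+     env = {'PATH': '/usr/bin:/bin'}
+     return [os.path.join(self._root, path.lstrip(os.sep), command)
+             for path in env['PATH'].split(':')]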
+
+ # _get_plugin_id()
+ #
+ # Get the plugin's unique identifier
+ #
+ def _get_plugin_id(self):
+ return self.__plugin_id
+
+ # _callback()
+ #
+ # If this is called outside a batch context, the specified function is
+ # invoked immediately.
+ #
+ # If this is called in a batch context, the function is added to the batch
+ # for later invocation.
+ #
+ # Args:
+ # callback (callable): The function to invoke
+ #
+ def _callback(self, callback):
+ if self.__batch:
+ batch_call = _SandboxBatchCall(callback)
+
+ current_group = self.__batch.current_group
+ current_group.append(batch_call)
+ else:
+ callback()
+
+ # _disable_run()
+ #
+ # Raise exception if `Sandbox.run()` is called. This enables use of
+ # CasBasedDirectory for faster staging when command execution is not
+ # required.
+ #
+ def _disable_run(self):
+ self.__allow_run = False
+
+ # _set_build_directory()
+ #
+ # Sets the build directory - the directory which may be preserved as
+ # buildtree in the artifact.
+ #
+ # Args:
+ # directory (str): An absolute path within the sandbox
+ # always (bool): True if the build directory should always be downloaded,
+ # False if it should be downloaded only on failure
+ #
+ def _set_build_directory(self, directory, *, always):
+ self._build_directory = directory
+ self._build_directory_always = always
+
+
+# _SandboxBatch()
+#
+# A batch of sandbox commands.
+#
+class _SandboxBatch():
+
+ def __init__(self, sandbox, main_group, flags, *, collect=None):
+ self.sandbox = sandbox
+ self.main_group = main_group
+ self.current_group = main_group
+ self.flags = flags
+ self.collect = collect
+
+ def execute(self):
+ self.main_group.execute(self)
+
+ def execute_group(self, group):
+ if group.label:
+ context = self.sandbox._get_context()
+ cm = context.timed_activity(group.label, unique_id=self.sandbox._get_plugin_id())
+ else:
+ cm = contextlib.suppress()
+
+ with cm:
+ group.execute_children(self)
+
+ def execute_command(self, command):
+ if command.label:
+ context = self.sandbox._get_context()
+ message = Message(self.sandbox._get_plugin_id(), MessageType.STATUS,
+ 'Running command', detail=command.label)
+ context.message(message)
+
+ exitcode = self.sandbox._run(command.command, self.flags, cwd=command.cwd, env=command.env)
+ if exitcode != 0:
+ cmdline = ' '.join(shlex.quote(cmd) for cmd in command.command)
+ label = command.label or cmdline
+ raise SandboxCommandError("Command failed with exitcode {}".format(exitcode),
+ detail=label, collect=self.collect)
+
+ def execute_call(self, call):
+ call.callback()
+
+
+# _SandboxBatchItem()
+#
+# An item in a command batch.
+#
+class _SandboxBatchItem():
+
+ def __init__(self, *, label=None):
+ self.label = label
+
+
+# _SandboxBatchCommand()
+#
+# A command item in a command batch.
+#
+class _SandboxBatchCommand(_SandboxBatchItem):
+
+ def __init__(self, command, *, cwd, env, label=None):
+ super().__init__(label=label)
+
+ self.command = command
+ self.cwd = cwd
+ self.env = env
+
+ def execute(self, batch):
+ batch.execute_command(self)
+
+
+# _SandboxBatchGroup()
+#
+# A group in a command batch.
+#
+class _SandboxBatchGroup(_SandboxBatchItem):
+
+ def __init__(self, *, label=None):
+ super().__init__(label=label)
+
+ self.children = []
+
+ def append(self, item):
+ self.children.append(item)
+
+ def execute(self, batch):
+ batch.execute_group(self)
+
+ def execute_children(self, batch):
+ for item in self.children:
+ item.execute(batch)
+
+
+# _SandboxBatchCall()
+#
+# A call item in a command batch.
+#
+class _SandboxBatchCall(_SandboxBatchItem):
+
+ def __init__(self, callback):
+ super().__init__()
+
+ self.callback = callback
+
+ def execute(self, batch):
+ batch.execute_call(self)
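+
+
+ # A small sketch of how the batch item classes above compose (the labels and
+ # the command are made up, and this helper is illustrative only, never called
+ # by BuildStream): a group holds commands, nested groups and calls, and each
+ # item dispatches back into the owning _SandboxBatch through execute().
+ def _example_build_batch_tree():
+     group = _SandboxBatchGroup(label="example group")
+     group.append(_SandboxBatchCommand(['make'], cwd='/', env={}, label='make'))
+     nested = _SandboxBatchGroup(label="nested group")
+     nested.append(_SandboxBatchCall(lambda: None))
+     group.append(nested)
+     return group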
diff --git a/src/buildstream/scriptelement.py b/src/buildstream/scriptelement.py
new file mode 100644
index 000000000..dfdbb45c0
--- /dev/null
+++ b/src/buildstream/scriptelement.py
@@ -0,0 +1,297 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jonathan Maw <jonathan.maw@codethink.co.uk>
+
+"""
+ScriptElement - Abstract class for scripting elements
+=====================================================
+The ScriptElement class is a convenience class one can derive for
+implementing elements that stage elements and run command-lines on them.
+
+Any derived classes must write their own configure() implementation, using
+the public APIs exposed in this class.
+
+Derived classes must also chain up to the parent method in their preflight()
+implementations.
+
+
+"""
+
+import os
+from collections import OrderedDict
+
+from .element import Element, ElementError
+from .sandbox import SandboxFlags
+from .types import Scope
+
+
+class ScriptElement(Element):
+ __install_root = "/"
+ __cwd = "/"
+ __root_read_only = False
+ __commands = None
+ __layout = []
+
+ # This element's output is its dependencies, so
+ # we must rebuild if the dependencies change even when
+ # not in strict build plans.
+ #
+ BST_STRICT_REBUILD = True
+
+ # Script artifacts must never have indirect dependencies,
+ # so runtime dependencies are forbidden.
+ BST_FORBID_RDEPENDS = True
+
+ # This element ignores sources, so we should forbid them from being
+ # added, to reduce the potential for confusion
+ BST_FORBID_SOURCES = True
+
+ def set_work_dir(self, work_dir=None):
+ """Sets the working dir
+
+ The working dir (a.k.a. cwd) is the directory which commands will be
+ called from.
+
+ Args:
+ work_dir (str): The working directory. If called without this argument
+ set, it'll default to the value of the variable ``cwd``.
+ """
+ if work_dir is None:
+ self.__cwd = self.get_variable("cwd") or "/"
+ else:
+ self.__cwd = work_dir
+
+ def set_install_root(self, install_root=None):
+ """Sets the install root
+
+ The install root is the directory which output will be collected from
+ once the commands have been run.
+
+ Args:
+ install_root(str): The install root. If called without this argument
+ set, it'll default to the value of the variable ``install-root``.
+ """
+ if install_root is None:
+ self.__install_root = self.get_variable("install-root") or "/"
+ else:
+ self.__install_root = install_root
+
+ def set_root_read_only(self, root_read_only):
+ """Sets root read-only
+
+ When commands are run, if root_read_only is true, then the root of the
+ filesystem will be protected. This is strongly recommended whenever
+ possible.
+
+ If this variable is not set, the default permission is read-write.
+
+ Args:
+ root_read_only (bool): Whether to mark the root filesystem as
+ read-only.
+ """
+ self.__root_read_only = root_read_only
+
+ def layout_add(self, element, destination):
+ """Adds an element-destination pair to the layout.
+
+ Layout is a way of defining how dependencies should be added to the
+ staging area for running commands.
+
+ Args:
+ element (str): The name of the element to stage, or None. This may be any
+ element found in the dependencies, whether it is a direct
+ or indirect dependency.
+ destination (str): The path inside the staging area for where to
+ stage this element. If it is not "/", then integration
+ commands will not be run.
+
+ If this function is never called, then the default behavior is to just
+ stage the Scope.BUILD dependencies of the element in question at the
+ sandbox root. Otherwise, the Scope.RUN dependencies of each specified
+ element will be staged in their specified destination directories.
+
+ .. note::
+
+ The order of directories in the layout is significant as they
+ will be mounted into the sandbox. It is an error to specify a parent
+ directory which will shadow a directory already present in the layout.
+
+ .. note::
+
+ In the case that no element is specified, a read-write directory will
+ be made available at the specified location.
+ """
+ #
+ # Even if this is an empty list by default, make sure that it is
+ # instance data instead of appending directly onto class data.
+ #
+ if not self.__layout:
+ self.__layout = []
+ self.__layout.append({"element": element,
+ "destination": destination})
+
+ def add_commands(self, group_name, command_list):
+ """Adds a list of commands under the group-name.
+
+ .. note::
+
+ Command groups will be run in the order they were added.
+
+ .. note::
+
+ This does not perform substitutions automatically. They must
+ be performed beforehand (see
+ :func:`~buildstream.element.Element.node_subst_list`)
+
+ Args:
+ group_name (str): The name of the group of commands.
+ command_list (list): The list of commands to be run.
+ """
+ if not self.__commands:
+ self.__commands = OrderedDict()
+ self.__commands[group_name] = command_list
+
+ def __validate_layout(self):
+ if self.__layout:
+ # Cannot proceed if a layout is used, but no entry is staged at "/"
+ root_defined = any([(entry['destination'] == '/') for entry in self.__layout])
+ if not root_defined:
+ raise ElementError("{}: Using layout, but none are staged as '/'"
+ .format(self))
+
+ # Cannot proceed if layout specifies an element that isn't part
+ # of the dependencies.
+ for item in self.__layout:
+ if item['element']:
+ if not self.search(Scope.BUILD, item['element']):
+ raise ElementError("{}: '{}' in layout not found in dependencies"
+ .format(self, item['element']))
+
+ def preflight(self):
+ # The layout, if set, must make sense.
+ self.__validate_layout()
+
+ def get_unique_key(self):
+ return {
+ 'commands': self.__commands,
+ 'cwd': self.__cwd,
+ 'install-root': self.__install_root,
+ 'layout': self.__layout,
+ 'root-read-only': self.__root_read_only
+ }
+
+ def configure_sandbox(self, sandbox):
+
+ # Setup the environment and work directory
+ sandbox.set_work_directory(self.__cwd)
+
+ # Setup environment
+ sandbox.set_environment(self.get_environment())
+
+ # Tell the sandbox to mount the install root
+ directories = {self.__install_root: False}
+
+ # Mark the artifact directories in the layout
+ for item in self.__layout:
+ destination = item['destination']
+ was_artifact = directories.get(destination, False)
+ directories[destination] = item['element'] or was_artifact
+
+ for directory, artifact in directories.items():
+ # Root does not need to be marked as it is always mounted
+ # with artifact (unless explicitly marked non-artifact)
+ if directory != '/':
+ sandbox.mark_directory(directory, artifact=artifact)
+
+ def stage(self, sandbox):
+
+ # Stage the elements, and run integration commands where appropriate.
+ if not self.__layout:
+ # if no layout set, stage all dependencies into /
+ for build_dep in self.dependencies(Scope.BUILD, recurse=False):
+ with self.timed_activity("Staging {} at /"
+ .format(build_dep.name), silent_nested=True):
+ build_dep.stage_dependency_artifacts(sandbox, Scope.RUN, path="/")
+
+ with sandbox.batch(SandboxFlags.NONE):
+ for build_dep in self.dependencies(Scope.BUILD, recurse=False):
+ with self.timed_activity("Integrating {}".format(build_dep.name), silent_nested=True):
+ for dep in build_dep.dependencies(Scope.RUN):
+ dep.integrate(sandbox)
+ else:
+ # If layout, follow its rules.
+ for item in self.__layout:
+
+ # Skip layout members which don't stage an element
+ if not item['element']:
+ continue
+
+ element = self.search(Scope.BUILD, item['element'])
+ if item['destination'] == '/':
+ with self.timed_activity("Staging {} at /".format(element.name),
+ silent_nested=True):
+ element.stage_dependency_artifacts(sandbox, Scope.RUN)
+ else:
+ with self.timed_activity("Staging {} at {}"
+ .format(element.name, item['destination']),
+ silent_nested=True):
+ virtual_dstdir = sandbox.get_virtual_directory()
+ virtual_dstdir.descend(*item['destination'].lstrip(os.sep).split(os.sep), create=True)
+ element.stage_dependency_artifacts(sandbox, Scope.RUN, path=item['destination'])
+
+ with sandbox.batch(SandboxFlags.NONE):
+ for item in self.__layout:
+
+ # Skip layout members which don't stage an element
+ if not item['element']:
+ continue
+
+ element = self.search(Scope.BUILD, item['element'])
+
+ # Integration commands can only be run for elements staged to /
+ if item['destination'] == '/':
+ with self.timed_activity("Integrating {}".format(element.name),
+ silent_nested=True):
+ for dep in element.dependencies(Scope.RUN):
+ dep.integrate(sandbox)
+
+ install_root_path_components = self.__install_root.lstrip(os.sep).split(os.sep)
+ sandbox.get_virtual_directory().descend(*install_root_path_components, create=True)
+
+ def assemble(self, sandbox):
+
+ flags = SandboxFlags.NONE
+ if self.__root_read_only:
+ flags |= SandboxFlags.ROOT_READ_ONLY
+
+ with sandbox.batch(flags, collect=self.__install_root):
+ for groupname, commands in self.__commands.items():
+ with sandbox.batch(flags, label="Running '{}'".format(groupname)):
+ for cmd in commands:
+ # Note the -e switch to 'sh' means to exit with an error
+ # if any untested command fails.
+ sandbox.run(['sh', '-c', '-e', cmd + '\n'],
+ flags,
+ label=cmd)
+
+ # Return where the result can be collected from
+ return self.__install_root
+
+
+def setup():
+ return ScriptElement
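+
+
+ # A minimal sketch of a derived element using the public ScriptElement API
+ # (the element name 'base/base.bst', the paths and the commands are
+ # hypothetical; a real plugin would read these values from its YAML
+ # configuration node). Illustrative only, not exported by this module.
+ class _ExampleScriptElement(ScriptElement):
+
+     def configure(self, node):
+         self.set_work_dir("/buildstream/work")
+         self.set_install_root("/buildstream/install")
+         self.set_root_read_only(True)
+
+         # Stage the 'base/base.bst' runtime at the sandbox root and provide
+         # writable work and install locations.
+         self.layout_add("base/base.bst", "/")
+         self.layout_add(None, "/buildstream/work")
+         self.layout_add(None, "/buildstream/install")
+
+         # Command groups run in the order they were added.
+         self.add_commands("build", ["make -C /buildstream/work"])
+         self.add_commands("install", ["make -C /buildstream/work install DESTDIR=/buildstream/install"])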
diff --git a/src/buildstream/source.py b/src/buildstream/source.py
new file mode 100644
index 000000000..fe94a15d7
--- /dev/null
+++ b/src/buildstream/source.py
@@ -0,0 +1,1274 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+"""
+Source - Base source class
+==========================
+
+.. _core_source_builtins:
+
+Built-in functionality
+----------------------
+
+The Source base class provides built in functionality that may be overridden
+by individual plugins.
+
+* Directory
+
+ The ``directory`` variable can be set for all sources of a type in project.conf
+ or per source within an element.
+
+ This sets the location within the build root into which the content of the source
+ will be loaded. If the location does not exist, it will be created.
+
+.. _core_source_abstract_methods:
+
+Abstract Methods
+----------------
+For loading and configuration purposes, Sources must implement the
+:ref:`Plugin base class abstract methods <core_plugin_abstract_methods>`.
+
+.. attention::
+
+ In order to ensure that all configuration data is processed at
+ load time, it is important that all URLs have been processed during
+ :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`.
+
+ Source implementations *must* either call
+ :func:`Source.translate_url() <buildstream.source.Source.translate_url>` or
+ :func:`Source.mark_download_url() <buildstream.source.Source.mark_download_url>`
+ for every URL that has been specified in the configuration during
+ :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`
+
+Sources expose the following abstract methods. Unless explicitly mentioned,
+these methods are mandatory to implement.
+
+* :func:`Source.get_consistency() <buildstream.source.Source.get_consistency>`
+
+ Report the source's consistency state.
+
+* :func:`Source.load_ref() <buildstream.source.Source.load_ref>`
+
+ Load the ref from a specific YAML node
+
+* :func:`Source.get_ref() <buildstream.source.Source.get_ref>`
+
+ Fetch the source ref
+
+* :func:`Source.set_ref() <buildstream.source.Source.set_ref>`
+
+ Set a new ref explicitly
+
+* :func:`Source.track() <buildstream.source.Source.track>`
+
+ Automatically derive a new ref from a symbolic tracking branch
+
+* :func:`Source.fetch() <buildstream.source.Source.fetch>`
+
+ Fetch the actual payload for the currently set ref
+
+* :func:`Source.stage() <buildstream.source.Source.stage>`
+
+ Stage the sources for a given ref at a specified location
+
+* :func:`Source.init_workspace() <buildstream.source.Source.init_workspace>`
+
+ Stage sources in a local directory for use as a workspace.
+
+ **Optional**: If left unimplemented, this will default to calling
+ :func:`Source.stage() <buildstream.source.Source.stage>`
+
+* :func:`Source.get_source_fetchers() <buildstream.source.Source.get_source_fetchers>`
+
+ Get the objects that are used for fetching.
+
+ **Optional**: This only needs to be implemented for sources that need to
+ download from multiple URLs while fetching (e.g. a git repo and its
+ submodules). For details on how to define a SourceFetcher, see
+ :ref:`SourceFetcher <core_source_fetcher>`.
+
+* :func:`Source.validate_cache() <buildstream.source.Source.validate_cache>`
+
+ Perform any validations which require the sources to be cached.
+
+ **Optional**: This is completely optional and will do nothing if left unimplemented.
+
+Accessing previous sources
+--------------------------
+*Since: 1.4*
+
+In the general case, all sources are fetched and tracked independently of one
+another. In situations where a source needs to access previous source(s) in
+ order to perform its own track and/or fetch, the following attributes can be set to
+request access to previous sources:
+
+* :attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_TRACK`
+
+ Indicate that access to previous sources is required during track
+
+* :attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_FETCH`
+
+ Indicate that access to previous sources is required during fetch
+
+The intended use of such plugins is to fetch external dependencies of other
+sources, typically using some kind of package manager, such that all the
+dependencies of the original source(s) are available at build time.
+
+When implementing such a plugin, implementors should adhere to the following
+guidelines:
+
+* Implementations must be able to store the obtained artifacts in a
+ subdirectory.
+
+* Implementations must be able to deterministically generate a unique ref, such
+ that two refs are different if and only if they produce different outputs.
+
+* Implementations must not introduce host contamination.
+
+
+.. _core_source_fetcher:
+
+SourceFetcher - Object for fetching individual URLs
+===================================================
+
+
+Abstract Methods
+----------------
+SourceFetchers expose the following abstract methods. Unless explicitly
+mentioned, these methods are mandatory to implement.
+
+* :func:`SourceFetcher.fetch() <buildstream.source.SourceFetcher.fetch>`
+
+ Fetches the URL associated with this SourceFetcher, optionally taking an
+ alias override.
+
+Class Reference
+---------------
+"""
+
+import os
+from collections.abc import Mapping
+from contextlib import contextmanager
+
+from . import _yaml, utils
+from .plugin import Plugin
+from .types import Consistency
+from ._exceptions import BstError, ImplError, ErrorDomain
+from ._loader.metasource import MetaSource
+from ._projectrefs import ProjectRefStorage
+from ._cachekey import generate_key
+
+
+class SourceError(BstError):
+ """This exception should be raised by :class:`.Source` implementations
+ to report errors to the user.
+
+ Args:
+ message (str): The brief error description to report to the user
+ detail (str): A possibly multiline, more detailed error message
+ reason (str): An optional machine readable reason string, used for test cases
+ temporary (bool): An indicator of whether the error may occur again if the operation is retried. (*Since: 1.2*)
+ """
+ def __init__(self, message, *, detail=None, reason=None, temporary=False):
+ super().__init__(message, detail=detail, domain=ErrorDomain.SOURCE, reason=reason, temporary=temporary)
+
+
+class SourceFetcher():
+ """SourceFetcher()
+
+ This interface exists so that a source that downloads from multiple
+ places (e.g. a git source with submodules) has a consistent interface for
+ fetching and substituting aliases.
+
+ *Since: 1.2*
+
+ .. attention::
+
+ When implementing a SourceFetcher, remember to call
+ :func:`Source.mark_download_url() <buildstream.source.Source.mark_download_url>`
+ for every URL found in the configuration data at
+ :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>` time.
+ """
+ def __init__(self):
+ self.__alias = None
+
+ #############################################################
+ # Abstract Methods #
+ #############################################################
+ def fetch(self, alias_override=None, **kwargs):
+ """Fetch remote sources and mirror them locally, ensuring at least
+ that the specific reference is cached locally.
+
+ Args:
+ alias_override (str): The alias to use instead of the default one
+ defined by the :ref:`aliases <project_source_aliases>` field
+ in the project's config.
+
+ Raises:
+ :class:`.SourceError`
+
+ Implementors should raise :class:`.SourceError` if there is some
+ network error or if the source reference could not be matched.
+ """
+ raise ImplError("SourceFetcher '{}' does not implement fetch()".format(type(self)))
+
+ #############################################################
+ # Public Methods #
+ #############################################################
+ def mark_download_url(self, url):
+ """Identifies the URL that this SourceFetcher uses to download
+
+ This must be called during the fetcher's initialization
+
+ Args:
+ url (str): The url used to download.
+ """
+ self.__alias = _extract_alias(url)
+
+ #############################################################
+ # Private Methods used in BuildStream #
+ #############################################################
+
+ # Returns the alias used by this fetcher
+ def _get_alias(self):
+ return self.__alias
+
+
+class Source(Plugin):
+ """Source()
+
+ Base Source class.
+
+ All Sources derive from this class, this interface defines how
+ the core will be interacting with Sources.
+ """
+ __defaults = {} # The defaults from the project
+ __defaults_set = False # Flag, in case there are not defaults at all
+
+ BST_REQUIRES_PREVIOUS_SOURCES_TRACK = False
+ """Whether access to previous sources is required during track
+
+ When set to True:
+ * all sources listed before this source in the given element will be
+ fetched before this source is tracked
+ * Source.track() will be called with an additional keyword argument
+ `previous_sources_dir` where previous sources will be staged
+ * this source can not be the first source for an element
+
+ *Since: 1.4*
+ """
+
+ BST_REQUIRES_PREVIOUS_SOURCES_FETCH = False
+ """Whether access to previous sources is required during fetch
+
+ When set to True:
+ * all sources listed before this source in the given element will be
+ fetched before this source is fetched
+ * Source.fetch() will be called with an additional keyword argument
+ `previous_sources_dir` where previous sources will be staged
+ * this source can not be the first source for an element
+
+ *Since: 1.4*
+ """
+
+ BST_REQUIRES_PREVIOUS_SOURCES_STAGE = False
+ """Whether access to previous sources is required during cache
+
+ When set to True:
+ * All sources listed before current source in the given element will be
+ staged with the source when it's cached.
+ * This source can not be the first source for an element.
+
+ *Since: 1.4*
+ """
+
+ def __init__(self, context, project, meta, *, alias_override=None, unique_id=None):
+ provenance = _yaml.node_get_provenance(meta.config)
+ super().__init__("{}-{}".format(meta.element_name, meta.element_index),
+ context, project, provenance, "source", unique_id=unique_id)
+
+ self.__source_cache = context.sourcecache
+
+ self.__element_name = meta.element_name # The name of the element owning this source
+ self.__element_index = meta.element_index # The index of the source in the owning element's source list
+ self.__element_kind = meta.element_kind # The kind of the element owning this source
+ self.__directory = meta.directory # Staging relative directory
+ self.__consistency = Consistency.INCONSISTENT # Cached consistency state
+
+ self.__key = None # Cache key for source
+
+ # The alias_override is only set on a re-instantiated Source
+ self.__alias_override = alias_override # Tuple of alias and its override to use instead
+ self.__expected_alias = None # The primary alias
+ self.__marked_urls = set() # Set of marked download URLs
+
+ # Collect the composited element configuration and
+ # ask the element to configure itself.
+ self.__init_defaults(project, meta)
+ self.__config = self.__extract_config(meta)
+ self.__first_pass = meta.first_pass
+
+ self._configure(self.__config)
+
+ COMMON_CONFIG_KEYS = ['kind', 'directory']
+ """Common source config keys
+
+ Source config keys that must not be accessed in configure(), and
+ should be checked for using node_validate().
+ """
+
+ #############################################################
+ # Abstract Methods #
+ #############################################################
+ def get_consistency(self):
+ """Report whether the source has a resolved reference
+
+ Returns:
+ (:class:`.Consistency`): The source consistency
+ """
+ raise ImplError("Source plugin '{}' does not implement get_consistency()".format(self.get_kind()))
+
+ def load_ref(self, node):
+ """Loads the *ref* for this Source from the specified *node*.
+
+ Args:
+ node (dict): The YAML node to load the ref from
+
+ .. note::
+
+ The *ref* for the Source is expected to be read at
+ :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>` time,
+ this will only be used for loading refs from alternative locations
+ than in the `element.bst` file where the given Source object has
+ been declared.
+
+ *Since: 1.2*
+ """
+ raise ImplError("Source plugin '{}' does not implement load_ref()".format(self.get_kind()))
+
+ def get_ref(self):
+ """Fetch the internal ref, however it is represented
+
+ Returns:
+ (simple object): The internal source reference, or ``None``
+
+ .. note::
+
+ The reference is the user provided (or track resolved) value
+ the plugin uses to represent a specific input, like a commit
+ in a VCS or a tarball's checksum. Usually the reference is a string,
+ but the plugin may choose to represent it with a tuple or such.
+
+ Implementations *must* return a ``None`` value in the case that
+ the ref was not loaded. E.g. a ``(None, None)`` tuple is not acceptable.
+ """
+ raise ImplError("Source plugin '{}' does not implement get_ref()".format(self.get_kind()))
+
+ def set_ref(self, ref, node):
+ """Applies the internal ref, however it is represented
+
+ Args:
+ ref (simple object): The internal source reference to set, or ``None``
+ node (dict): The same dictionary which was previously passed
+ to :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`
+
+ See :func:`Source.get_ref() <buildstream.source.Source.get_ref>`
+ for a discussion on the *ref* parameter.
+
+ .. note::
+
+ Implementors must support the special ``None`` value here to
+ allow clearing any existing ref.
+ """
+ raise ImplError("Source plugin '{}' does not implement set_ref()".format(self.get_kind()))
+
+ def track(self, **kwargs):
+ """Resolve a new ref from the plugin's track option
+
+ Args:
+ previous_sources_dir (str): directory where previous sources are staged.
+ Note that this keyword argument is available only when
+ :attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_TRACK`
+ is set to True.
+
+ Returns:
+ (simple object): A new internal source reference, or None
+
+ If the backend in question supports resolving references from
+ a symbolic tracking branch or tag, then this should be implemented
+ to perform this task on behalf of :ref:`bst source track <invoking_source_track>`
+ commands.
+
+ This usually requires fetching new content from a remote origin
+ to see if a new ref has appeared for your branch or tag. If the
+ backend store allows one to query for a new ref from symbolic
+ tracking data without downloading, then that is desirable.
+
+ See :func:`Source.get_ref() <buildstream.source.Source.get_ref>`
+ for a discussion on the *ref* parameter.
+ """
+ # Allow a non implementation
+ return None
+
+ def fetch(self, **kwargs):
+ """Fetch remote sources and mirror them locally, ensuring at least
+ that the specific reference is cached locally.
+
+ Args:
+ previous_sources_dir (str): directory where previous sources are staged.
+ Note that this keyword argument is available only when
+ :attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_FETCH`
+ is set to True.
+
+ Raises:
+ :class:`.SourceError`
+
+ Implementors should raise :class:`.SourceError` if there is some
+ network error or if the source reference could not be matched.
+ """
+ raise ImplError("Source plugin '{}' does not implement fetch()".format(self.get_kind()))
+
+ def stage(self, directory):
+ """Stage the sources to a directory
+
+ Args:
+ directory (str): Path to stage the source
+
+ Raises:
+ :class:`.SourceError`
+
+ Implementors should assume that *directory* already exists
+ and stage already cached sources to the passed directory.
+
+ Implementors should raise :class:`.SourceError` when encountering
+ some system error.
+ """
+ raise ImplError("Source plugin '{}' does not implement stage()".format(self.get_kind()))
+
+ def init_workspace(self, directory):
+ """Initialises a new workspace
+
+ Args:
+ directory (str): Path of the workspace to init
+
+ Raises:
+ :class:`.SourceError`
+
+ Default implementation is to call
+ :func:`Source.stage() <buildstream.source.Source.stage>`.
+
+ Implementors overriding this method should assume that *directory*
+ already exists.
+
+ Implementors should raise :class:`.SourceError` when encountering
+ some system error.
+ """
+ self.stage(directory)
+
+ def get_source_fetchers(self):
+ """Get the objects that are used for fetching
+
+ If this source doesn't download from multiple URLs,
+ returning None and falling back on the default behaviour
+ is recommended.
+
+ Returns:
+ iterable: The Source's SourceFetchers, if any.
+
+ .. note::
+
+ Implementors can implement this as a generator.
+
+ The :func:`SourceFetcher.fetch() <buildstream.source.SourceFetcher.fetch>`
+ method will be called on the returned fetchers one by one,
+ before consuming the next fetcher in the list.
+
+ *Since: 1.2*
+ """
+ return []
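+
+ # A sketch of get_source_fetchers() implemented as a generator (the
+ # '_example_submodule_urls' attribute and the fetcher subclass are
+ # hypothetical): each fetcher marks its URL and is consumed one at a time
+ # by the core. Illustration only, never called.
+ def _example_get_source_fetchers(self):
+
+     class _ExampleFetcher(SourceFetcher):
+         def __init__(self, url):
+             super().__init__()
+             self.mark_download_url(url)
+
+         def fetch(self, alias_override=None, **kwargs):
+             # ... a real fetcher would download its URL here ...
+             pass
+
+     for url in getattr(self, '_example_submodule_urls', []):
+         yield _ExampleFetcher(url)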
+
+ def validate_cache(self):
+ """Implement any validations once we know the sources are cached
+
+ This is guaranteed to be called only once for a given session
+ once the sources are known to be
+ :attr:`Consistency.CACHED <buildstream.types.Consistency.CACHED>`.
+ If source tracking is enabled in the session for this source,
+ then this will only be called once the sources become cached
+ after tracking completes.
+
+ *Since: 1.4*
+ """
+
+ #############################################################
+ # Public Methods #
+ #############################################################
+ def get_mirror_directory(self):
+ """Fetches the directory where this source should store things
+
+ Returns:
+ (str): The directory belonging to this source
+ """
+
+ # Create the directory if it doesn't exist
+ context = self._get_context()
+ directory = os.path.join(context.sourcedir, self.get_kind())
+ os.makedirs(directory, exist_ok=True)
+ return directory
+
+ def translate_url(self, url, *, alias_override=None, primary=True):
+ """Translates the given url which may be specified with an alias
+ into a fully qualified url.
+
+ Args:
+ url (str): A URL, which may be using an alias
+ alias_override (str): Optionally, a URI to override the alias with. (*Since: 1.2*)
+ primary (bool): Whether this is the primary URL for the source. (*Since: 1.2*)
+
+ Returns:
+ str: The fully qualified URL, with aliases resolved
+
+ .. note::
+
+ This must be called for every URL in the configuration during
+ :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>` if
+ :func:`Source.mark_download_url() <buildstream.source.Source.mark_download_url>`
+ is not called.
+ """
+ # Ensure that the download URL is also marked
+ self.mark_download_url(url, primary=primary)
+
+ # Alias overriding can happen explicitly (by command-line) or
+ # implicitly (the Source being constructed with an __alias_override).
+ if alias_override or self.__alias_override:
+ url_alias, url_body = url.split(utils._ALIAS_SEPARATOR, 1)
+ if url_alias:
+ if alias_override:
+ url = alias_override + url_body
+ else:
+ # Implicit alias overrides may only be done for one
+ # specific alias, so that sources that fetch from multiple
+ # URLs and use different aliases default to only overriding
+ # one alias, rather than getting confused.
+ override_alias = self.__alias_override[0]
+ override_url = self.__alias_override[1]
+ if url_alias == override_alias:
+ url = override_url + url_body
+ return url
+ else:
+ project = self._get_project()
+ return project.translate_url(url, first_pass=self.__first_pass)
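+
+ # A sketch of the URL-marking contract described above (the 'upstream:'
+ # alias and the URL are made up): every aliased URL must pass through
+ # translate_url() or mark_download_url() at Plugin.configure() time.
+ # Illustration only, never called.
+ def _example_configure_urls(self):
+     # Resolves the alias against the project configuration and records the
+     # URL as the primary download URL for this source.
+     self.example_url = self.translate_url('upstream:releases/example-1.0.tar.gz')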
+
+ def mark_download_url(self, url, *, primary=True):
+ """Identifies the URL that this Source uses to download
+
+ Args:
+ url (str): The URL used to download
+ primary (bool): Whether this is the primary URL for the source
+
+ .. note::
+
+ This must be called for every URL in the configuration during
+ :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>` if
+ :func:`Source.translate_url() <buildstream.source.Source.translate_url>`
+ is not called.
+
+ *Since: 1.2*
+ """
+ # Only mark the Source level aliases on the main instance, not in
+ # a reinstantiated instance in mirroring.
+ if not self.__alias_override:
+ if primary:
+ expected_alias = _extract_alias(url)
+
+ assert (self.__expected_alias is None or
+ self.__expected_alias == expected_alias), \
+ "Primary URL marked twice with different URLs"
+
+ self.__expected_alias = expected_alias
+
+ # Enforce proper behaviour of plugins by ensuring that all
+ # aliased URLs have been marked at Plugin.configure() time.
+ #
+ if self._get_configuring():
+ # Record marked urls while configuring
+ #
+ self.__marked_urls.add(url)
+ else:
+ # If an unknown aliased URL is seen after configuring,
+ # this is an error.
+ #
+ # It is still possible that a URL that was not mentioned
+ # in the element configuration can be marked, this is
+ # the case for git submodules which might be automatically
+ # discovered.
+ #
+ assert (url in self.__marked_urls or not _extract_alias(url)), \
+ "URL was not seen at configure time: {}".format(url)
+
+ def get_project_directory(self):
+ """Fetch the project base directory
+
+ This is useful for sources which need to load resources
+ stored somewhere inside the project.
+
+ Returns:
+ str: The project base directory
+ """
+ project = self._get_project()
+ return project.directory
+
+ @contextmanager
+ def tempdir(self):
+ """Context manager for working in a temporary directory
+
+ Yields:
+ (str): A path to a temporary directory
+
+ This should be used by source plugins directly instead of the tempfile
+ module. This one will automatically clean up in case of termination by
+ catching the signal before os._exit(). It will also use the 'mirror
+ directory' as expected for a source.
+ """
+ mirrordir = self.get_mirror_directory()
+ with utils._tempdir(dir=mirrordir) as tempdir:
+ yield tempdir
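+
+ # A sketch of how a plugin might combine get_mirror_directory() and
+ # tempdir() when fetching (the filename argument and the download step are
+ # hypothetical): the payload is assembled in a temporary directory on the
+ # same filesystem and then moved into the mirror atomically.
+ # Illustration only, never called.
+ def _example_download_to_mirror(self, filename):
+     mirrordir = self.get_mirror_directory()
+     with self.tempdir() as tmp:
+         staging_path = os.path.join(tmp, filename)
+         with open(staging_path, 'w') as f:
+             f.write('placeholder payload\n')  # a real plugin downloads here
+         os.replace(staging_path, os.path.join(mirrordir, filename))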
+
+ #############################################################
+ # Private Abstract Methods used in BuildStream #
+ #############################################################
+
+ # Returns the local path to the source
+ #
+ # If the source is locally available, this method returns the absolute
+ # path. Otherwise, the return value is None.
+ #
+ # This is an optimization for local sources and optional to implement.
+ #
+ # Returns:
+ # (str): The local absolute path, or None
+ #
+ def _get_local_path(self):
+ return None
+
+ #############################################################
+ # Private Methods used in BuildStream #
+ #############################################################
+
+ # Wrapper around preflight() method
+ #
+ def _preflight(self):
+ try:
+ self.preflight()
+ except BstError as e:
+ # Prepend provenance to the error
+ raise SourceError("{}: {}".format(self, e), reason=e.reason) from e
+
+ # Update cached consistency for a source
+ #
+ # This must be called whenever the state of a source may have changed.
+ #
+ def _update_state(self):
+
+ if self.__consistency < Consistency.CACHED:
+
+ # Source consistency interrogations are silent.
+ context = self._get_context()
+ with context.silence():
+ self.__consistency = self.get_consistency() # pylint: disable=assignment-from-no-return
+
+ # Give the Source an opportunity to validate the cached
+ # sources as soon as the Source becomes Consistency.CACHED.
+ if self.__consistency == Consistency.CACHED:
+ self.validate_cache()
+
+ # Return cached consistency
+ #
+ def _get_consistency(self):
+ return self.__consistency
+
+ # Wrapper function around plugin provided fetch method
+ #
+ # Args:
+ # previous_sources (list): List of Sources listed prior to this source
+ #
+ def _fetch(self, previous_sources):
+
+ if self.BST_REQUIRES_PREVIOUS_SOURCES_FETCH:
+ self.__ensure_previous_sources(previous_sources)
+ with self.tempdir() as staging_directory:
+ for src in previous_sources:
+ src._stage(staging_directory)
+ self.__do_fetch(previous_sources_dir=self.__ensure_directory(staging_directory))
+ else:
+ self.__do_fetch()
+
+ def _cache(self, previous_sources):
+ # stage the source into the source cache
+ self.__source_cache.commit(self, previous_sources)
+
+ # Wrapper for stage() api which gives the source
+ # plugin a fully constructed path considering the
+ # 'directory' option
+ #
+ def _stage(self, directory):
+ staging_directory = self.__ensure_directory(directory)
+
+ self.stage(staging_directory)
+
+ # Wrapper for init_workspace()
+ def _init_workspace(self, directory):
+ directory = self.__ensure_directory(directory)
+
+ self.init_workspace(directory)
+
+ # _get_unique_key():
+ #
+ # Wrapper for get_unique_key() api
+ #
+ # Args:
+ # include_source (bool): Whether to include the delegated source key
+ #
+ def _get_unique_key(self, include_source):
+ key = {}
+
+ key['directory'] = self.__directory
+ if include_source:
+ key['unique'] = self.get_unique_key() # pylint: disable=assignment-from-no-return
+
+ return key
+
+ # _project_refs():
+ #
+ # Gets the appropriate ProjectRefs object for this source,
+ # which depends on whether the owning element is a junction
+ #
+ # Args:
+ # project (Project): The project to check
+ #
+ def _project_refs(self, project):
+ element_kind = self.__element_kind
+ if element_kind == 'junction':
+ return project.junction_refs
+ return project.refs
+
+ # _load_ref():
+ #
+ # Loads the ref for the said source.
+ #
+ # Raises:
+ # (SourceError): If the source does not implement load_ref()
+ #
+ # Returns:
+ # (ref): A redundant ref specified inline for a project which uses project.refs
+ #
+ # This is partly a wrapper around `Source.load_ref()`, it will decide
+ # where to load the ref from depending on which project the source belongs
+ # to and whether that project uses a project.refs file.
+ #
+ # Note the return value is used to construct a summarized warning in the
+ # case that the toplevel project uses project.refs and also lists refs
+ # which will be ignored.
+ #
+ def _load_ref(self):
+ context = self._get_context()
+ project = self._get_project()
+ toplevel = context.get_toplevel_project()
+ redundant_ref = None
+
+ element_name = self.__element_name
+ element_idx = self.__element_index
+
+ def do_load_ref(node):
+ try:
+ self.load_ref(ref_node)
+ except ImplError as e:
+ raise SourceError("{}: Storing refs in project.refs is not supported by '{}' sources"
+ .format(self, self.get_kind()),
+ reason="unsupported-load-ref") from e
+
+ # If the main project overrides the ref, use the override
+ if project is not toplevel and toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS:
+ refs = self._project_refs(toplevel)
+ ref_node = refs.lookup_ref(project.name, element_name, element_idx)
+ if ref_node is not None:
+ do_load_ref(ref_node)
+
+ # If the project itself uses project.refs, clear the ref which
+ # was already loaded via Source.configure(), as this would
+ # violate the rule of refs being either in project.refs or in
+ # the elements themselves.
+ #
+ elif project.ref_storage == ProjectRefStorage.PROJECT_REFS:
+
+ # First warn if there is a ref already loaded, and reset it
+ redundant_ref = self.get_ref() # pylint: disable=assignment-from-no-return
+ if redundant_ref is not None:
+ self.set_ref(None, {})
+
+ # Try to load the ref
+ refs = self._project_refs(project)
+ ref_node = refs.lookup_ref(project.name, element_name, element_idx)
+ if ref_node is not None:
+ do_load_ref(ref_node)
+
+ return redundant_ref
+
+ # _set_ref()
+ #
+ # Persists the ref for this source. This will decide where to save the
+ # ref, or refuse to persist it, depending on active ref-storage project
+ # settings.
+ #
+ # Args:
+ # new_ref (simple object): The new reference to save
+ # save (bool): Whether to write the new reference to file or not
+ #
+ # Returns:
+ # (bool): Whether the ref has changed
+ #
+ # Raises:
+ # (SourceError): In the case we encounter errors saving a file to disk
+ #
+ def _set_ref(self, new_ref, *, save):
+
+ context = self._get_context()
+ project = self._get_project()
+ toplevel = context.get_toplevel_project()
+ toplevel_refs = self._project_refs(toplevel)
+ provenance = self._get_provenance()
+
+ element_name = self.__element_name
+ element_idx = self.__element_index
+
+ #
+ # Step 1 - Obtain the node
+ #
+ node = {}
+ if toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS:
+ node = toplevel_refs.lookup_ref(project.name, element_name, element_idx, write=True)
+
+ if project is toplevel and not node:
+ node = provenance.node
+
+ # Ensure the node is not from a junction
+ if not toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS and provenance.project is not toplevel:
+ if provenance.project is project:
+ self.warn("{}: Not persisting new reference in junctioned project".format(self))
+ elif provenance.project is None:
+ assert provenance.filename == ""
+ assert provenance.shortname == ""
+ raise SourceError("{}: Error saving source reference to synthetic node."
+ .format(self))
+ else:
+ raise SourceError("{}: Cannot track source in a fragment from a junction"
+ .format(provenance.shortname),
+ reason="tracking-junction-fragment")
+
+ #
+ # Step 2 - Set the ref in memory, and determine changed state
+ #
+ clean = _yaml.node_sanitize(node, dict_type=dict)
+ to_modify = _yaml.node_sanitize(node, dict_type=dict)
+
+ current_ref = self.get_ref() # pylint: disable=assignment-from-no-return
+
+ # Set the ref regardless of whether it changed, the
+ # TrackQueue() will want to update a specific node with
+ # the ref, regardless of whether the original has changed.
+ self.set_ref(new_ref, to_modify)
+
+ if current_ref == new_ref or not save:
+ # Note: We do not look for and propagate changes at this point
+ # which might result in desync depending on whether something changes about
+ # tracking in the future. For now, this is quite safe.
+ return False
+
+ actions = {}
+ for k, v in clean.items():
+ if k not in to_modify:
+ actions[k] = 'del'
+ else:
+ if v != to_modify[k]:
+ actions[k] = 'mod'
+ for k in to_modify.keys():
+ if k not in clean:
+ actions[k] = 'add'
+
+ def walk_container(container, path):
+ # For each step along path, synthesise if we need to.
+ # If we're synthesising missing list entries, we know we're
+ # doing this for project.refs so synthesise empty dicts for the
+ # intervening entries too
+ lpath = [step for step in path]
+ lpath.append("") # We know the last step will be a string key
+ for step, next_step in zip(lpath, lpath[1:]):
+ if type(step) is str: # pylint: disable=unidiomatic-typecheck
+ # handle dict container
+ if step not in container:
+ if type(next_step) is str: # pylint: disable=unidiomatic-typecheck
+ container[step] = {}
+ else:
+ container[step] = []
+ container = container[step]
+ else:
+ # handle list container
+ if len(container) <= step:
+ while len(container) <= step:
+ container.append({})
+ container = container[step]
+ return container
+
+ def process_value(action, container, path, key, new_value):
+ container = walk_container(container, path)
+ if action == 'del':
+ del container[key]
+ elif action == 'mod':
+ container[key] = new_value
+ elif action == 'add':
+ container[key] = new_value
+ else:
+ assert False, \
+ "BUG: Unknown action: {}".format(action)
+
+ roundtrip_cache = {}
+ for key, action in actions.items():
+ # Obtain the top level node and its file
+ if action == 'add':
+ provenance = _yaml.node_get_provenance(node)
+ else:
+ provenance = _yaml.node_get_provenance(node, key=key)
+
+ toplevel_node = provenance.toplevel
+
+ # Get the path to whatever changed
+ if action == 'add':
+ path = _yaml.node_find_target(toplevel_node, node)
+ else:
+ path = _yaml.node_find_target(toplevel_node, node, key=key)
+
+ roundtrip_file = roundtrip_cache.get(provenance.filename)
+ if not roundtrip_file:
+ roundtrip_file = roundtrip_cache[provenance.filename] = _yaml.roundtrip_load(
+ provenance.filename,
+ allow_missing=True
+ )
+
+ # Get the value of the round trip file that we need to change
+ process_value(action, roundtrip_file, path, key, to_modify.get(key))
+
+ #
+ # Step 3 - Apply the change in project data
+ #
+ for filename, data in roundtrip_cache.items():
+ # This is our roundtrip dump from the track
+ try:
+ _yaml.roundtrip_dump(data, filename)
+ except OSError as e:
+ raise SourceError("{}: Error saving source reference to '{}': {}"
+ .format(self, filename, e),
+ reason="save-ref-error") from e
+
+ return True
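+
+ # A standalone sketch of the add/mod/del computation performed above (the
+ # sample dictionaries are made up): keys present only in 'clean' are
+ # deletions, keys present only in 'to_modify' are additions, and keys whose
+ # values differ are modifications. Illustration only, never called.
+ @staticmethod
+ def _example_ref_actions():
+     clean = {'ref': 'abc', 'track': 'master'}
+     to_modify = {'ref': 'def', 'tag': 'v1.0'}
+     actions = {}
+     for k, v in clean.items():
+         if k not in to_modify:
+             actions[k] = 'del'
+         elif v != to_modify[k]:
+             actions[k] = 'mod'
+     for k in to_modify:
+         if k not in clean:
+             actions[k] = 'add'
+     return actions  # {'ref': 'mod', 'track': 'del', 'tag': 'add'}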
+
+ # Wrapper for track()
+ #
+ # Args:
+ # previous_sources (list): List of Sources listed prior to this source
+ #
+ def _track(self, previous_sources):
+ if self.BST_REQUIRES_PREVIOUS_SOURCES_TRACK:
+ self.__ensure_previous_sources(previous_sources)
+ with self.tempdir() as staging_directory:
+ for src in previous_sources:
+ src._stage(staging_directory)
+ new_ref = self.__do_track(previous_sources_dir=self.__ensure_directory(staging_directory))
+ else:
+ new_ref = self.__do_track()
+
+ current_ref = self.get_ref() # pylint: disable=assignment-from-no-return
+
+ if new_ref is None:
+ # No tracking, keep current ref
+ new_ref = current_ref
+
+ if current_ref != new_ref:
+ self.info("Found new revision: {}".format(new_ref))
+
+ # Save ref in local process for subsequent sources
+ self._set_ref(new_ref, save=False)
+
+ return new_ref
+
+ # _requires_previous_sources()
+ #
+ # If a plugin requires access to previous sources at track or fetch time,
+ # then it cannot be the first source of an element.
+ #
+ # Returns:
+ # (bool): Whether this source requires access to previous sources
+ #
+ def _requires_previous_sources(self):
+ return self.BST_REQUIRES_PREVIOUS_SOURCES_TRACK or self.BST_REQUIRES_PREVIOUS_SOURCES_FETCH
+
+ # Returns the alias if it's defined in the project
+ def _get_alias(self):
+ alias = self.__expected_alias
+ project = self._get_project()
+ if project.get_alias_uri(alias, first_pass=self.__first_pass):
+ # The alias must already be defined in the project's aliases
+ # otherwise http://foo gets treated like it contains an alias
+ return alias
+ else:
+ return None
+
+ def _generate_key(self, previous_sources):
+ keys = [self._get_unique_key(True)]
+
+ if self.BST_REQUIRES_PREVIOUS_SOURCES_STAGE:
+ for previous_source in previous_sources:
+ keys.append(previous_source._get_unique_key(True))
+
+ self.__key = generate_key(keys)
+
+ @property
+ def _key(self):
+ return self.__key
+
+ # Gives a ref path that points to where sources are kept in the CAS
+ def _get_source_name(self):
+ # @ is used to prevent conflicts with project names
+ return "{}/{}/{}".format(
+ '@sources',
+ self.get_kind(),
+ self._key)
+
+ def _get_brief_display_key(self):
+ context = self._get_context()
+ key = self._key
+
+ length = min(len(key), context.log_key_length)
+ return key[:length]
+
+ #############################################################
+ # Local Private Methods #
+ #############################################################
+
+ # __clone_for_uri()
+ #
+ # Clone the source with an alternative URI setup for the alias
+ # which this source uses.
+ #
+ # This is used for iteration over source mirrors.
+ #
+ # Args:
+ # uri (str): The alternative URI for this source's alias
+ #
+ # Returns:
+ # (Source): A new clone of this Source, with the specified URI
+ # as the value of the alias this Source has marked as
+ # primary with either mark_download_url() or
+ # translate_url().
+ #
+ def __clone_for_uri(self, uri):
+ project = self._get_project()
+ context = self._get_context()
+ alias = self._get_alias()
+ source_kind = type(self)
+
+ # Rebuild a MetaSource from the current element
+ meta = MetaSource(
+ self.__element_name,
+ self.__element_index,
+ self.__element_kind,
+ self.get_kind(),
+ self.__config,
+ self.__directory,
+ )
+
+ meta.first_pass = self.__first_pass
+
+ clone = source_kind(context, project, meta,
+ alias_override=(alias, uri),
+ unique_id=self._unique_id)
+
+ # Do the necessary post instantiation routines here
+ #
+ clone._preflight()
+ clone._load_ref()
+ clone._update_state()
+
+ return clone
+
+ # Tries to call fetch for every mirror, stopping once it succeeds
+ def __do_fetch(self, **kwargs):
+ project = self._get_project()
+ context = self._get_context()
+
+ # Silence the STATUS messages which might happen as a result
+ # of checking the source fetchers.
+ with context.silence():
+ source_fetchers = self.get_source_fetchers()
+
+ # Use the source fetchers if they are provided
+ #
+ if source_fetchers:
+
+            # Use a contorted loop here; this is to allow us to
+ # silence the messages which can result from consuming
+ # the items of source_fetchers, if it happens to be a generator.
+ #
+ source_fetchers = iter(source_fetchers)
+
+ while True:
+
+ with context.silence():
+ try:
+ fetcher = next(source_fetchers)
+ except StopIteration:
+                        # As per PEP 479, we are not allowed to let StopIteration
+                        # be thrown from within a context manager.
+                        # Catch it here and break instead.
+ break
+
+ alias = fetcher._get_alias()
+ for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
+ try:
+ fetcher.fetch(uri)
+ # FIXME: Need to consider temporary vs. permanent failures,
+ # and how this works with retries.
+ except BstError as e:
+ last_error = e
+ continue
+
+ # No error, we're done with this fetcher
+ break
+
+ else:
+ # No break occurred, raise the last detected error
+ raise last_error
+
+ # Default codepath is to reinstantiate the Source
+ #
+ else:
+ alias = self._get_alias()
+ if self.__first_pass:
+ mirrors = project.first_pass_config.mirrors
+ else:
+ mirrors = project.config.mirrors
+ if not mirrors or not alias:
+ self.fetch(**kwargs)
+ return
+
+ for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
+ new_source = self.__clone_for_uri(uri)
+ try:
+ new_source.fetch(**kwargs)
+ # FIXME: Need to consider temporary vs. permanent failures,
+ # and how this works with retries.
+ except BstError as e:
+ last_error = e
+ continue
+
+ # No error, we're done here
+ return
+
+ # Re raise the last detected error
+ raise last_error
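+
+    # Illustrative note (not part of this change): the mirror fallback above
+    # is driven by the project's alias and mirror configuration. A hypothetical
+    # project.conf along these lines would make get_alias_uris() return the
+    # mirror URI in addition to the upstream URI (all names here are made up):
+    #
+    #   aliases:
+    #     foo: https://upstream.example.com/
+    #   mirrors:
+    #   - name: mirror-of-foo
+    #     aliases:
+    #       foo:
+    #       - https://mirror.example.com/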
+
+ # Tries to call track for every mirror, stopping once it succeeds
+ def __do_track(self, **kwargs):
+ project = self._get_project()
+ alias = self._get_alias()
+ if self.__first_pass:
+ mirrors = project.first_pass_config.mirrors
+ else:
+ mirrors = project.config.mirrors
+ # If there are no mirrors, or no aliases to replace, there's nothing to do here.
+ if not mirrors or not alias:
+ return self.track(**kwargs)
+
+ # NOTE: We are assuming here that tracking only requires substituting the
+ # first alias used
+ for uri in reversed(project.get_alias_uris(alias, first_pass=self.__first_pass)):
+ new_source = self.__clone_for_uri(uri)
+ try:
+ ref = new_source.track(**kwargs) # pylint: disable=assignment-from-none
+ # FIXME: Need to consider temporary vs. permanent failures,
+ # and how this works with retries.
+ except BstError as e:
+ last_error = e
+ continue
+ return ref
+ raise last_error
+
+ # Ensures a fully constructed path and returns it
+ def __ensure_directory(self, directory):
+
+ if self.__directory is not None:
+ directory = os.path.join(directory, self.__directory.lstrip(os.sep))
+
+ try:
+ os.makedirs(directory, exist_ok=True)
+ except OSError as e:
+ raise SourceError("Failed to create staging directory: {}"
+ .format(e),
+ reason="ensure-stage-dir-fail") from e
+ return directory
+
+ @classmethod
+ def __init_defaults(cls, project, meta):
+ if not cls.__defaults_set:
+ if meta.first_pass:
+ sources = project.first_pass_config.source_overrides
+ else:
+ sources = project.source_overrides
+ cls.__defaults = _yaml.node_get(sources, Mapping, meta.kind, default_value={})
+ cls.__defaults_set = True
+
+ # This will resolve the final configuration to be handed
+ # off to source.configure()
+ #
+ @classmethod
+ def __extract_config(cls, meta):
+ config = _yaml.node_get(cls.__defaults, Mapping, 'config', default_value={})
+ config = _yaml.node_copy(config)
+
+ _yaml.composite(config, meta.config)
+ _yaml.node_final_assertions(config)
+
+ return config
+
+ # Ensures that previous sources have been tracked and fetched.
+ #
+ def __ensure_previous_sources(self, previous_sources):
+ for index, src in enumerate(previous_sources):
+ # BuildStream should track sources in the order they appear so
+ # previous sources should never be in an inconsistent state
+ assert src.get_consistency() != Consistency.INCONSISTENT
+
+ if src.get_consistency() == Consistency.RESOLVED:
+ src._fetch(previous_sources[0:index])
+
+
+def _extract_alias(url):
+ parts = url.split(utils._ALIAS_SEPARATOR, 1)
+ if len(parts) > 1 and not parts[0].lower() in utils._URI_SCHEMES:
+ return parts[0]
+ else:
+ return ""
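+
+
+# Illustrative note (not part of this change): assuming the alias separator is
+# ':' and 'https' is among the recognised URI schemes, _extract_alias() would
+# behave roughly as follows:
+#
+#   _extract_alias('upstream:repo.git')         -> 'upstream'
+#   _extract_alias('https://example.com/repo')  -> ''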
diff --git a/src/buildstream/storage/__init__.py b/src/buildstream/storage/__init__.py
new file mode 100644
index 000000000..33424ac8d
--- /dev/null
+++ b/src/buildstream/storage/__init__.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jim MacArthur <jim.macarthur@codethink.co.uk>
+
+from ._filebaseddirectory import FileBasedDirectory
+from .directory import Directory
diff --git a/src/buildstream/storage/_casbaseddirectory.py b/src/buildstream/storage/_casbaseddirectory.py
new file mode 100644
index 000000000..2aff29b98
--- /dev/null
+++ b/src/buildstream/storage/_casbaseddirectory.py
@@ -0,0 +1,622 @@
+#
+# Copyright (C) 2018 Bloomberg LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jim MacArthur <jim.macarthur@codethink.co.uk>
+
+"""
+CasBasedDirectory
+=================
+
+Implementation of the Directory class which backs onto a Merkle-tree based content
+addressable storage system.
+
+See also: :ref:`sandboxing`.
+"""
+
+import os
+
+from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+from .directory import Directory, VirtualDirectoryError, _FileType
+from ._filebaseddirectory import FileBasedDirectory
+from ..utils import FileListResult, _magic_timestamp
+
+
+class IndexEntry():
+ """ Directory entry used in CasBasedDirectory.index """
+ def __init__(self, name, entrytype, *, digest=None, target=None, is_executable=False,
+ buildstream_object=None, modified=False):
+ self.name = name
+ self.type = entrytype
+ self.digest = digest
+ self.target = target
+ self.is_executable = is_executable
+ self.buildstream_object = buildstream_object
+ self.modified = modified
+
+ def get_directory(self, parent):
+ if not self.buildstream_object:
+ self.buildstream_object = CasBasedDirectory(parent.cas_cache, digest=self.digest,
+ parent=parent, filename=self.name)
+ self.digest = None
+
+ return self.buildstream_object
+
+ def get_digest(self):
+ if self.digest:
+ return self.digest
+ else:
+ return self.buildstream_object._get_digest()
+
+
+class ResolutionException(VirtualDirectoryError):
+ """ Superclass of all exceptions that can be raised by
+ CasBasedDirectory._resolve. Should not be used outside this module. """
+
+
+class InfiniteSymlinkException(ResolutionException):
+ """ Raised when an infinite symlink loop is found. """
+
+
+class AbsoluteSymlinkException(ResolutionException):
+ """Raised if we try to follow an absolute symlink (i.e. one whose
+ target starts with the path separator) and we have disallowed
+ following such symlinks.
+ """
+
+
+class UnexpectedFileException(ResolutionException):
+    """Raised if we find a file where a directory or symlink was
+    expected, for example when we try to resolve a symlink pointing to
+ /a/b/c but /a/b is a file.
+ """
+ def __init__(self, message=""):
+ """Allow constructor with no arguments, since this can be raised in
+ places where there isn't sufficient information to write the
+ message.
+ """
+ super().__init__(message)
+
+
+# CasBasedDirectory intentionally doesn't call its superclass constructor,
+# which is meant to be unimplemented.
+# pylint: disable=super-init-not-called
+
+class CasBasedDirectory(Directory):
+ """
+ CAS-based directories can have two names; one is a 'common name' which has no effect
+ on functionality, and the 'filename'. If a CasBasedDirectory has a parent, then 'filename'
+ must be the name of an entry in the parent directory's index which points to this object.
+ This is used to inform a parent directory that it must update the given hash for this
+ object when this object changes.
+
+ Typically a top-level CasBasedDirectory will have a common_name and no filename, and
+    subdirectories will have a filename and no common_name. common_name can be used to identify
+ CasBasedDirectory objects in a log file, since they have no unique position in a file
+ system.
+ """
+
+ # Two constants which define the separators used by the remote execution API.
+ _pb2_path_sep = "/"
+ _pb2_absolute_path_prefix = "/"
+
+ def __init__(self, cas_cache, *, digest=None, parent=None, common_name="untitled", filename=None):
+ self.filename = filename
+ self.common_name = common_name
+ self.cas_cache = cas_cache
+ self.__digest = digest
+ self.index = {}
+ self.parent = parent
+ if digest:
+ self._populate_index(digest)
+
+ def _populate_index(self, digest):
+ try:
+ pb2_directory = remote_execution_pb2.Directory()
+ with open(self.cas_cache.objpath(digest), 'rb') as f:
+ pb2_directory.ParseFromString(f.read())
+ except FileNotFoundError as e:
+ raise VirtualDirectoryError("Directory not found in local cache: {}".format(e)) from e
+
+ for entry in pb2_directory.directories:
+ self.index[entry.name] = IndexEntry(entry.name, _FileType.DIRECTORY,
+ digest=entry.digest)
+ for entry in pb2_directory.files:
+ self.index[entry.name] = IndexEntry(entry.name, _FileType.REGULAR_FILE,
+ digest=entry.digest,
+ is_executable=entry.is_executable)
+ for entry in pb2_directory.symlinks:
+ self.index[entry.name] = IndexEntry(entry.name, _FileType.SYMLINK,
+ target=entry.target)
+
+ def _find_self_in_parent(self):
+ assert self.parent is not None
+ parent = self.parent
+ for (k, v) in parent.index.items():
+ if v.buildstream_object == self:
+ return k
+ return None
+
+ def _add_directory(self, name):
+ assert name not in self.index
+
+ newdir = CasBasedDirectory(self.cas_cache, parent=self, filename=name)
+
+ self.index[name] = IndexEntry(name, _FileType.DIRECTORY, buildstream_object=newdir)
+
+ self.__invalidate_digest()
+
+ return newdir
+
+ def _add_file(self, basename, filename, modified=False):
+ entry = IndexEntry(filename, _FileType.REGULAR_FILE,
+ modified=modified or filename in self.index)
+ path = os.path.join(basename, filename)
+ entry.digest = self.cas_cache.add_object(path=path)
+ entry.is_executable = os.access(path, os.X_OK)
+ self.index[filename] = entry
+
+ self.__invalidate_digest()
+
+ def _copy_link_from_filesystem(self, basename, filename):
+ self._add_new_link_direct(filename, os.readlink(os.path.join(basename, filename)))
+
+ def _add_new_link_direct(self, name, target):
+ self.index[name] = IndexEntry(name, _FileType.SYMLINK, target=target, modified=name in self.index)
+
+ self.__invalidate_digest()
+
+ def delete_entry(self, name):
+ if name in self.index:
+ del self.index[name]
+
+ self.__invalidate_digest()
+
+ def descend(self, *paths, create=False):
+ """Descend one or more levels of directory hierarchy and return a new
+ Directory object for that directory.
+
+ Arguments:
+ * *paths (str): A list of strings which are all directory names.
+ * create (boolean): If this is true, the directories will be created if
+ they don't already exist.
+
+ Note: At the moment, creating a directory by descending does
+ not update this object in the CAS cache. However, performing
+ an import_files() into a subdirectory of any depth obtained by
+ descending from this object *will* cause this directory to be
+ updated and stored.
+
+ """
+
+ current_dir = self
+
+ for path in paths:
+ # Skip empty path segments
+ if not path:
+ continue
+
+ entry = current_dir.index.get(path)
+ if entry:
+ if entry.type == _FileType.DIRECTORY:
+ current_dir = entry.get_directory(current_dir)
+ else:
+ error = "Cannot descend into {}, which is a '{}' in the directory {}"
+ raise VirtualDirectoryError(error.format(path,
+ current_dir.index[path].type,
+ current_dir))
+ else:
+ if create:
+ current_dir = current_dir._add_directory(path)
+ else:
+ error = "'{}' not found in {}"
+ raise VirtualDirectoryError(error.format(path, str(current_dir)))
+
+ return current_dir
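+
+    # Illustrative sketch (not part of this change): assuming `vdir` is an
+    # existing CasBasedDirectory, descend() can create and reach nested
+    # subdirectories before content is imported into them:
+    #
+    #   subdir = vdir.descend('usr', 'share', 'doc', create=True)
+    #   assert subdir.is_empty()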
+
+ def _check_replacement(self, name, relative_pathname, fileListResult):
+ """ Checks whether 'name' exists, and if so, whether we can overwrite it.
+ If we can, add the name to 'overwritten_files' and delete the existing entry.
+ Returns 'True' if the import should go ahead.
+ fileListResult.overwritten and fileListResult.ignore are updated depending
+ on the result. """
+ existing_entry = self.index.get(name)
+ if existing_entry is None:
+ return True
+ elif existing_entry.type == _FileType.DIRECTORY:
+ # If 'name' maps to a DirectoryNode, then there must be an entry in index
+ # pointing to another Directory.
+ subdir = existing_entry.get_directory(self)
+ if subdir.is_empty():
+ self.delete_entry(name)
+ fileListResult.overwritten.append(relative_pathname)
+ return True
+ else:
+ # We can't overwrite a non-empty directory, so we just ignore it.
+ fileListResult.ignored.append(relative_pathname)
+ return False
+ else:
+ self.delete_entry(name)
+ fileListResult.overwritten.append(relative_pathname)
+ return True
+
+ def _import_files_from_directory(self, source_directory, filter_callback, *, path_prefix="", result):
+ """ Import files from a traditional directory. """
+
+ for direntry in os.scandir(source_directory):
+ # The destination filename, relative to the root where the import started
+ relative_pathname = os.path.join(path_prefix, direntry.name)
+
+ is_dir = direntry.is_dir(follow_symlinks=False)
+
+ if is_dir:
+ src_subdir = os.path.join(source_directory, direntry.name)
+
+ try:
+ create_subdir = direntry.name not in self.index
+ dest_subdir = self.descend(direntry.name, create=create_subdir)
+ except VirtualDirectoryError:
+ filetype = self.index[direntry.name].type
+ raise VirtualDirectoryError('Destination is a {}, not a directory: /{}'
+ .format(filetype, relative_pathname))
+
+ dest_subdir._import_files_from_directory(src_subdir, filter_callback,
+ path_prefix=relative_pathname, result=result)
+
+ if filter_callback and not filter_callback(relative_pathname):
+ if is_dir and create_subdir and dest_subdir.is_empty():
+ # Complete subdirectory has been filtered out, remove it
+ self.delete_entry(direntry.name)
+
+ # Entry filtered out, move to next
+ continue
+
+ if direntry.is_file(follow_symlinks=False):
+ if self._check_replacement(direntry.name, relative_pathname, result):
+ self._add_file(source_directory, direntry.name, modified=relative_pathname in result.overwritten)
+ result.files_written.append(relative_pathname)
+ elif direntry.is_symlink():
+ if self._check_replacement(direntry.name, relative_pathname, result):
+ self._copy_link_from_filesystem(source_directory, direntry.name)
+ result.files_written.append(relative_pathname)
+
+ def _partial_import_cas_into_cas(self, source_directory, filter_callback, *, path_prefix="", result):
+ """ Import files from a CAS-based directory. """
+
+ for name, entry in source_directory.index.items():
+ # The destination filename, relative to the root where the import started
+ relative_pathname = os.path.join(path_prefix, name)
+
+ is_dir = entry.type == _FileType.DIRECTORY
+
+ if is_dir:
+ create_subdir = name not in self.index
+
+ if create_subdir and not filter_callback:
+ # If subdirectory does not exist yet and there is no filter,
+ # we can import the whole source directory by digest instead
+ # of importing each directory entry individually.
+ subdir_digest = entry.get_digest()
+ dest_entry = IndexEntry(name, _FileType.DIRECTORY, digest=subdir_digest)
+ self.index[name] = dest_entry
+ self.__invalidate_digest()
+
+ # However, we still need to iterate over the directory entries
+ # to fill in `result.files_written`.
+
+ # Use source subdirectory object if it already exists,
+ # otherwise create object for destination subdirectory.
+ # This is based on the assumption that the destination
+ # subdirectory is more likely to be modified later on
+ # (e.g., by further import_files() calls).
+ if entry.buildstream_object:
+ subdir = entry.buildstream_object
+ else:
+ subdir = dest_entry.get_directory(self)
+
+ subdir.__add_files_to_result(path_prefix=relative_pathname, result=result)
+ else:
+ src_subdir = source_directory.descend(name)
+
+ try:
+ dest_subdir = self.descend(name, create=create_subdir)
+ except VirtualDirectoryError:
+ filetype = self.index[name].type
+ raise VirtualDirectoryError('Destination is a {}, not a directory: /{}'
+ .format(filetype, relative_pathname))
+
+ dest_subdir._partial_import_cas_into_cas(src_subdir, filter_callback,
+ path_prefix=relative_pathname, result=result)
+
+ if filter_callback and not filter_callback(relative_pathname):
+ if is_dir and create_subdir and dest_subdir.is_empty():
+ # Complete subdirectory has been filtered out, remove it
+ self.delete_entry(name)
+
+ # Entry filtered out, move to next
+ continue
+
+ if not is_dir:
+ if self._check_replacement(name, relative_pathname, result):
+ if entry.type == _FileType.REGULAR_FILE:
+ self.index[name] = IndexEntry(name, _FileType.REGULAR_FILE,
+ digest=entry.digest,
+ is_executable=entry.is_executable,
+ modified=True)
+ self.__invalidate_digest()
+ else:
+ assert entry.type == _FileType.SYMLINK
+ self._add_new_link_direct(name=name, target=entry.target)
+ result.files_written.append(relative_pathname)
+
+ def import_files(self, external_pathspec, *,
+ filter_callback=None,
+ report_written=True, update_mtime=False,
+ can_link=False):
+ """ See superclass Directory for arguments """
+
+ result = FileListResult()
+
+ if isinstance(external_pathspec, FileBasedDirectory):
+ source_directory = external_pathspec._get_underlying_directory()
+ self._import_files_from_directory(source_directory, filter_callback, result=result)
+ elif isinstance(external_pathspec, str):
+ source_directory = external_pathspec
+ self._import_files_from_directory(source_directory, filter_callback, result=result)
+ else:
+ assert isinstance(external_pathspec, CasBasedDirectory)
+ self._partial_import_cas_into_cas(external_pathspec, filter_callback, result=result)
+
+ # TODO: No notice is taken of report_written, update_mtime or can_link.
+ # Current behaviour is to fully populate the report, which is inefficient,
+ # but still correct.
+
+ return result
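+
+    # Illustrative sketch (not part of this change): importing a staged
+    # directory from disk and inspecting the returned FileListResult, assuming
+    # `vdir` is a CasBasedDirectory and `staging_dir` is a path on disk:
+    #
+    #   result = vdir.import_files(staging_dir,
+    #                              filter_callback=lambda path: not path.endswith('.tmp'))
+    #   written = result.files_written
+    #   overwritten = result.overwritten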
+
+ def set_deterministic_mtime(self):
+ """ Sets a static modification time for all regular files in this directory.
+ Since we don't store any modification time, we don't need to do anything.
+ """
+
+ def set_deterministic_user(self):
+ """ Sets all files in this directory to the current user's euid/egid.
+ We also don't store user data, so this can be ignored.
+ """
+
+ def export_files(self, to_directory, *, can_link=False, can_destroy=False):
+ """Copies everything from this into to_directory, which must be the name
+ of a traditional filesystem directory.
+
+ Arguments:
+
+ to_directory (string): a path outside this directory object
+ where the contents will be copied to.
+
+ can_link (bool): Whether we can create hard links in to_directory
+ instead of copying.
+
+ can_destroy (bool): Whether we can destroy elements in this
+ directory to export them (e.g. by renaming them as the
+ target).
+
+ """
+
+ self.cas_cache.checkout(to_directory, self._get_digest(), can_link=can_link)
+
+ def export_to_tar(self, tarfile, destination_dir, mtime=_magic_timestamp):
+ raise NotImplementedError()
+
+ def mark_changed(self):
+ """ It should not be possible to externally modify a CAS-based
+ directory at the moment."""
+ raise NotImplementedError()
+
+ def is_empty(self):
+ """ Return true if this directory has no files, subdirectories or links in it.
+ """
+ return len(self.index) == 0
+
+ def _mark_directory_unmodified(self):
+ # Marks all entries in this directory and all child directories as unmodified.
+ for i in self.index.values():
+ i.modified = False
+ if i.type == _FileType.DIRECTORY and i.buildstream_object:
+ i.buildstream_object._mark_directory_unmodified()
+
+ def _mark_entry_unmodified(self, name):
+ # Marks an entry as unmodified. If the entry is a directory, it will
+ # recursively mark all its tree as unmodified.
+ self.index[name].modified = False
+ if self.index[name].buildstream_object:
+ self.index[name].buildstream_object._mark_directory_unmodified()
+
+ def mark_unmodified(self):
+ """ Marks all files in this directory (recursively) as unmodified.
+ If we have a parent, we mark our own entry as unmodified in that parent's
+ index.
+ """
+ if self.parent:
+ self.parent._mark_entry_unmodified(self._find_self_in_parent())
+ else:
+ self._mark_directory_unmodified()
+
+ def _lightweight_resolve_to_index(self, path):
+ """A lightweight function for transforming paths into IndexEntry
+ objects. This does not follow symlinks.
+
+ path: The string to resolve. This should be a series of path
+ components separated by the protocol buffer path separator
+ _pb2_path_sep.
+
+ Returns: the IndexEntry found, or None if any of the path components were not present.
+
+ """
+ directory = self
+ path_components = path.split(CasBasedDirectory._pb2_path_sep)
+ for component in path_components[:-1]:
+ if component not in directory.index:
+ return None
+ if directory.index[component].type == _FileType.DIRECTORY:
+ directory = directory.index[component].get_directory(self)
+ else:
+ return None
+ return directory.index.get(path_components[-1], None)
+
+ def list_modified_paths(self):
+ """Provide a list of relative paths which have been modified since the
+ last call to mark_unmodified.
+
+ Return value: List(str) - list of modified paths
+ """
+
+ for p in self.list_relative_paths():
+ i = self._lightweight_resolve_to_index(p)
+ if i and i.modified:
+ yield p
+
+ def list_relative_paths(self, relpath=""):
+ """Provide a list of all relative paths.
+
+ Return value: List(str) - list of all paths
+ """
+
+ file_list = list(filter(lambda i: i[1].type != _FileType.DIRECTORY,
+ self.index.items()))
+ directory_list = filter(lambda i: i[1].type == _FileType.DIRECTORY,
+ self.index.items())
+
+ if relpath != "":
+ yield relpath
+
+ for (k, v) in sorted(file_list):
+ yield os.path.join(relpath, k)
+
+ for (k, v) in sorted(directory_list):
+ subdir = v.get_directory(self)
+ yield from subdir.list_relative_paths(relpath=os.path.join(relpath, k))
+
+ def get_size(self):
+ digest = self._get_digest()
+ total = digest.size_bytes
+ for i in self.index.values():
+ if i.type == _FileType.DIRECTORY:
+ subdir = i.get_directory(self)
+ total += subdir.get_size()
+ elif i.type == _FileType.REGULAR_FILE:
+ total += i.digest.size_bytes
+ # Symlink nodes are encoded as part of the directory serialization.
+ return total
+
+ def _get_identifier(self):
+ path = ""
+ if self.parent:
+ path = self.parent._get_identifier()
+ if self.filename:
+ path += "/" + self.filename
+ else:
+ path += "/" + self.common_name
+ return path
+
+ def __str__(self):
+ return "[CAS:{}]".format(self._get_identifier())
+
+ def _get_underlying_directory(self):
+ """ There is no underlying directory for a CAS-backed directory, so
+ throw an exception. """
+ raise VirtualDirectoryError("_get_underlying_directory was called on a CAS-backed directory," +
+ " which has no underlying directory.")
+
+ # _get_digest():
+ #
+ # Return the Digest for this directory.
+ #
+ # Returns:
+ # (Digest): The Digest protobuf object for the Directory protobuf
+ #
+ def _get_digest(self):
+ if not self.__digest:
+ # Create updated Directory proto
+ pb2_directory = remote_execution_pb2.Directory()
+
+ for name, entry in sorted(self.index.items()):
+ if entry.type == _FileType.DIRECTORY:
+ dirnode = pb2_directory.directories.add()
+ dirnode.name = name
+
+ # Update digests for subdirectories in DirectoryNodes.
+ # No need to call entry.get_directory().
+ # If it hasn't been instantiated, digest must be up-to-date.
+ subdir = entry.buildstream_object
+ if subdir:
+ dirnode.digest.CopyFrom(subdir._get_digest())
+ else:
+ dirnode.digest.CopyFrom(entry.digest)
+ elif entry.type == _FileType.REGULAR_FILE:
+ filenode = pb2_directory.files.add()
+ filenode.name = name
+ filenode.digest.CopyFrom(entry.digest)
+ filenode.is_executable = entry.is_executable
+ elif entry.type == _FileType.SYMLINK:
+ symlinknode = pb2_directory.symlinks.add()
+ symlinknode.name = name
+ symlinknode.target = entry.target
+
+ self.__digest = self.cas_cache.add_object(buffer=pb2_directory.SerializeToString())
+
+ return self.__digest
+
+ def _get_child_digest(self, *path):
+ subdir = self.descend(*path[:-1])
+ entry = subdir.index[path[-1]]
+ if entry.type == _FileType.DIRECTORY:
+ subdir = entry.buildstream_object
+ if subdir:
+ return subdir._get_digest()
+ else:
+ return entry.digest
+ elif entry.type == _FileType.REGULAR_FILE:
+ return entry.digest
+ else:
+ raise VirtualDirectoryError("Directory entry has no digest: {}".format(os.path.join(*path)))
+
+ def _objpath(self, *path):
+ subdir = self.descend(*path[:-1])
+ entry = subdir.index[path[-1]]
+ return self.cas_cache.objpath(entry.digest)
+
+ def _exists(self, *path):
+ try:
+ subdir = self.descend(*path[:-1])
+ return path[-1] in subdir.index
+ except VirtualDirectoryError:
+ return False
+
+ def __invalidate_digest(self):
+ if self.__digest:
+ self.__digest = None
+ if self.parent:
+ self.parent.__invalidate_digest()
+
+ def __add_files_to_result(self, *, path_prefix="", result):
+ for name, entry in self.index.items():
+ # The destination filename, relative to the root where the import started
+ relative_pathname = os.path.join(path_prefix, name)
+
+ if entry.type == _FileType.DIRECTORY:
+ subdir = self.descend(name)
+ subdir.__add_files_to_result(path_prefix=relative_pathname, result=result)
+ else:
+ result.files_written.append(relative_pathname)
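+
+
+# Illustrative sketch (not part of this change): a CasBasedDirectory is
+# typically written back to a real filesystem via the CAS, assuming
+# `cas_cache` is a CASCache instance and `checkout_path` is a writable path:
+#
+#   vdir = CasBasedDirectory(cas_cache)
+#   vdir.import_files('/path/to/staged/output')
+#   vdir.export_files(checkout_path, can_link=True)
+#   digest = vdir._get_digest()  # Digest of the serialized Directory proto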
diff --git a/src/buildstream/storage/_filebaseddirectory.py b/src/buildstream/storage/_filebaseddirectory.py
new file mode 100644
index 000000000..9a746f731
--- /dev/null
+++ b/src/buildstream/storage/_filebaseddirectory.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jim MacArthur <jim.macarthur@codethink.co.uk>
+
+"""
+FileBasedDirectory
+==================
+
+Implementation of the Directory class which backs onto a normal POSIX file system.
+
+See also: :ref:`sandboxing`.
+"""
+
+import os
+import stat
+import time
+
+from .directory import Directory, VirtualDirectoryError, _FileType
+from .. import utils
+from ..utils import link_files, copy_files, list_relative_paths, _get_link_mtime, _magic_timestamp
+from ..utils import _set_deterministic_user, _set_deterministic_mtime
+from ..utils import FileListResult
+
+# FileBasedDirectory intentionally doesn't call its superclass constructor,
+# which is meant to be unimplemented.
+# pylint: disable=super-init-not-called
+
+
+class FileBasedDirectory(Directory):
+ def __init__(self, external_directory=None):
+ self.external_directory = external_directory
+
+ def descend(self, *paths, create=False):
+ """ See superclass Directory for arguments """
+
+ current_dir = self
+
+ for path in paths:
+ # Skip empty path segments
+ if not path:
+ continue
+
+ new_path = os.path.join(current_dir.external_directory, path)
+ try:
+ st = os.lstat(new_path)
+ if not stat.S_ISDIR(st.st_mode):
+ raise VirtualDirectoryError("Cannot descend into '{}': '{}' is not a directory"
+ .format(path, new_path))
+ except FileNotFoundError:
+ if create:
+ os.mkdir(new_path)
+ else:
+ raise VirtualDirectoryError("Cannot descend into '{}': '{}' does not exist"
+ .format(path, new_path))
+
+ current_dir = FileBasedDirectory(new_path)
+
+ return current_dir
+
+ def import_files(self, external_pathspec, *,
+ filter_callback=None,
+ report_written=True, update_mtime=False,
+ can_link=False):
+ """ See superclass Directory for arguments """
+
+ from ._casbaseddirectory import CasBasedDirectory # pylint: disable=cyclic-import
+
+ if isinstance(external_pathspec, CasBasedDirectory):
+ if can_link and not update_mtime:
+ actionfunc = utils.safe_link
+ else:
+ actionfunc = utils.safe_copy
+
+ import_result = FileListResult()
+ self._import_files_from_cas(external_pathspec, actionfunc, filter_callback, result=import_result)
+ else:
+ if isinstance(external_pathspec, Directory):
+ source_directory = external_pathspec.external_directory
+ else:
+ source_directory = external_pathspec
+
+ if can_link and not update_mtime:
+ import_result = link_files(source_directory, self.external_directory,
+ filter_callback=filter_callback,
+ ignore_missing=False, report_written=report_written)
+ else:
+ import_result = copy_files(source_directory, self.external_directory,
+ filter_callback=filter_callback,
+ ignore_missing=False, report_written=report_written)
+
+ if update_mtime:
+ cur_time = time.time()
+
+ for f in import_result.files_written:
+ os.utime(os.path.join(self.external_directory, f), times=(cur_time, cur_time))
+ return import_result
+
+ def _mark_changed(self):
+ pass
+
+ def set_deterministic_mtime(self):
+ _set_deterministic_mtime(self.external_directory)
+
+ def set_deterministic_user(self):
+ _set_deterministic_user(self.external_directory)
+
+ def export_files(self, to_directory, *, can_link=False, can_destroy=False):
+ if can_destroy:
+ # Try a simple rename of the sandbox root; if that
+        # doesn't cut it, then do the regular link files code path
+ try:
+ os.rename(self.external_directory, to_directory)
+ return
+ except OSError:
+ # Proceed using normal link/copy
+ pass
+
+ os.makedirs(to_directory, exist_ok=True)
+ if can_link:
+ link_files(self.external_directory, to_directory)
+ else:
+ copy_files(self.external_directory, to_directory)
+
+ # Add a directory entry deterministically to a tar file
+ #
+ # This function takes extra steps to ensure the output is deterministic.
+ # First, it sorts the results of os.listdir() to ensure the ordering of
+ # the files in the archive is the same. Second, it sets a fixed
+ # timestamp for each entry. See also https://bugs.python.org/issue24465.
+ def export_to_tar(self, tf, dir_arcname, mtime=_magic_timestamp):
+ # We need directories here, including non-empty ones,
+ # so list_relative_paths is not used.
+ for filename in sorted(os.listdir(self.external_directory)):
+ source_name = os.path.join(self.external_directory, filename)
+ arcname = os.path.join(dir_arcname, filename)
+ tarinfo = tf.gettarinfo(source_name, arcname)
+ tarinfo.mtime = mtime
+
+ if tarinfo.isreg():
+ with open(source_name, "rb") as f:
+ tf.addfile(tarinfo, f)
+ elif tarinfo.isdir():
+ tf.addfile(tarinfo)
+ self.descend(*filename.split(os.path.sep)).export_to_tar(tf, arcname, mtime)
+ else:
+ tf.addfile(tarinfo)
+
+ def is_empty(self):
+ it = os.scandir(self.external_directory)
+ return next(it, None) is None
+
+ def mark_unmodified(self):
+ """ Marks all files in this directory (recursively) as unmodified.
+ """
+ _set_deterministic_mtime(self.external_directory)
+
+ def list_modified_paths(self):
+ """Provide a list of relative paths which have been modified since the
+ last call to mark_unmodified.
+
+ Return value: List(str) - list of modified paths
+ """
+ return [f for f in list_relative_paths(self.external_directory)
+ if _get_link_mtime(os.path.join(self.external_directory, f)) != _magic_timestamp]
+
+ def list_relative_paths(self):
+ """Provide a list of all relative paths.
+
+ Return value: List(str) - list of all paths
+ """
+
+ return list_relative_paths(self.external_directory)
+
+ def get_size(self):
+ return utils._get_dir_size(self.external_directory)
+
+ def __str__(self):
+ # This returns the whole path (since we don't know where the directory started)
+ # which exposes the sandbox directory; we will have to assume for the time being
+ # that people will not abuse __str__.
+ return self.external_directory
+
+ def _get_underlying_directory(self) -> str:
+ """ Returns the underlying (real) file system directory this
+ object refers to. """
+ return self.external_directory
+
+ def _get_filetype(self, name=None):
+ path = self.external_directory
+
+ if name:
+ path = os.path.join(path, name)
+
+ st = os.lstat(path)
+ if stat.S_ISDIR(st.st_mode):
+ return _FileType.DIRECTORY
+ elif stat.S_ISLNK(st.st_mode):
+ return _FileType.SYMLINK
+ elif stat.S_ISREG(st.st_mode):
+ return _FileType.REGULAR_FILE
+ else:
+ return _FileType.SPECIAL_FILE
+
+ def _import_files_from_cas(self, source_directory, actionfunc, filter_callback, *, path_prefix="", result):
+ """ Import files from a CAS-based directory. """
+
+ for name, entry in source_directory.index.items():
+ # The destination filename, relative to the root where the import started
+ relative_pathname = os.path.join(path_prefix, name)
+
+ # The full destination path
+ dest_path = os.path.join(self.external_directory, name)
+
+ is_dir = entry.type == _FileType.DIRECTORY
+
+ if is_dir:
+ src_subdir = source_directory.descend(name)
+
+ try:
+ create_subdir = not os.path.lexists(dest_path)
+ dest_subdir = self.descend(name, create=create_subdir)
+ except VirtualDirectoryError:
+ filetype = self._get_filetype(name)
+ raise VirtualDirectoryError('Destination is a {}, not a directory: /{}'
+ .format(filetype, relative_pathname))
+
+ dest_subdir._import_files_from_cas(src_subdir, actionfunc, filter_callback,
+ path_prefix=relative_pathname, result=result)
+
+ if filter_callback and not filter_callback(relative_pathname):
+ if is_dir and create_subdir and dest_subdir.is_empty():
+ # Complete subdirectory has been filtered out, remove it
+ os.rmdir(dest_subdir.external_directory)
+
+ # Entry filtered out, move to next
+ continue
+
+ if not is_dir:
+ if os.path.lexists(dest_path):
+ # Collect overlaps
+ if not os.path.isdir(dest_path):
+ result.overwritten.append(relative_pathname)
+
+ if not utils.safe_remove(dest_path):
+ result.ignored.append(relative_pathname)
+ continue
+
+ if entry.type == _FileType.REGULAR_FILE:
+ src_path = source_directory.cas_cache.objpath(entry.digest)
+ actionfunc(src_path, dest_path, result=result)
+ if entry.is_executable:
+ os.chmod(dest_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
+ stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+ else:
+ assert entry.type == _FileType.SYMLINK
+ os.symlink(entry.target, dest_path)
+ result.files_written.append(relative_pathname)
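+
+
+# Illustrative sketch (not part of this change): producing a deterministic
+# tarball from a FileBasedDirectory, assuming `build_dir` is an existing
+# directory on disk:
+#
+#   import tarfile
+#   vdir = FileBasedDirectory(build_dir)
+#   with tarfile.open('output.tar', 'w') as tf:
+#       vdir.export_to_tar(tf, '.')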
diff --git a/src/buildstream/storage/directory.py b/src/buildstream/storage/directory.py
new file mode 100644
index 000000000..bad818fef
--- /dev/null
+++ b/src/buildstream/storage/directory.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jim MacArthur <jim.macarthur@codethink.co.uk>
+
+"""
+Directory
+=========
+
+This is a virtual Directory class to isolate the rest of BuildStream
+from the backing store implementation. Sandboxes are allowed to read
+from and write to the underlying storage, but all others must use this
+Directory class to access files and directories in the sandbox.
+
+See also: :ref:`sandboxing`.
+
+"""
+
+from enum import Enum
+
+from .._exceptions import BstError, ErrorDomain
+from ..utils import _magic_timestamp
+
+
+class VirtualDirectoryError(BstError):
+ """Raised by Directory functions when system calls fail.
+ This will be handled internally by the BuildStream core,
+ if you need to handle this error, then it should be reraised,
+ or either of the :class:`.ElementError` or :class:`.SourceError`
+ exceptions should be raised from this error.
+ """
+ def __init__(self, message, reason=None):
+ super().__init__(message, domain=ErrorDomain.VIRTUAL_FS, reason=reason)
+
+
+class Directory():
+ def __init__(self, external_directory=None):
+ raise NotImplementedError()
+
+ def descend(self, *paths, create=False):
+ """Descend one or more levels of directory hierarchy and return a new
+ Directory object for that directory.
+
+ Args:
+ *paths (str): A list of strings which are all directory names.
+ create (boolean): If this is true, the directories will be created if
+ they don't already exist.
+
+        Returns:
+ A Directory object representing the found directory.
+
+ Raises:
+ VirtualDirectoryError: if any of the components in subdirectory_spec
+ cannot be found, or are files, or symlinks to files.
+
+ """
+ raise NotImplementedError()
+
+ # Import and export of files and links
+ def import_files(self, external_pathspec, *,
+ filter_callback=None,
+ report_written=True, update_mtime=False,
+ can_link=False):
+ """Imports some or all files from external_path into this directory.
+
+ Args:
+ external_pathspec: Either a string containing a pathname, or a
+ Directory object, to use as the source.
+ filter_callback (callable): Optional filter callback. Called with the
+ relative path as argument for every file in the source directory.
+ The file is imported only if the callable returns True.
+ If no filter callback is specified, all files will be imported.
+ report_written (bool): Return the full list of files
+ written. Defaults to true. If false, only a list of
+ overwritten files is returned.
+ update_mtime (bool): Update the access and modification time
+ of each file copied to the current time.
+ can_link (bool): Whether it's OK to create a hard link to the
+ original content, meaning the stored copy will change when the
+ original files change. Setting this doesn't guarantee hard
+ links will be made. can_link will never be used if
+ update_mtime is set.
+
+        Returns:
+ (FileListResult) - A report of files imported and overwritten.
+
+ """
+
+ raise NotImplementedError()
+
+ def export_files(self, to_directory, *, can_link=False, can_destroy=False):
+ """Copies everything from this into to_directory.
+
+ Args:
+ to_directory (string): a path outside this directory object
+ where the contents will be copied to.
+ can_link (bool): Whether we can create hard links in to_directory
+ instead of copying. Setting this does not guarantee hard links will be used.
+ can_destroy (bool): Can we destroy the data already in this
+ directory when exporting? If set, this may allow data to be
+ moved rather than copied which will be quicker.
+ """
+
+ raise NotImplementedError()
+
+ def export_to_tar(self, tarfile, destination_dir, mtime=_magic_timestamp):
+ """ Exports this directory into the given tar file.
+
+ Args:
+ tarfile (TarFile): A Python TarFile object to export into.
+ destination_dir (str): The prefix for all filenames inside the archive.
+ mtime (int): mtimes of all files in the archive are set to this.
+ """
+ raise NotImplementedError()
+
+ # Convenience functions
+ def is_empty(self):
+ """ Return true if this directory has no files, subdirectories or links in it.
+ """
+ raise NotImplementedError()
+
+ def set_deterministic_mtime(self):
+ """ Sets a static modification time for all regular files in this directory.
+ The magic number for timestamps is 2011-11-11 11:11:11.
+ """
+ raise NotImplementedError()
+
+ def set_deterministic_user(self):
+ """ Sets all files in this directory to the current user's euid/egid.
+ """
+ raise NotImplementedError()
+
+ def mark_unmodified(self):
+ """ Marks all files in this directory (recursively) as unmodified.
+ """
+ raise NotImplementedError()
+
+ def list_modified_paths(self):
+ """Provide a list of relative paths which have been modified since the
+ last call to mark_unmodified. Includes directories only if
+ they are empty.
+
+ Yields:
+ (List(str)) - list of all modified files with relative paths.
+
+ """
+ raise NotImplementedError()
+
+ def list_relative_paths(self):
+ """Provide a list of all relative paths in this directory. Includes
+ directories only if they are empty.
+
+ Yields:
+ (List(str)) - list of all files with relative paths.
+
+ """
+ raise NotImplementedError()
+
+ def _mark_changed(self):
+ """Internal function to mark this directory as having been changed
+ outside this API. This normally can only happen by calling the
+ Sandbox's `run` method. This does *not* mark everything as modified
+ (i.e. list_modified_paths will not necessarily return the same results
+ as list_relative_paths after calling this.)
+
+ """
+ raise NotImplementedError()
+
+ def get_size(self):
+ """ Get an approximation of the storage space in bytes used by this directory
+ and all files and subdirectories in it. Storage space varies by implementation
+ and effective space used may be lower than this number due to deduplication. """
+ raise NotImplementedError()
+
+
+# FileType:
+#
+# Type of file or directory entry.
+#
+class _FileType(Enum):
+
+ # Directory
+ DIRECTORY = 1
+
+ # Regular file
+ REGULAR_FILE = 2
+
+ # Symbolic link
+ SYMLINK = 3
+
+ # Special file (FIFO, character device, block device, or socket)
+ SPECIAL_FILE = 4
+
+ def __str__(self):
+ # https://github.com/PyCQA/pylint/issues/2062
+ return self.name.lower().replace('_', ' ') # pylint: disable=no-member
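+
+
+# Illustrative sketch (not part of this change): callers are expected to code
+# against the Directory interface only, so the same logic works with either a
+# FileBasedDirectory or a CasBasedDirectory. Assuming `vdir` is any Directory:
+#
+#   subdir = vdir.descend('usr', 'bin', create=True)
+#   subdir.import_files('/some/staging/area', can_link=True)
+#   subdir.set_deterministic_mtime()
+#   subdir.export_files('/some/checkout/area')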
diff --git a/src/buildstream/testing/__init__.py b/src/buildstream/testing/__init__.py
new file mode 100644
index 000000000..0b1c1fd73
--- /dev/null
+++ b/src/buildstream/testing/__init__.py
@@ -0,0 +1,121 @@
+#
+# Copyright (C) 2019 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+This package contains various utilities which make it easier to test plugins.
+"""
+
+import os
+from collections import OrderedDict
+from . import _sourcetests
+from .repo import Repo
+from .runcli import cli, cli_integration, cli_remote_execution
+from .integration import integration_cache
+
+# To make use of these test utilities it is necessary to have pytest
+# available. However, we don't want to have a hard dependency on
+# pytest.
+try:
+ import pytest
+except ImportError:
+ module_name = globals()['__name__']
+ msg = "Could not import pytest:\n" \
+ "To use the {} module, you must have pytest installed.".format(module_name)
+ raise ImportError(msg)
+
+
+ALL_REPO_KINDS = OrderedDict()
+
+
+def create_repo(kind, directory, subdir='repo'):
+ """Convenience method for creating a Repo
+
+ Args:
+ kind (str): The kind of repo to create (a source plugin basename). This
+ must have previously been registered using
+ `register_repo_kind`
+ directory (str): The path where the repo will keep a cache
+
+ Returns:
+ (Repo): A new Repo object
+ """
+ try:
+ constructor = ALL_REPO_KINDS[kind]
+ except KeyError as e:
+ raise AssertionError("Unsupported repo kind {}".format(kind)) from e
+
+ return constructor(directory, subdir=subdir)
+
+
+def register_repo_kind(kind, cls):
+ """Register a new repo kind.
+
+ Registering a repo kind will allow the use of the `create_repo`
+ method for that kind and include that repo kind in ALL_REPO_KINDS
+
+    In addition, repo_kinds registered prior to
+ `sourcetests_collection_hook` being called will be automatically
+ used to test the basic behaviour of their associated source
+ plugins using the tests in `testing._sourcetests`.
+
+ Args:
+ kind (str): The kind of repo to create (a source plugin basename)
+ cls (cls) : A class derived from Repo.
+
+ """
+ ALL_REPO_KINDS[kind] = cls
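+
+
+# Illustrative sketch (not part of this change): a plugin test suite would
+# typically register its Repo subclass once, then create repositories from it
+# in tests. `MyRepo` and the paths below are hypothetical:
+#
+#   register_repo_kind('mykind', MyRepo)
+#   repo = create_repo('mykind', str(tmpdir))
+#   ref = repo.create(files_path)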
+
+
+def sourcetests_collection_hook(session):
+    """ Used to hook the templated source plugin tests into a pytest test suite.
+
+ This should be called via the `pytest_sessionstart
+ hook <https://docs.pytest.org/en/latest/reference.html#collection-hooks>`_.
+ The tests in the _sourcetests package will be collected as part of
+ whichever test package this hook is called from.
+
+ Args:
+ session (pytest.Session): The current pytest session
+ """
+ def should_collect_tests(config):
+ args = config.args
+ rootdir = config.rootdir
+ # When no args are supplied, pytest defaults the arg list to
+ # just include the session's root_dir. We want to collect
+ # tests as part of the default collection
+ if args == [str(rootdir)]:
+ return True
+
+ # If specific tests are passed, don't collect
+ # everything. Pytest will handle this correctly without
+ # modification.
+ if len(args) > 1 or rootdir not in args:
+ return False
+
+        # If in doubt, collect them; this will be an easier bug to
+        # spot and is less likely to result in a bug not being found.
+ return True
+
+ SOURCE_TESTS_PATH = os.path.dirname(_sourcetests.__file__)
+ # Add the location of the source tests to the session's
+ # python_files config. Without this, pytest may filter out these
+ # tests during collection.
+ session.config.addinivalue_line("python_files", os.path.join(SOURCE_TESTS_PATH, "*.py"))
+    # If the test invocation has specified specific tests, don't
+ # automatically collect templated tests.
+ if should_collect_tests(session.config):
+ session.config.args.append(SOURCE_TESTS_PATH)
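+
+
+# Illustrative sketch (not part of this change): a downstream plugin repository
+# would typically call the hook above from its own conftest.py, for example:
+#
+#   from buildstream.testing import sourcetests_collection_hook
+#
+#   def pytest_sessionstart(session):
+#       sourcetests_collection_hook(session)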
diff --git a/src/buildstream/testing/_sourcetests/__init__.py b/src/buildstream/testing/_sourcetests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/__init__.py
diff --git a/src/buildstream/testing/_sourcetests/build_checkout.py b/src/buildstream/testing/_sourcetests/build_checkout.py
new file mode 100644
index 000000000..3619d2b7e
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/build_checkout.py
@@ -0,0 +1,83 @@
+#
+# Copyright (C) 2018 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Pylint doesn't play well with fixtures and dependency injection from pytest
+# pylint: disable=redefined-outer-name
+
+import os
+import pytest
+
+from buildstream.testing import create_repo, ALL_REPO_KINDS
+from buildstream.testing import cli # pylint: disable=unused-import
+from buildstream import _yaml
+
+# Project directory
+TOP_DIR = os.path.dirname(os.path.realpath(__file__))
+DATA_DIR = os.path.join(TOP_DIR, 'project')
+
+fetch_build_checkout_combos = \
+ [("strict", kind) for kind in ALL_REPO_KINDS] + \
+ [("non-strict", kind) for kind in ALL_REPO_KINDS]
+
+
+def strict_args(args, strict):
+ if strict != "strict":
+ return ['--no-strict', *args]
+ return args
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("strict,kind", fetch_build_checkout_combos)
+def test_fetch_build_checkout(cli, tmpdir, datafiles, strict, kind):
+ checkout = os.path.join(cli.directory, 'checkout')
+ project = str(datafiles)
+ dev_files_path = os.path.join(project, 'files', 'dev-files')
+ element_path = os.path.join(project, 'elements')
+ element_name = 'build-test-{}.bst'.format(kind)
+
+ # Create our repo object of the given source type with
+ # the dev files, and then collect the initial ref.
+ #
+ repo = create_repo(kind, str(tmpdir))
+ ref = repo.create(dev_files_path)
+
+ # Write out our test target
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ repo.source_config(ref=ref)
+ ]
+ }
+ _yaml.dump(element,
+ os.path.join(element_path,
+ element_name))
+
+ assert cli.get_element_state(project, element_name) == 'fetch needed'
+ result = cli.run(project=project, args=strict_args(['build', element_name], strict))
+ result.assert_success()
+ assert cli.get_element_state(project, element_name) == 'cached'
+
+ # Now check it out
+ result = cli.run(project=project, args=strict_args([
+ 'artifact', 'checkout', element_name, '--directory', checkout
+ ], strict))
+ result.assert_success()
+
+ # Check that the pony.h include from files/dev-files exists
+ filename = os.path.join(checkout, 'usr', 'include', 'pony.h')
+ assert os.path.exists(filename)
diff --git a/src/buildstream/testing/_sourcetests/fetch.py b/src/buildstream/testing/_sourcetests/fetch.py
new file mode 100644
index 000000000..aaf92a14d
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/fetch.py
@@ -0,0 +1,107 @@
+#
+# Copyright (C) 2018 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Pylint doesn't play well with fixtures and dependency injection from pytest
+# pylint: disable=redefined-outer-name
+
+import os
+import pytest
+
+from buildstream import _yaml
+from .._utils import generate_junction, configure_project
+from .. import create_repo, ALL_REPO_KINDS
+from .. import cli # pylint: disable=unused-import
+
+# Project directory
+TOP_DIR = os.path.dirname(os.path.realpath(__file__))
+DATA_DIR = os.path.join(TOP_DIR, 'project')
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_fetch(cli, tmpdir, datafiles, kind):
+ project = str(datafiles)
+ bin_files_path = os.path.join(project, 'files', 'bin-files')
+ element_path = os.path.join(project, 'elements')
+ element_name = 'fetch-test-{}.bst'.format(kind)
+
+ # Create our repo object of the given source type with
+ # the bin files, and then collect the initial ref.
+ #
+ repo = create_repo(kind, str(tmpdir))
+ ref = repo.create(bin_files_path)
+
+ # Write out our test target
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ repo.source_config(ref=ref)
+ ]
+ }
+ _yaml.dump(element,
+ os.path.join(element_path,
+ element_name))
+
+ # Assert that a fetch is needed
+ assert cli.get_element_state(project, element_name) == 'fetch needed'
+
+ # Now try to fetch it
+ result = cli.run(project=project, args=['source', 'fetch', element_name])
+ result.assert_success()
+
+ # Assert that we are now buildable because the source is
+ # now cached.
+ assert cli.get_element_state(project, element_name) == 'buildable'
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')])
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_fetch_cross_junction(cli, tmpdir, datafiles, ref_storage, kind):
+ project = str(datafiles)
+ subproject_path = os.path.join(project, 'files', 'sub-project')
+ junction_path = os.path.join(project, 'elements', 'junction.bst')
+
+ import_etc_path = os.path.join(subproject_path, 'elements', 'import-etc-repo.bst')
+ etc_files_path = os.path.join(subproject_path, 'files', 'etc-files')
+
+ repo = create_repo(kind, str(tmpdir.join('import-etc')))
+ ref = repo.create(etc_files_path)
+
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ repo.source_config(ref=(ref if ref_storage == 'inline' else None))
+ ]
+ }
+ _yaml.dump(element, import_etc_path)
+
+ configure_project(project, {
+ 'ref-storage': ref_storage
+ })
+
+ generate_junction(tmpdir, subproject_path, junction_path, store_ref=(ref_storage == 'inline'))
+
+ if ref_storage == 'project.refs':
+ result = cli.run(project=project, args=['source', 'track', 'junction.bst'])
+ result.assert_success()
+ result = cli.run(project=project, args=['source', 'track', 'junction.bst:import-etc.bst'])
+ result.assert_success()
+
+ result = cli.run(project=project, args=['source', 'fetch', 'junction.bst:import-etc.bst'])
+ result.assert_success()
diff --git a/src/buildstream/testing/_sourcetests/mirror.py b/src/buildstream/testing/_sourcetests/mirror.py
new file mode 100644
index 000000000..d682bb2ef
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/mirror.py
@@ -0,0 +1,427 @@
+#
+# Copyright (C) 2018 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Pylint doesn't play well with fixtures and dependency injection from pytest
+# pylint: disable=redefined-outer-name
+
+import os
+import pytest
+
+from buildstream import _yaml
+from buildstream._exceptions import ErrorDomain
+from .._utils import generate_junction
+from .. import create_repo, ALL_REPO_KINDS
+from .. import cli # pylint: disable=unused-import
+
+# Project directory
+TOP_DIR = os.path.dirname(os.path.realpath(__file__))
+DATA_DIR = os.path.join(TOP_DIR, 'project')
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_mirror_fetch(cli, tmpdir, datafiles, kind):
+ bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr')
+ dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr')
+ upstream_repodir = os.path.join(str(tmpdir), 'upstream')
+ mirror_repodir = os.path.join(str(tmpdir), 'mirror')
+ project_dir = os.path.join(str(tmpdir), 'project')
+ os.makedirs(project_dir)
+ element_dir = os.path.join(project_dir, 'elements')
+
+ # Create repo objects of the upstream and mirror
+ upstream_repo = create_repo(kind, upstream_repodir)
+ upstream_repo.create(bin_files_path)
+ mirror_repo = upstream_repo.copy(mirror_repodir)
+ upstream_ref = upstream_repo.create(dev_files_path)
+
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ upstream_repo.source_config(ref=upstream_ref)
+ ]
+ }
+ element_name = 'test.bst'
+ element_path = os.path.join(element_dir, element_name)
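+    # Rewrite the source URL to use a project alias ('foo-<kind>'); the
+    # project.conf written below maps that alias both to the upstream
+    # location and to the mirror.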
+ full_repo = element['sources'][0]['url']
+ upstream_map, repo_name = os.path.split(full_repo)
+ alias = 'foo-' + kind
+ aliased_repo = alias + ':' + repo_name
+ element['sources'][0]['url'] = aliased_repo
+ full_mirror = mirror_repo.source_config()['url']
+ mirror_map, _ = os.path.split(full_mirror)
+ os.makedirs(element_dir)
+ _yaml.dump(element, element_path)
+
+ project = {
+ 'name': 'test',
+ 'element-path': 'elements',
+ 'aliases': {
+ alias: upstream_map + "/"
+ },
+ 'mirrors': [
+ {
+ 'name': 'middle-earth',
+ 'aliases': {
+ alias: [mirror_map + "/"],
+ },
+ },
+ ]
+ }
+ project_file = os.path.join(project_dir, 'project.conf')
+ _yaml.dump(project, project_file)
+
+    # There is no obvious way of checking that the mirror has been fetched,
+    # but at least we can be sure that the fetch succeeds.
+ result = cli.run(project=project_dir, args=['source', 'fetch', element_name])
+ result.assert_success()
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_mirror_fetch_upstream_absent(cli, tmpdir, datafiles, kind):
+ dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr')
+ upstream_repodir = os.path.join(str(tmpdir), 'upstream')
+ mirror_repodir = os.path.join(str(tmpdir), 'mirror')
+ project_dir = os.path.join(str(tmpdir), 'project')
+ os.makedirs(project_dir)
+ element_dir = os.path.join(project_dir, 'elements')
+
+ # Create repo objects of the upstream and mirror
+ upstream_repo = create_repo(kind, upstream_repodir)
+ ref = upstream_repo.create(dev_files_path)
+ mirror_repo = upstream_repo.copy(mirror_repodir)
+
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ upstream_repo.source_config(ref=ref)
+ ]
+ }
+
+ element_name = 'test.bst'
+ element_path = os.path.join(element_dir, element_name)
+ full_repo = element['sources'][0]['url']
+ _, repo_name = os.path.split(full_repo)
+ alias = 'foo-' + kind
+ aliased_repo = alias + ':' + repo_name
+ element['sources'][0]['url'] = aliased_repo
+ full_mirror = mirror_repo.source_config()['url']
+ mirror_map, _ = os.path.split(full_mirror)
+ os.makedirs(element_dir)
+ _yaml.dump(element, element_path)
+
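+    # The alias points at a placeholder upstream that does not serve the
+    # repository, so the fetch below can only succeed via the mirror.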
+ project = {
+ 'name': 'test',
+ 'element-path': 'elements',
+ 'aliases': {
+ alias: 'http://www.example.com/'
+ },
+ 'mirrors': [
+ {
+ 'name': 'middle-earth',
+ 'aliases': {
+ alias: [mirror_map + "/"],
+ },
+ },
+ ]
+ }
+ project_file = os.path.join(project_dir, 'project.conf')
+ _yaml.dump(project, project_file)
+
+ result = cli.run(project=project_dir, args=['source', 'fetch', element_name])
+ result.assert_success()
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_mirror_from_includes(cli, tmpdir, datafiles, kind):
+ bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr')
+ upstream_repodir = os.path.join(str(tmpdir), 'upstream')
+ mirror_repodir = os.path.join(str(tmpdir), 'mirror')
+ project_dir = os.path.join(str(tmpdir), 'project')
+ os.makedirs(project_dir)
+ element_dir = os.path.join(project_dir, 'elements')
+
+ # Create repo objects of the upstream and mirror
+ upstream_repo = create_repo(kind, upstream_repodir)
+ upstream_ref = upstream_repo.create(bin_files_path)
+ mirror_repo = upstream_repo.copy(mirror_repodir)
+
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ upstream_repo.source_config(ref=upstream_ref)
+ ]
+ }
+ element_name = 'test.bst'
+ element_path = os.path.join(element_dir, element_name)
+ full_repo = element['sources'][0]['url']
+ upstream_map, repo_name = os.path.split(full_repo)
+ alias = 'foo-' + kind
+ aliased_repo = alias + ':' + repo_name
+ element['sources'][0]['url'] = aliased_repo
+ full_mirror = mirror_repo.source_config()['url']
+ mirror_map, _ = os.path.split(full_mirror)
+ os.makedirs(element_dir)
+ _yaml.dump(element, element_path)
+
+ config_project_dir = str(tmpdir.join('config'))
+ os.makedirs(config_project_dir, exist_ok=True)
+ config_project = {
+ 'name': 'config'
+ }
+ _yaml.dump(config_project, os.path.join(config_project_dir, 'project.conf'))
+ extra_mirrors = {
+ 'mirrors': [
+ {
+ 'name': 'middle-earth',
+ 'aliases': {
+ alias: [mirror_map + "/"],
+ }
+ }
+ ]
+ }
+ _yaml.dump(extra_mirrors, os.path.join(config_project_dir, 'mirrors.yml'))
+ generate_junction(str(tmpdir.join('config_repo')),
+ config_project_dir,
+ os.path.join(element_dir, 'config.bst'))
+
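+    # The mirror list lives in the separate 'config' project and is pulled
+    # into project.conf through the junction via the '(@)' include directive.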
+ project = {
+ 'name': 'test',
+ 'element-path': 'elements',
+ 'aliases': {
+ alias: upstream_map + "/"
+ },
+ '(@)': [
+ 'config.bst:mirrors.yml'
+ ]
+ }
+ project_file = os.path.join(project_dir, 'project.conf')
+ _yaml.dump(project, project_file)
+
+ # Now make the upstream unavailable.
+ os.rename(upstream_repo.repo, '{}.bak'.format(upstream_repo.repo))
+ result = cli.run(project=project_dir, args=['source', 'fetch', element_name])
+ result.assert_success()
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_mirror_junction_from_includes(cli, tmpdir, datafiles, kind):
+ bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr')
+ upstream_repodir = os.path.join(str(tmpdir), 'upstream')
+ mirror_repodir = os.path.join(str(tmpdir), 'mirror')
+ project_dir = os.path.join(str(tmpdir), 'project')
+ os.makedirs(project_dir)
+ element_dir = os.path.join(project_dir, 'elements')
+
+ # Create repo objects of the upstream and mirror
+ upstream_repo = create_repo(kind, upstream_repodir)
+ upstream_ref = upstream_repo.create(bin_files_path)
+ mirror_repo = upstream_repo.copy(mirror_repodir)
+
+ element = {
+ 'kind': 'junction',
+ 'sources': [
+ upstream_repo.source_config(ref=upstream_ref)
+ ]
+ }
+ element_name = 'test.bst'
+ element_path = os.path.join(element_dir, element_name)
+ full_repo = element['sources'][0]['url']
+ upstream_map, repo_name = os.path.split(full_repo)
+ alias = 'foo-' + kind
+ aliased_repo = alias + ':' + repo_name
+ element['sources'][0]['url'] = aliased_repo
+ full_mirror = mirror_repo.source_config()['url']
+ mirror_map, _ = os.path.split(full_mirror)
+ os.makedirs(element_dir)
+ _yaml.dump(element, element_path)
+
+ config_project_dir = str(tmpdir.join('config'))
+ os.makedirs(config_project_dir, exist_ok=True)
+ config_project = {
+ 'name': 'config'
+ }
+ _yaml.dump(config_project, os.path.join(config_project_dir, 'project.conf'))
+ extra_mirrors = {
+ 'mirrors': [
+ {
+ 'name': 'middle-earth',
+ 'aliases': {
+ alias: [mirror_map + "/"],
+ }
+ }
+ ]
+ }
+ _yaml.dump(extra_mirrors, os.path.join(config_project_dir, 'mirrors.yml'))
+ generate_junction(str(tmpdir.join('config_repo')),
+ config_project_dir,
+ os.path.join(element_dir, 'config.bst'))
+
+ project = {
+ 'name': 'test',
+ 'element-path': 'elements',
+ 'aliases': {
+ alias: upstream_map + "/"
+ },
+ '(@)': [
+ 'config.bst:mirrors.yml'
+ ]
+ }
+ project_file = os.path.join(project_dir, 'project.conf')
+ _yaml.dump(project, project_file)
+
+ # Now make the upstream unavailable.
+ os.rename(upstream_repo.repo, '{}.bak'.format(upstream_repo.repo))
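+    # The fetch is expected to fail here: the element is itself a junction
+    # and its mirrors are only provided through a cross-junction include,
+    # which (presumably) cannot be applied to the junction's own sources.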
+ result = cli.run(project=project_dir, args=['source', 'fetch', element_name])
+ result.assert_main_error(ErrorDomain.STREAM, None)
+ # Now make the upstream available again.
+ os.rename('{}.bak'.format(upstream_repo.repo), upstream_repo.repo)
+ result = cli.run(project=project_dir, args=['source', 'fetch', element_name])
+ result.assert_success()
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_mirror_track_upstream_present(cli, tmpdir, datafiles, kind):
+ bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr')
+ dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr')
+ upstream_repodir = os.path.join(str(tmpdir), 'upstream')
+ mirror_repodir = os.path.join(str(tmpdir), 'mirror')
+ project_dir = os.path.join(str(tmpdir), 'project')
+ os.makedirs(project_dir)
+ element_dir = os.path.join(project_dir, 'elements')
+
+ # Create repo objects of the upstream and mirror
+ upstream_repo = create_repo(kind, upstream_repodir)
+ upstream_repo.create(bin_files_path)
+ mirror_repo = upstream_repo.copy(mirror_repodir)
+ upstream_ref = upstream_repo.create(dev_files_path)
+
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ upstream_repo.source_config(ref=upstream_ref)
+ ]
+ }
+
+ element_name = 'test.bst'
+ element_path = os.path.join(element_dir, element_name)
+ full_repo = element['sources'][0]['url']
+ upstream_map, repo_name = os.path.split(full_repo)
+ alias = 'foo-' + kind
+ aliased_repo = alias + ':' + repo_name
+ element['sources'][0]['url'] = aliased_repo
+ full_mirror = mirror_repo.source_config()['url']
+ mirror_map, _ = os.path.split(full_mirror)
+ os.makedirs(element_dir)
+ _yaml.dump(element, element_path)
+
+ project = {
+ 'name': 'test',
+ 'element-path': 'elements',
+ 'aliases': {
+ alias: upstream_map + "/"
+ },
+ 'mirrors': [
+ {
+ 'name': 'middle-earth',
+ 'aliases': {
+ alias: [mirror_map + "/"],
+ },
+ },
+ ]
+ }
+ project_file = os.path.join(project_dir, 'project.conf')
+ _yaml.dump(project, project_file)
+
+ result = cli.run(project=project_dir, args=['source', 'track', element_name])
+ result.assert_success()
+
+    # Tracking tries the upstream first, so check that the ref comes from the upstream.
+ new_element = _yaml.load(element_path)
+ source = _yaml.node_get(new_element, dict, 'sources', [0])
+ if 'ref' in source:
+ assert _yaml.node_get(source, str, 'ref') == upstream_ref
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_mirror_track_upstream_absent(cli, tmpdir, datafiles, kind):
+ bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr')
+ dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr')
+ upstream_repodir = os.path.join(str(tmpdir), 'upstream')
+ mirror_repodir = os.path.join(str(tmpdir), 'mirror')
+ project_dir = os.path.join(str(tmpdir), 'project')
+ os.makedirs(project_dir)
+ element_dir = os.path.join(project_dir, 'elements')
+
+ # Create repo objects of the upstream and mirror
+ upstream_repo = create_repo(kind, upstream_repodir)
+ upstream_ref = upstream_repo.create(bin_files_path)
+ mirror_repo = upstream_repo.copy(mirror_repodir)
+ mirror_ref = upstream_ref
+ upstream_ref = upstream_repo.create(dev_files_path)
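+    # The mirror was copied before the second commit, so it only contains the
+    # first ref; with the upstream alias unreachable, tracking is expected to
+    # fall back to the mirror and land on mirror_ref rather than upstream_ref.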
+
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ upstream_repo.source_config(ref=upstream_ref)
+ ]
+ }
+
+ element_name = 'test.bst'
+ element_path = os.path.join(element_dir, element_name)
+ full_repo = element['sources'][0]['url']
+ _, repo_name = os.path.split(full_repo)
+ alias = 'foo-' + kind
+ aliased_repo = alias + ':' + repo_name
+ element['sources'][0]['url'] = aliased_repo
+ full_mirror = mirror_repo.source_config()['url']
+ mirror_map, _ = os.path.split(full_mirror)
+ os.makedirs(element_dir)
+ _yaml.dump(element, element_path)
+
+ project = {
+ 'name': 'test',
+ 'element-path': 'elements',
+ 'aliases': {
+ alias: 'http://www.example.com/'
+ },
+ 'mirrors': [
+ {
+ 'name': 'middle-earth',
+ 'aliases': {
+ alias: [mirror_map + "/"],
+ },
+ },
+ ]
+ }
+ project_file = os.path.join(project_dir, 'project.conf')
+ _yaml.dump(project, project_file)
+
+ result = cli.run(project=project_dir, args=['source', 'track', element_name])
+ result.assert_success()
+
+ # Check that tracking fell back to the mirror
+ new_element = _yaml.load(element_path)
+ source = _yaml.node_get(new_element, dict, 'sources', [0])
+ if 'ref' in source:
+ assert _yaml.node_get(source, str, 'ref') == mirror_ref
diff --git a/src/buildstream/testing/_sourcetests/project/elements/base.bst b/src/buildstream/testing/_sourcetests/project/elements/base.bst
new file mode 100644
index 000000000..428afa736
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/base.bst
@@ -0,0 +1,5 @@
+# elements/base.bst
+
+kind: stack
+depends:
+ - base/base-alpine.bst
diff --git a/src/buildstream/testing/_sourcetests/project/elements/base/base-alpine.bst b/src/buildstream/testing/_sourcetests/project/elements/base/base-alpine.bst
new file mode 100644
index 000000000..c5833095d
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/base/base-alpine.bst
@@ -0,0 +1,17 @@
+kind: import
+
+description: |
+ Alpine Linux base for tests
+
+ Generated using the `tests/integration-tests/base/generate-base.sh` script.
+
+sources:
+ - kind: tar
+ base-dir: ''
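+    # Select the tarball matching the 'arch' project option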
+ (?):
+ - arch == "x86-64":
+ ref: 3eb559250ba82b64a68d86d0636a6b127aa5f6d25d3601a79f79214dc9703639
+ url: "alpine:integration-tests-base.v1.x86_64.tar.xz"
+ - arch == "aarch64":
+ ref: 431fb5362032ede6f172e70a3258354a8fd71fcbdeb1edebc0e20968c792329a
+ url: "alpine:integration-tests-base.v1.aarch64.tar.xz"
diff --git a/src/buildstream/testing/_sourcetests/project/elements/import-bin.bst b/src/buildstream/testing/_sourcetests/project/elements/import-bin.bst
new file mode 100644
index 000000000..a847c0c23
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/import-bin.bst
@@ -0,0 +1,4 @@
+kind: import
+sources:
+- kind: local
+ path: files/bin-files
diff --git a/src/buildstream/testing/_sourcetests/project/elements/import-dev.bst b/src/buildstream/testing/_sourcetests/project/elements/import-dev.bst
new file mode 100644
index 000000000..152a54667
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/import-dev.bst
@@ -0,0 +1,4 @@
+kind: import
+sources:
+- kind: local
+ path: files/dev-files
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/horsey.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/horsey.bst
new file mode 100644
index 000000000..bd1ffae9c
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/horsey.bst
@@ -0,0 +1,3 @@
+kind: autotools
+depends:
+ - multiple_targets/dependency/pony.bst
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/pony.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/pony.bst
new file mode 100644
index 000000000..3c29b4ea1
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/pony.bst
@@ -0,0 +1 @@
+kind: autotools
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/zebry.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/zebry.bst
new file mode 100644
index 000000000..98447ab52
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/dependency/zebry.bst
@@ -0,0 +1,3 @@
+kind: autotools
+depends:
+ - multiple_targets/dependency/horsey.bst
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/0.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/0.bst
new file mode 100644
index 000000000..a99be06a0
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/0.bst
@@ -0,0 +1,7 @@
+kind: autotools
+description: Root node
+depends:
+ - multiple_targets/order/2.bst
+ - multiple_targets/order/3.bst
+ - filename: multiple_targets/order/run.bst
+ type: runtime
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/1.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/1.bst
new file mode 100644
index 000000000..82b507a62
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/1.bst
@@ -0,0 +1,4 @@
+kind: autotools
+description: Root node
+depends:
+ - multiple_targets/order/9.bst
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/2.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/2.bst
new file mode 100644
index 000000000..ee1afae20
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/2.bst
@@ -0,0 +1,4 @@
+kind: autotools
+description: First dependency level
+depends:
+ - multiple_targets/order/3.bst
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/3.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/3.bst
new file mode 100644
index 000000000..4c3a23dab
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/3.bst
@@ -0,0 +1,6 @@
+kind: autotools
+description: Second dependency level
+depends:
+ - multiple_targets/order/4.bst
+ - multiple_targets/order/5.bst
+ - multiple_targets/order/6.bst
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/4.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/4.bst
new file mode 100644
index 000000000..b663a0b52
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/4.bst
@@ -0,0 +1,2 @@
+kind: autotools
+description: Third level dependency
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/5.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/5.bst
new file mode 100644
index 000000000..b9efcf71b
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/5.bst
@@ -0,0 +1,2 @@
+kind: autotools
+description: Fifth level dependency
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/6.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/6.bst
new file mode 100644
index 000000000..6c19d04e3
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/6.bst
@@ -0,0 +1,4 @@
+kind: autotools
+description: Fourth level dependency
+depends:
+ - multiple_targets/order/5.bst
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/7.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/7.bst
new file mode 100644
index 000000000..6805b3e6d
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/7.bst
@@ -0,0 +1,4 @@
+kind: autotools
+description: Third level dependency
+depends:
+ - multiple_targets/order/6.bst
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/8.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/8.bst
new file mode 100644
index 000000000..b8d8964a0
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/8.bst
@@ -0,0 +1,4 @@
+kind: autotools
+description: Second level dependency
+depends:
+ - multiple_targets/order/7.bst
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/9.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/9.bst
new file mode 100644
index 000000000..cc13bf3f0
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/9.bst
@@ -0,0 +1,4 @@
+kind: autotools
+description: First level dependency
+depends:
+ - multiple_targets/order/8.bst
diff --git a/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/run.bst b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/run.bst
new file mode 100644
index 000000000..9b3d2446c
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/elements/multiple_targets/order/run.bst
@@ -0,0 +1,2 @@
+kind: autotools
+description: Not a root node, yet built at the same time as root nodes
diff --git a/src/buildstream/testing/_sourcetests/project/files/bar b/src/buildstream/testing/_sourcetests/project/files/bar
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/files/bar
diff --git a/src/buildstream/testing/_sourcetests/project/files/bin-files/usr/bin/hello b/src/buildstream/testing/_sourcetests/project/files/bin-files/usr/bin/hello
new file mode 100755
index 000000000..f534a4083
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/files/bin-files/usr/bin/hello
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "Hello !"
diff --git a/src/buildstream/testing/_sourcetests/project/files/dev-files/usr/include/pony.h b/src/buildstream/testing/_sourcetests/project/files/dev-files/usr/include/pony.h
new file mode 100644
index 000000000..40bd0c2e7
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/files/dev-files/usr/include/pony.h
@@ -0,0 +1,12 @@
+#ifndef __PONY_H__
+#define __PONY_H__
+
+#define PONY_BEGIN "Once upon a time, there was a pony."
+#define PONY_END "And they lived happily ever after, the end."
+
+#define MAKE_PONY(story) \
+ PONY_BEGIN \
+ story \
+ PONY_END
+
+#endif /* __PONY_H__ */
diff --git a/src/buildstream/testing/_sourcetests/project/files/etc-files/etc/buildstream/config b/src/buildstream/testing/_sourcetests/project/files/etc-files/etc/buildstream/config
new file mode 100644
index 000000000..04204c7c9
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/files/etc-files/etc/buildstream/config
@@ -0,0 +1 @@
+config
diff --git a/src/buildstream/testing/_sourcetests/project/files/foo b/src/buildstream/testing/_sourcetests/project/files/foo
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/files/foo
diff --git a/src/buildstream/testing/_sourcetests/project/files/source-bundle/llamas.txt b/src/buildstream/testing/_sourcetests/project/files/source-bundle/llamas.txt
new file mode 100644
index 000000000..f98b24871
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/files/source-bundle/llamas.txt
@@ -0,0 +1 @@
+llamas
diff --git a/src/buildstream/testing/_sourcetests/project/files/sub-project/elements/import-etc.bst b/src/buildstream/testing/_sourcetests/project/files/sub-project/elements/import-etc.bst
new file mode 100644
index 000000000..f0171990e
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/files/sub-project/elements/import-etc.bst
@@ -0,0 +1,4 @@
+kind: import
+sources:
+- kind: local
+ path: files/etc-files
diff --git a/src/buildstream/testing/_sourcetests/project/files/sub-project/files/etc-files/etc/animal.conf b/src/buildstream/testing/_sourcetests/project/files/sub-project/files/etc-files/etc/animal.conf
new file mode 100644
index 000000000..db8c36cba
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/files/sub-project/files/etc-files/etc/animal.conf
@@ -0,0 +1 @@
+animal=Pony
diff --git a/src/buildstream/testing/_sourcetests/project/files/sub-project/project.conf b/src/buildstream/testing/_sourcetests/project/files/sub-project/project.conf
new file mode 100644
index 000000000..bbb8414a3
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/files/sub-project/project.conf
@@ -0,0 +1,4 @@
+# Project config for frontend build test
+name: subtest
+
+element-path: elements
diff --git a/src/buildstream/testing/_sourcetests/project/project.conf b/src/buildstream/testing/_sourcetests/project/project.conf
new file mode 100644
index 000000000..05b68bfeb
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/project/project.conf
@@ -0,0 +1,27 @@
+# Project config for frontend build test
+name: test
+element-path: elements
+aliases:
+ alpine: https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/
+ project_dir: file://{project_dir}
+options:
+ linux:
+ type: bool
+ description: Whether to expect a linux platform
+ default: True
+ arch:
+ type: arch
+ description: Current architecture
+ values:
+ - x86-64
+ - aarch64
+split-rules:
+ test:
+ - |
+ /tests
+ - |
+ /tests/*
+
+fatal-warnings:
+- bad-element-suffix
+- bad-characters-in-name
diff --git a/src/buildstream/testing/_sourcetests/source_determinism.py b/src/buildstream/testing/_sourcetests/source_determinism.py
new file mode 100644
index 000000000..3a5c264d9
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/source_determinism.py
@@ -0,0 +1,114 @@
+#
+# Copyright (C) 2018 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Pylint doesn't play well with fixtures and dependency injection from pytest
+# pylint: disable=redefined-outer-name
+
+import os
+import pytest
+
+from buildstream import _yaml
+from .._utils.site import HAVE_SANDBOX
+from .. import create_repo, ALL_REPO_KINDS
+from .. import cli # pylint: disable=unused-import
+
+# Project directory
+TOP_DIR = os.path.dirname(os.path.realpath(__file__))
+DATA_DIR = os.path.join(TOP_DIR, 'project')
+
+
+def create_test_file(*path, mode=0o644, content='content\n'):
+ path = os.path.join(*path)
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+ with open(path, 'w') as f:
+ f.write(content)
+ os.fchmod(f.fileno(), mode)
+
+
+def create_test_directory(*path, mode=0o644):
+ create_test_file(*path, '.keep', content='')
+ path = os.path.join(*path)
+ os.chmod(path, mode)
+
+
+@pytest.mark.integration
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", [*ALL_REPO_KINDS])
+@pytest.mark.skipif(not HAVE_SANDBOX, reason='Only available with a functioning sandbox')
+def test_deterministic_source_umask(cli, tmpdir, datafiles, kind):
+ project = str(datafiles)
+ element_name = 'list.bst'
+ element_path = os.path.join(project, 'elements', element_name)
+ repodir = os.path.join(str(tmpdir), 'repo')
+ sourcedir = os.path.join(project, 'source')
+
+ create_test_file(sourcedir, 'a.txt', mode=0o700)
+ create_test_file(sourcedir, 'b.txt', mode=0o755)
+ create_test_file(sourcedir, 'c.txt', mode=0o600)
+ create_test_file(sourcedir, 'd.txt', mode=0o400)
+ create_test_file(sourcedir, 'e.txt', mode=0o644)
+ create_test_file(sourcedir, 'f.txt', mode=0o4755)
+ create_test_file(sourcedir, 'g.txt', mode=0o2755)
+ create_test_file(sourcedir, 'h.txt', mode=0o1755)
+ create_test_directory(sourcedir, 'dir-a', mode=0o0700)
+ create_test_directory(sourcedir, 'dir-c', mode=0o0755)
+ create_test_directory(sourcedir, 'dir-d', mode=0o4755)
+ create_test_directory(sourcedir, 'dir-e', mode=0o2755)
+ create_test_directory(sourcedir, 'dir-f', mode=0o1755)
+
+ repo = create_repo(kind, repodir)
+ ref = repo.create(sourcedir)
+ source = repo.source_config(ref=ref)
+ element = {
+ 'kind': 'manual',
+ 'depends': [
+ {
+ 'filename': 'base.bst',
+ 'type': 'build'
+ }
+ ],
+ 'sources': [
+ source
+ ],
+ 'config': {
+ 'install-commands': [
+ 'ls -l >"%{install-root}/ls-l"'
+ ]
+ }
+ }
+ _yaml.dump(element, element_path)
+
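+    # Build and check out the element under the given umask, and return the
+    # 'ls -l' listing captured by the install commands; identical listings
+    # across umasks show that source staging does not leak the host umask.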
+ def get_value_for_umask(umask):
+ checkoutdir = os.path.join(str(tmpdir), 'checkout-{}'.format(umask))
+
+ old_umask = os.umask(umask)
+
+ try:
+ result = cli.run(project=project, args=['build', element_name])
+ result.assert_success()
+
+ result = cli.run(project=project, args=['artifact', 'checkout', element_name, '--directory', checkoutdir])
+ result.assert_success()
+
+ with open(os.path.join(checkoutdir, 'ls-l'), 'r') as f:
+ return f.read()
+ finally:
+ os.umask(old_umask)
+ cli.remove_artifact_from_cache(project, element_name)
+
+ assert get_value_for_umask(0o022) == get_value_for_umask(0o077)
diff --git a/src/buildstream/testing/_sourcetests/track.py b/src/buildstream/testing/_sourcetests/track.py
new file mode 100644
index 000000000..668ea29e5
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/track.py
@@ -0,0 +1,420 @@
+#
+# Copyright (C) 2018 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Pylint doesn't play well with fixtures and dependency injection from pytest
+# pylint: disable=redefined-outer-name
+
+import os
+import pytest
+
+from buildstream import _yaml
+from buildstream._exceptions import ErrorDomain
+from .._utils import generate_junction, configure_project
+from .. import create_repo, ALL_REPO_KINDS
+from .. import cli # pylint: disable=unused-import
+
+# Project directory
+TOP_DIR = os.path.dirname(os.path.realpath(__file__))
+DATA_DIR = os.path.join(TOP_DIR, 'project')
+
+
+def generate_element(repo, element_path, dep_name=None):
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ repo.source_config()
+ ]
+ }
+ if dep_name:
+ element['depends'] = [dep_name]
+
+ _yaml.dump(element, element_path)
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')])
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_track(cli, tmpdir, datafiles, ref_storage, kind):
+ project = str(datafiles)
+ dev_files_path = os.path.join(project, 'files', 'dev-files')
+ element_path = os.path.join(project, 'elements')
+ element_name = 'track-test-{}.bst'.format(kind)
+
+ configure_project(project, {
+ 'ref-storage': ref_storage
+ })
+
+ # Create our repo object of the given source type with
+ # the dev files, and then collect the initial ref.
+ #
+ repo = create_repo(kind, str(tmpdir))
+ repo.create(dev_files_path)
+
+ # Generate the element
+ generate_element(repo, os.path.join(element_path, element_name))
+
+    # Assert that there is no reference, a track & fetch is needed
+ assert cli.get_element_state(project, element_name) == 'no reference'
+
+ # Now first try to track it
+ result = cli.run(project=project, args=['source', 'track', element_name])
+ result.assert_success()
+
+ # And now fetch it: The Source has probably already cached the
+ # latest ref locally, but it is not required to have cached
+ # the associated content of the latest ref at track time, that
+ # is the job of fetch.
+ result = cli.run(project=project, args=['source', 'fetch', element_name])
+ result.assert_success()
+
+ # Assert that we are now buildable because the source is
+ # now cached.
+ assert cli.get_element_state(project, element_name) == 'buildable'
+
+ # Assert there was a project.refs created, depending on the configuration
+ if ref_storage == 'project.refs':
+ assert os.path.exists(os.path.join(project, 'project.refs'))
+ else:
+ assert not os.path.exists(os.path.join(project, 'project.refs'))
+
+
+# NOTE:
+#
+# This test checks that recursive tracking works by observing
+# element states after running a recursive tracking operation.
+#
+# However, this test is ALSO valuable as it stresses the source
+# plugins in a situation where many source plugins are operating
+# at once on the same backing repository.
+#
+# Do not change this test to use a separate 'Repo' per element,
+# as that would defeat the purpose of the stress test; if a separate
+# repo per element is needed, please refactor that aspect into another test.
+#
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("amount", [(1), (10)])
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_track_recurse(cli, tmpdir, datafiles, kind, amount):
+ project = str(datafiles)
+ dev_files_path = os.path.join(project, 'files', 'dev-files')
+ element_path = os.path.join(project, 'elements')
+
+ # Try to actually launch as many fetch jobs as possible at the same time
+ #
+ # This stresses the Source plugins and helps to ensure that
+ # they handle concurrent access to the store correctly.
+ cli.configure({
+ 'scheduler': {
+ 'fetchers': amount,
+ }
+ })
+
+ # Create our repo object of the given source type with
+ # the dev files, and then collect the initial ref.
+ #
+ repo = create_repo(kind, str(tmpdir))
+ repo.create(dev_files_path)
+
+ # Write out our test targets
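+    # Each element depends on the previous one, forming a chain so that
+    # '--deps all' has to walk every element from the tip back to the base.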
+ element_names = []
+ last_element_name = None
+ for i in range(amount + 1):
+ element_name = 'track-test-{}-{}.bst'.format(kind, i + 1)
+ filename = os.path.join(element_path, element_name)
+
+ element_names.append(element_name)
+
+ generate_element(repo, filename, dep_name=last_element_name)
+ last_element_name = element_name
+
+    # Assert that there is no reference, a track & fetch is needed
+ states = cli.get_element_states(project, [last_element_name])
+ for element_name in element_names:
+ assert states[element_name] == 'no reference'
+
+ # Now first try to track it
+ result = cli.run(project=project, args=[
+ 'source', 'track', '--deps', 'all',
+ last_element_name])
+ result.assert_success()
+
+ # And now fetch it: The Source has probably already cached the
+ # latest ref locally, but it is not required to have cached
+ # the associated content of the latest ref at track time, that
+ # is the job of fetch.
+ result = cli.run(project=project, args=[
+ 'source', 'fetch', '--deps', 'all',
+ last_element_name])
+ result.assert_success()
+
+ # Assert that the base is buildable and the rest are waiting
+ states = cli.get_element_states(project, [last_element_name])
+ for element_name in element_names:
+ if element_name == element_names[0]:
+ assert states[element_name] == 'buildable'
+ else:
+ assert states[element_name] == 'waiting'
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_track_recurse_except(cli, tmpdir, datafiles, kind):
+ project = str(datafiles)
+ dev_files_path = os.path.join(project, 'files', 'dev-files')
+ element_path = os.path.join(project, 'elements')
+ element_dep_name = 'track-test-dep-{}.bst'.format(kind)
+ element_target_name = 'track-test-target-{}.bst'.format(kind)
+
+ # Create our repo object of the given source type with
+ # the dev files, and then collect the initial ref.
+ #
+ repo = create_repo(kind, str(tmpdir))
+ repo.create(dev_files_path)
+
+ # Write out our test targets
+ generate_element(repo, os.path.join(element_path, element_dep_name))
+ generate_element(repo, os.path.join(element_path, element_target_name),
+ dep_name=element_dep_name)
+
+    # Assert that there is no reference, a track & fetch is needed
+ states = cli.get_element_states(project, [element_target_name])
+ assert states[element_dep_name] == 'no reference'
+ assert states[element_target_name] == 'no reference'
+
+ # Now first try to track it
+ result = cli.run(project=project, args=[
+ 'source', 'track', '--deps', 'all', '--except', element_dep_name,
+ element_target_name])
+ result.assert_success()
+
+ # And now fetch it: The Source has probably already cached the
+ # latest ref locally, but it is not required to have cached
+ # the associated content of the latest ref at track time, that
+ # is the job of fetch.
+ result = cli.run(project=project, args=[
+ 'source', 'fetch', '--deps', 'none',
+ element_target_name])
+ result.assert_success()
+
+ # Assert that the dependency is buildable and the target is waiting
+ states = cli.get_element_states(project, [element_target_name])
+ assert states[element_dep_name] == 'no reference'
+ assert states[element_target_name] == 'waiting'
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')])
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_cross_junction(cli, tmpdir, datafiles, ref_storage, kind):
+ project = str(datafiles)
+ subproject_path = os.path.join(project, 'files', 'sub-project')
+ junction_path = os.path.join(project, 'elements', 'junction.bst')
+ etc_files = os.path.join(subproject_path, 'files', 'etc-files')
+ repo_element_path = os.path.join(subproject_path, 'elements',
+ 'import-etc-repo.bst')
+
+ configure_project(project, {
+ 'ref-storage': ref_storage
+ })
+
+ repo = create_repo(kind, str(tmpdir.join('element_repo')))
+ repo.create(etc_files)
+
+ generate_element(repo, repo_element_path)
+
+ generate_junction(str(tmpdir.join('junction_repo')),
+ subproject_path, junction_path, store_ref=False)
+
+ # Track the junction itself first.
+ result = cli.run(project=project, args=['source', 'track', 'junction.bst'])
+ result.assert_success()
+
+ assert cli.get_element_state(project, 'junction.bst:import-etc-repo.bst') == 'no reference'
+
+ # Track the cross junction element. -J is not given, it is implied.
+ result = cli.run(project=project, args=['source', 'track', 'junction.bst:import-etc-repo.bst'])
+
+ if ref_storage == 'inline':
+        # Tracking a cross-junction element is not allowed without project.refs.
+ result.assert_main_error(ErrorDomain.PIPELINE, 'untrackable-sources')
+ else:
+ result.assert_success()
+
+ assert cli.get_element_state(project, 'junction.bst:import-etc-repo.bst') == 'buildable'
+
+ assert os.path.exists(os.path.join(project, 'project.refs'))
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')])
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_track_include(cli, tmpdir, datafiles, ref_storage, kind):
+ project = str(datafiles)
+ dev_files_path = os.path.join(project, 'files', 'dev-files')
+ element_path = os.path.join(project, 'elements')
+ element_name = 'track-test-{}.bst'.format(kind)
+
+ configure_project(project, {
+ 'ref-storage': ref_storage
+ })
+
+ # Create our repo object of the given source type with
+ # the dev files, and then collect the initial ref.
+ #
+ repo = create_repo(kind, str(tmpdir))
+ ref = repo.create(dev_files_path)
+
+ # Generate the element
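+    # The sources live in an included fragment (sources.yml), so tracking has
+    # to write the new ref into the include file rather than the element.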
+ element = {
+ 'kind': 'import',
+ '(@)': ['elements/sources.yml']
+ }
+ sources = {
+ 'sources': [
+ repo.source_config()
+ ]
+ }
+
+ _yaml.dump(element, os.path.join(element_path, element_name))
+ _yaml.dump(sources, os.path.join(element_path, 'sources.yml'))
+
+    # Assert that there is no reference, a track & fetch is needed
+ assert cli.get_element_state(project, element_name) == 'no reference'
+
+ # Now first try to track it
+ result = cli.run(project=project, args=['source', 'track', element_name])
+ result.assert_success()
+
+ # And now fetch it: The Source has probably already cached the
+ # latest ref locally, but it is not required to have cached
+ # the associated content of the latest ref at track time, that
+ # is the job of fetch.
+ result = cli.run(project=project, args=['source', 'fetch', element_name])
+ result.assert_success()
+
+ # Assert that we are now buildable because the source is
+ # now cached.
+ assert cli.get_element_state(project, element_name) == 'buildable'
+
+ # Assert there was a project.refs created, depending on the configuration
+ if ref_storage == 'project.refs':
+ assert os.path.exists(os.path.join(project, 'project.refs'))
+ else:
+ assert not os.path.exists(os.path.join(project, 'project.refs'))
+
+ new_sources = _yaml.load(os.path.join(element_path, 'sources.yml'))
+
+ # Get all of the sources
+ assert 'sources' in new_sources
+ sources_list = _yaml.node_get(new_sources, list, 'sources')
+ assert len(sources_list) == 1
+
+ # Get the first source from the sources list
+ new_source = _yaml.node_get(new_sources, dict, 'sources', indices=[0])
+ assert 'ref' in new_source
+ assert ref == _yaml.node_get(new_source, str, 'ref')
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')])
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_track_include_junction(cli, tmpdir, datafiles, ref_storage, kind):
+ project = str(datafiles)
+ dev_files_path = os.path.join(project, 'files', 'dev-files')
+ element_path = os.path.join(project, 'elements')
+ element_name = 'track-test-{}.bst'.format(kind)
+ subproject_path = os.path.join(project, 'files', 'sub-project')
+ sub_element_path = os.path.join(subproject_path, 'elements')
+ junction_path = os.path.join(element_path, 'junction.bst')
+
+ configure_project(project, {
+ 'ref-storage': ref_storage
+ })
+
+ # Create our repo object of the given source type with
+ # the dev files, and then collect the initial ref.
+ #
+ repo = create_repo(kind, str(tmpdir.join('element_repo')))
+ repo.create(dev_files_path)
+
+ # Generate the element
+ element = {
+ 'kind': 'import',
+ '(@)': ['junction.bst:elements/sources.yml']
+ }
+ sources = {
+ 'sources': [
+ repo.source_config()
+ ]
+ }
+
+ _yaml.dump(element, os.path.join(element_path, element_name))
+ _yaml.dump(sources, os.path.join(sub_element_path, 'sources.yml'))
+
+ generate_junction(str(tmpdir.join('junction_repo')),
+ subproject_path, junction_path, store_ref=True)
+
+ result = cli.run(project=project, args=['source', 'track', 'junction.bst'])
+ result.assert_success()
+
+    # Assert that there is no reference, a track & fetch is needed
+ assert cli.get_element_state(project, element_name) == 'no reference'
+
+ # Now first try to track it
+ result = cli.run(project=project, args=['source', 'track', element_name])
+
+ # Assert there was a project.refs created, depending on the configuration
+ if ref_storage == 'inline':
+ # FIXME: We should expect an error. But only a warning is emitted
+ # result.assert_main_error(ErrorDomain.SOURCE, 'tracking-junction-fragment')
+
+ assert 'junction.bst:elements/sources.yml: Cannot track source in a fragment from a junction' in result.stderr
+ else:
+ assert os.path.exists(os.path.join(project, 'project.refs'))
+
+ # And now fetch it: The Source has probably already cached the
+ # latest ref locally, but it is not required to have cached
+ # the associated content of the latest ref at track time, that
+ # is the job of fetch.
+ result = cli.run(project=project, args=['source', 'fetch', element_name])
+ result.assert_success()
+
+ # Assert that we are now buildable because the source is
+ # now cached.
+ assert cli.get_element_state(project, element_name) == 'buildable'
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')])
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_track_junction_included(cli, tmpdir, datafiles, ref_storage, kind):
+ project = str(datafiles)
+ element_path = os.path.join(project, 'elements')
+ subproject_path = os.path.join(project, 'files', 'sub-project')
+ junction_path = os.path.join(element_path, 'junction.bst')
+
+ configure_project(project, {
+ 'ref-storage': ref_storage,
+ '(@)': ['junction.bst:test.yml']
+ })
+
+ generate_junction(str(tmpdir.join('junction_repo')),
+ subproject_path, junction_path, store_ref=False)
+
+ result = cli.run(project=project, args=['source', 'track', 'junction.bst'])
+ result.assert_success()
diff --git a/src/buildstream/testing/_sourcetests/track_cross_junction.py b/src/buildstream/testing/_sourcetests/track_cross_junction.py
new file mode 100644
index 000000000..ece3e0b8f
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/track_cross_junction.py
@@ -0,0 +1,186 @@
+#
+# Copyright (C) 2018 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Pylint doesn't play well with fixtures and dependency injection from pytest
+# pylint: disable=redefined-outer-name
+
+import os
+import pytest
+
+from buildstream import _yaml
+from .._utils import generate_junction
+from .. import create_repo, ALL_REPO_KINDS
+from .. import cli # pylint: disable=unused-import
+
+# Project directory
+TOP_DIR = os.path.dirname(os.path.realpath(__file__))
+DATA_DIR = os.path.join(TOP_DIR, 'project')
+
+
+def generate_element(repo, element_path, dep_name=None):
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ repo.source_config()
+ ]
+ }
+ if dep_name:
+ element['depends'] = [dep_name]
+
+ _yaml.dump(element, element_path)
+
+
+def generate_import_element(tmpdir, kind, project, name):
+ element_name = 'import-{}.bst'.format(name)
+ repo_element_path = os.path.join(project, 'elements', element_name)
+ files = str(tmpdir.join("imported_files_{}".format(name)))
+ os.makedirs(files)
+
+ with open(os.path.join(files, '{}.txt'.format(name)), 'w') as f:
+ f.write(name)
+
+ repo = create_repo(kind, str(tmpdir.join('element_{}_repo'.format(name))))
+ repo.create(files)
+
+ generate_element(repo, repo_element_path)
+
+ return element_name
+
+
+def generate_project(tmpdir, name, config=None):
+ if config is None:
+ config = {}
+
+ project_name = 'project-{}'.format(name)
+ subproject_path = os.path.join(str(tmpdir.join(project_name)))
+ os.makedirs(os.path.join(subproject_path, 'elements'))
+
+ project_conf = {
+ 'name': name,
+ 'element-path': 'elements'
+ }
+ project_conf.update(config)
+ _yaml.dump(project_conf, os.path.join(subproject_path, 'project.conf'))
+
+ return project_name, subproject_path
+
+
+def generate_simple_stack(project, name, dependencies):
+ element_name = '{}.bst'.format(name)
+ element_path = os.path.join(project, 'elements', element_name)
+ element = {
+ 'kind': 'stack',
+ 'depends': dependencies
+ }
+ _yaml.dump(element, element_path)
+
+ return element_name
+
+
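+# Generate a stack element in 'project' which depends on 'import_name' from
+# the junctioned subproject, pulling the cross-junction element into the
+# pipeline of the top level project.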
+def generate_cross_element(project, subproject_name, import_name):
+ basename, _ = os.path.splitext(import_name)
+ return generate_simple_stack(project, 'import-{}-{}'.format(subproject_name, basename),
+ [{
+ 'junction': '{}.bst'.format(subproject_name),
+ 'filename': import_name
+ }])
+
+
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_cross_junction_multiple_projects(cli, tmpdir, kind):
+ tmpdir = tmpdir.join(kind)
+
+ # Generate 3 projects: main, a, b
+ _, project = generate_project(tmpdir, 'main', {'ref-storage': 'project.refs'})
+ project_a, project_a_path = generate_project(tmpdir, 'a')
+ project_b, project_b_path = generate_project(tmpdir, 'b')
+
+ # Generate an element with a trackable source for each project
+ element_a = generate_import_element(tmpdir, kind, project_a_path, 'a')
+ element_b = generate_import_element(tmpdir, kind, project_b_path, 'b')
+ element_c = generate_import_element(tmpdir, kind, project, 'c')
+
+ # Create some indirections to the elements with dependencies to test --deps
+ stack_a = generate_simple_stack(project_a_path, 'stack-a', [element_a])
+ stack_b = generate_simple_stack(project_b_path, 'stack-b', [element_b])
+
+ # Create junctions for projects a and b in main.
+ junction_a = '{}.bst'.format(project_a)
+ junction_a_path = os.path.join(project, 'elements', junction_a)
+ generate_junction(tmpdir.join('repo_a'), project_a_path, junction_a_path, store_ref=False)
+
+ junction_b = '{}.bst'.format(project_b)
+ junction_b_path = os.path.join(project, 'elements', junction_b)
+ generate_junction(tmpdir.join('repo_b'), project_b_path, junction_b_path, store_ref=False)
+
+ # Track the junctions.
+ result = cli.run(project=project, args=['source', 'track', junction_a, junction_b])
+ result.assert_success()
+
+ # Import elements from a and b in to main.
+ imported_a = generate_cross_element(project, project_a, stack_a)
+ imported_b = generate_cross_element(project, project_b, stack_b)
+
+ # Generate a top level stack depending on everything
+ all_bst = generate_simple_stack(project, 'all', [imported_a, imported_b, element_c])
+
+ # Track without following junctions. But explicitly also track the elements in project a.
+ result = cli.run(project=project, args=['source', 'track',
+ '--deps', 'all',
+ all_bst,
+ '{}:{}'.format(junction_a, stack_a)])
+ result.assert_success()
+
+ # Elements in project b should not be tracked. But elements in project a and main should.
+ expected = [element_c,
+ '{}:{}'.format(junction_a, element_a)]
+ assert set(result.get_tracked_elements()) == set(expected)
+
+
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_track_exceptions(cli, tmpdir, kind):
+ tmpdir = tmpdir.join(kind)
+
+ _, project = generate_project(tmpdir, 'main', {'ref-storage': 'project.refs'})
+ project_a, project_a_path = generate_project(tmpdir, 'a')
+
+ element_a = generate_import_element(tmpdir, kind, project_a_path, 'a')
+ element_b = generate_import_element(tmpdir, kind, project_a_path, 'b')
+
+ all_bst = generate_simple_stack(project_a_path, 'all', [element_a,
+ element_b])
+
+ junction_a = '{}.bst'.format(project_a)
+ junction_a_path = os.path.join(project, 'elements', junction_a)
+ generate_junction(tmpdir.join('repo_a'), project_a_path, junction_a_path, store_ref=False)
+
+ result = cli.run(project=project, args=['source', 'track', junction_a])
+ result.assert_success()
+
+ imported_b = generate_cross_element(project, project_a, element_b)
+ indirection = generate_simple_stack(project, 'indirection', [imported_b])
+
+ result = cli.run(project=project,
+ args=['source', 'track', '--deps', 'all',
+ '--except', indirection,
+ '{}:{}'.format(junction_a, all_bst), imported_b])
+ result.assert_success()
+
+ expected = ['{}:{}'.format(junction_a, element_a),
+ '{}:{}'.format(junction_a, element_b)]
+ assert set(result.get_tracked_elements()) == set(expected)
diff --git a/src/buildstream/testing/_sourcetests/workspace.py b/src/buildstream/testing/_sourcetests/workspace.py
new file mode 100644
index 000000000..5218f8f1e
--- /dev/null
+++ b/src/buildstream/testing/_sourcetests/workspace.py
@@ -0,0 +1,161 @@
+#
+# Copyright (C) 2018 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Pylint doesn't play well with fixtures and dependency injection from pytest
+# pylint: disable=redefined-outer-name
+
+import os
+import shutil
+import pytest
+
+from buildstream import _yaml
+from .. import create_repo, ALL_REPO_KINDS
+from .. import cli # pylint: disable=unused-import
+
+# Project directory
+TOP_DIR = os.path.dirname(os.path.realpath(__file__))
+DATA_DIR = os.path.join(TOP_DIR, 'project')
+
+
+class WorkspaceCreator():
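+    # Helper used by the shared workspace tests: optionally copies the test
+    # project, generates workspace elements for one or more source kinds, and
+    # opens workspaces for them through the CLI.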
+ def __init__(self, cli, tmpdir, datafiles, project_path=None):
+ self.cli = cli
+ self.tmpdir = tmpdir
+ self.datafiles = datafiles
+
+ if not project_path:
+ project_path = str(datafiles)
+ else:
+ shutil.copytree(str(datafiles), project_path)
+
+ self.project_path = project_path
+ self.bin_files_path = os.path.join(project_path, 'files', 'bin-files')
+
+ self.workspace_cmd = os.path.join(self.project_path, 'workspace_cmd')
+
+ def create_workspace_element(self, kind, track, suffix='', workspace_dir=None,
+ element_attrs=None):
+ element_name = 'workspace-test-{}{}.bst'.format(kind, suffix)
+ element_path = os.path.join(self.project_path, 'elements')
+ if not workspace_dir:
+ workspace_dir = os.path.join(self.workspace_cmd, element_name)
+ if workspace_dir[-4:] == '.bst':
+ workspace_dir = workspace_dir[:-4]
+
+ # Create our repo object of the given source type with
+ # the bin files, and then collect the initial ref.
+ repo = create_repo(kind, str(self.tmpdir))
+ ref = repo.create(self.bin_files_path)
+ if track:
+ ref = None
+
+ # Write out our test target
+ element = {
+ 'kind': 'import',
+ 'sources': [
+ repo.source_config(ref=ref)
+ ]
+ }
+ if element_attrs:
+ element = {**element, **element_attrs}
+ _yaml.dump(element,
+ os.path.join(element_path,
+ element_name))
+ return element_name, element_path, workspace_dir
+
+ def create_workspace_elements(self, kinds, track, suffixs=None, workspace_dir_usr=None,
+ element_attrs=None):
+
+ element_tuples = []
+
+ if suffixs is None:
+ suffixs = ['', ] * len(kinds)
+ else:
+ if len(suffixs) != len(kinds):
+                raise ValueError("suffixs must be the same length as kinds")
+
+ for suffix, kind in zip(suffixs, kinds):
+ element_name, _, workspace_dir = \
+ self.create_workspace_element(kind, track, suffix, workspace_dir_usr,
+ element_attrs)
+ element_tuples.append((element_name, workspace_dir))
+
+ # Assert that there is no reference, a track & fetch is needed
+ states = self.cli.get_element_states(self.project_path, [
+ e for e, _ in element_tuples
+ ])
+ if track:
+            assert all(states[e] == 'no reference' for e, _ in element_tuples)
+ else:
+            assert all(states[e] == 'fetch needed' for e, _ in element_tuples)
+
+ return element_tuples
+
+ def open_workspaces(self, kinds, track, suffixs=None, workspace_dir=None,
+ element_attrs=None, no_checkout=False):
+
+ element_tuples = self.create_workspace_elements(kinds, track, suffixs, workspace_dir,
+ element_attrs)
+ os.makedirs(self.workspace_cmd, exist_ok=True)
+
+ # Now open the workspace, this should have the effect of automatically
+ # tracking & fetching the source from the repo.
+ args = ['workspace', 'open']
+ if track:
+ args.append('--track')
+ if no_checkout:
+ args.append('--no-checkout')
+ if workspace_dir is not None:
+ assert len(element_tuples) == 1, "test logic error"
+ _, workspace_dir = element_tuples[0]
+ args.extend(['--directory', workspace_dir])
+
+ args.extend([element_name for element_name, workspace_dir_suffix in element_tuples])
+ result = self.cli.run(cwd=self.workspace_cmd, project=self.project_path, args=args)
+
+ result.assert_success()
+
+ if not no_checkout:
+ # Assert that we are now buildable because the source is now cached.
+ states = self.cli.get_element_states(self.project_path, [
+ e for e, _ in element_tuples
+ ])
+            assert all(states[e] == 'buildable' for e, _ in element_tuples)
+
+ # Check that the executable hello file is found in each workspace
+ for _, workspace in element_tuples:
+ filename = os.path.join(workspace, 'usr', 'bin', 'hello')
+ assert os.path.exists(filename)
+
+ return element_tuples
+
+
+def open_workspace(cli, tmpdir, datafiles, kind, track, suffix='', workspace_dir=None,
+ project_path=None, element_attrs=None, no_checkout=False):
+ workspace_object = WorkspaceCreator(cli, tmpdir, datafiles, project_path)
+ workspaces = workspace_object.open_workspaces((kind, ), track, (suffix, ), workspace_dir,
+ element_attrs, no_checkout)
+ assert len(workspaces) == 1
+ element_name, workspace = workspaces[0]
+ return element_name, workspace_object.project_path, workspace
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", ALL_REPO_KINDS)
+def test_open(cli, tmpdir, datafiles, kind):
+ open_workspace(cli, tmpdir, datafiles, kind, False)
diff --git a/src/buildstream/testing/_utils/__init__.py b/src/buildstream/testing/_utils/__init__.py
new file mode 100644
index 000000000..b419d72b7
--- /dev/null
+++ b/src/buildstream/testing/_utils/__init__.py
@@ -0,0 +1,10 @@
+import os
+
+from buildstream import _yaml
+from .junction import generate_junction
+
+
+def configure_project(path, config):
+ config['name'] = 'test'
+ config['element-path'] = 'elements'
+ _yaml.dump(config, os.path.join(path, 'project.conf'))
diff --git a/src/buildstream/testing/_utils/junction.py b/src/buildstream/testing/_utils/junction.py
new file mode 100644
index 000000000..ca059eb8b
--- /dev/null
+++ b/src/buildstream/testing/_utils/junction.py
@@ -0,0 +1,83 @@
+import subprocess
+import pytest
+
+from buildstream import _yaml
+from .. import Repo
+from .site import HAVE_GIT, GIT, GIT_ENV
+
+
+# generate_junction()
+#
+# Generates a junction element with a git repository
+#
+# Args:
+# tmpdir: The tmpdir fixture, for storing the generated git repo
+# subproject_path: The path for the subproject, to add to the git repo
+# junction_path: The location to store the generated junction element
+# store_ref: Whether to store the ref in the junction.bst file
+#
+# Returns:
+# (str): The ref
+#
+def generate_junction(tmpdir, subproject_path, junction_path, *, store_ref=True):
+ # Create a repo to hold the subproject and generate
+ # a junction element for it
+ #
+ repo = _SimpleGit(str(tmpdir))
+ source_ref = ref = repo.create(subproject_path)
+ if not store_ref:
+ source_ref = None
+
+ element = {
+ 'kind': 'junction',
+ 'sources': [
+ repo.source_config(ref=source_ref)
+ ]
+ }
+ _yaml.dump(element, junction_path)
+
+ return ref
+
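+# A minimal usage sketch (illustrative only, not part of the original change):
+# how a test might generate a junction for a subproject stored inside the test
+# project. The 'files/sub-project' layout used here is hypothetical.
+def _example_generate_junction(tmpdir, project_dir):
+    import os
+    subproject_path = os.path.join(project_dir, 'files', 'sub-project')
+    junction_path = os.path.join(project_dir, 'elements', 'junction.bst')
+    # Storing the ref means the junction can be loaded without tracking first
+    return generate_junction(tmpdir, subproject_path, junction_path, store_ref=True)
+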
+
+# A barebones Git Repo class to use for generating junctions
+class _SimpleGit(Repo):
+ def __init__(self, directory, subdir='repo'):
+ if not HAVE_GIT:
+ pytest.skip('git is not available')
+ super().__init__(directory, subdir)
+
+ def create(self, directory):
+ self.copy_directory(directory, self.repo)
+ self._run_git('init', '.')
+ self._run_git('add', '.')
+ self._run_git('commit', '-m', 'Initial commit')
+ return self.latest_commit()
+
+ def latest_commit(self):
+ return self._run_git(
+ 'rev-parse', 'HEAD',
+ stdout=subprocess.PIPE,
+ universal_newlines=True,
+ ).stdout.strip()
+
+ def source_config(self, ref=None, checkout_submodules=None):
+ config = {
+ 'kind': 'git',
+ 'url': 'file://' + self.repo,
+ 'track': 'master'
+ }
+ if ref is not None:
+ config['ref'] = ref
+ if checkout_submodules is not None:
+ config['checkout-submodules'] = checkout_submodules
+
+ return config
+
+ def _run_git(self, *args, **kwargs):
+ argv = [GIT]
+ argv.extend(args)
+ if 'env' not in kwargs:
+ kwargs['env'] = dict(GIT_ENV, PWD=self.repo)
+ kwargs.setdefault('cwd', self.repo)
+ kwargs.setdefault('check', True)
+ return subprocess.run(argv, **kwargs)
diff --git a/src/buildstream/testing/_utils/site.py b/src/buildstream/testing/_utils/site.py
new file mode 100644
index 000000000..54c5b467b
--- /dev/null
+++ b/src/buildstream/testing/_utils/site.py
@@ -0,0 +1,46 @@
+# Some facts about the execution site are resolved here once,
+# so we don't have to repeat this everywhere
+#
+import os
+import sys
+import platform
+
+from buildstream import _site, utils, ProgramNotFoundError
+
+
+try:
+ GIT = utils.get_host_tool('git')
+ HAVE_GIT = True
+ GIT_ENV = {
+ 'GIT_AUTHOR_DATE': '1320966000 +0200',
+ 'GIT_AUTHOR_NAME': 'tomjon',
+ 'GIT_AUTHOR_EMAIL': 'tom@jon.com',
+ 'GIT_COMMITTER_DATE': '1320966000 +0200',
+ 'GIT_COMMITTER_NAME': 'tomjon',
+ 'GIT_COMMITTER_EMAIL': 'tom@jon.com'
+ }
+except ProgramNotFoundError:
+ GIT = None
+ HAVE_GIT = False
+ GIT_ENV = dict()
+
+try:
+ utils.get_host_tool('bwrap')
+ HAVE_BWRAP = True
+ HAVE_BWRAP_JSON_STATUS = _site.get_bwrap_version() >= (0, 3, 2)
+except ProgramNotFoundError:
+ HAVE_BWRAP = False
+ HAVE_BWRAP_JSON_STATUS = False
+
+IS_LINUX = os.getenv('BST_FORCE_BACKEND', sys.platform).startswith('linux')
+IS_WSL = (IS_LINUX and 'Microsoft' in platform.uname().release)
+IS_WINDOWS = (os.name == 'nt')
+
+if not IS_LINUX:
+    HAVE_SANDBOX = True        # fall back to a chroot sandbox on unix
+elif IS_WSL:
+ HAVE_SANDBOX = False # Sandboxes are inoperable under WSL due to lack of FUSE
+elif IS_LINUX and HAVE_BWRAP:
+ HAVE_SANDBOX = True
+else:
+ HAVE_SANDBOX = False
diff --git a/src/buildstream/testing/integration.py b/src/buildstream/testing/integration.py
new file mode 100644
index 000000000..01635de74
--- /dev/null
+++ b/src/buildstream/testing/integration.py
@@ -0,0 +1,97 @@
+#
+# Copyright (C) 2017 Codethink Limited
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+"""
+Integration - tools for inspecting the output of plugin integration tests
+=========================================================================
+
+This module contains utilities for inspecting the artifacts produced during
+integration tests.
+"""
+
+import os
+import shutil
+import tempfile
+
+import pytest
+
+
+# Return a list of files relative to the given directory
+def walk_dir(root):
+ for dirname, dirnames, filenames in os.walk(root):
+ # ensure consistent traversal order, needed for consistent
+ # handling of symlinks.
+ dirnames.sort()
+ filenames.sort()
+
+ # print path to all subdirectories first.
+ for subdirname in dirnames:
+ yield os.path.join(dirname, subdirname)[len(root):]
+
+ # print path to all filenames.
+ for filename in filenames:
+ yield os.path.join(dirname, filename)[len(root):]
+
+
+# Ensure that a directory contains the given filenames.
+def assert_contains(directory, expected):
+ missing = set(expected)
+ missing.difference_update(walk_dir(directory))
+ if missing:
+ raise AssertionError("Missing {} expected elements from list: {}"
+ .format(len(missing), missing))
+
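+# A minimal usage sketch (illustrative only): after checking out an element's
+# artifact to some directory, a test can verify the expected manifest. The
+# file list shown here is hypothetical.
+def _example_assert_contains(checkout_dir):
+    assert_contains(checkout_dir, [
+        '/usr',
+        '/usr/bin',
+        '/usr/bin/hello',
+    ])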
+
+class IntegrationCache:
+
+ def __init__(self, cache):
+ self.root = os.path.abspath(cache)
+ os.makedirs(cache, exist_ok=True)
+
+ # Use the same sources every time
+ self.sources = os.path.join(self.root, 'sources')
+
+ # Create a temp directory for the duration of the test for
+ # the artifacts directory
+ try:
+ self.cachedir = tempfile.mkdtemp(dir=self.root, prefix='cache-')
+ except OSError as e:
+            raise AssertionError("Unable to create test directory!") from e
+
+
+@pytest.fixture(scope='session')
+def integration_cache(request):
+ # Set the cache dir to the INTEGRATION_CACHE variable, or the
+ # default if that is not set.
+ if 'INTEGRATION_CACHE' in os.environ:
+ cache_dir = os.environ['INTEGRATION_CACHE']
+ else:
+ cache_dir = os.path.abspath('./integration-cache')
+
+ cache = IntegrationCache(cache_dir)
+
+ yield cache
+
+ # Clean up the artifacts after each test session - we only want to
+ # cache sources between tests
+ try:
+ shutil.rmtree(cache.cachedir)
+ except FileNotFoundError:
+ pass
+ try:
+ shutil.rmtree(os.path.join(cache.root, 'cas'))
+ except FileNotFoundError:
+ pass
diff --git a/src/buildstream/testing/repo.py b/src/buildstream/testing/repo.py
new file mode 100644
index 000000000..c1538685d
--- /dev/null
+++ b/src/buildstream/testing/repo.py
@@ -0,0 +1,109 @@
+#
+# Copyright (C) 2016-2018 Codethink Limited
+# Copyright (C) 2019 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Repo - Utility class for testing source plugins
+===============================================
+
+
+"""
+import os
+import shutil
+
+
+class Repo():
+ """Repo()
+
+ Abstract class providing scaffolding for generating data to be
+ used with various sources. Subclasses of Repo may be registered to
+ run through the suite of generic source plugin tests provided in
+ buildstream.testing.
+
+ Args:
+ directory (str): The base temp directory for the test
+ subdir (str): The subdir for the repo, in case there is more than one
+
+ """
+ def __init__(self, directory, subdir='repo'):
+
+ # The working directory for the repo object
+ #
+ self.directory = os.path.abspath(directory)
+
+ # The directory the actual repo will be stored in
+ self.repo = os.path.join(self.directory, subdir)
+
+ os.makedirs(self.repo, exist_ok=True)
+
+ def create(self, directory):
+ """Create a repository in self.directory and add the initial content
+
+ Args:
+ directory: A directory with content to commit
+
+ Returns:
+            (object): A new ref corresponding to this commit, which can
+ be passed as the ref in the Repo.source_config() API.
+ """
+        raise NotImplementedError("create method has not been implemented")
+
+ def source_config(self, ref=None):
+ """
+ Args:
+            ref (object): An optional abstract ref object, usually a string.
+
+ Returns:
+ (dict): A configuration which can be serialized as a
+ source when generating an element file on the fly
+
+ """
+        raise NotImplementedError("source_config method has not been implemented")
+
+ def copy_directory(self, src, dest):
+ """ Copies the content of src to the directory dest
+
+ Like shutil.copytree(), except dest is expected
+ to exist.
+
+ Args:
+ src (str): The source directory
+ dest (str): The destination directory
+ """
+ for filename in os.listdir(src):
+ src_path = os.path.join(src, filename)
+ dest_path = os.path.join(dest, filename)
+ if os.path.isdir(src_path):
+ shutil.copytree(src_path, dest_path)
+ else:
+ shutil.copy2(src_path, dest_path)
+
+ def copy(self, dest):
+ """Creates a copy of this repository in the specified destination.
+
+ Args:
+ dest (str): The destination directory
+
+ Returns:
+ (Repo): A Repo object for the new repository.
+ """
+ subdir = self.repo[len(self.directory):].lstrip(os.sep)
+ new_dir = os.path.join(dest, subdir)
+ os.makedirs(new_dir, exist_ok=True)
+ self.copy_directory(self.repo, new_dir)
+ repo_type = type(self)
+ new_repo = repo_type(dest, subdir)
+ return new_repo
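+
+
+# A minimal sketch (illustrative only, not part of the original change) of how
+# a source plugin test might subclass Repo. The 'local' source kind is used
+# here purely to show the expected create()/source_config() contract.
+class _ExampleLocalRepo(Repo):
+
+    def create(self, directory):
+        # A 'local' source carries no real ref; just stage the content
+        self.copy_directory(directory, self.repo)
+        return None
+
+    def source_config(self, ref=None):
+        return {
+            'kind': 'local',
+            'path': self.repo
+        }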
diff --git a/src/buildstream/testing/runcli.py b/src/buildstream/testing/runcli.py
new file mode 100644
index 000000000..8b3185143
--- /dev/null
+++ b/src/buildstream/testing/runcli.py
@@ -0,0 +1,883 @@
+#
+# Copyright (C) 2017 Codethink Limited
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+"""
+runcli - Test fixtures used for running BuildStream commands
+============================================================
+
+:function:`cli` Use result = cli.run([arg1, arg2]) to run buildstream commands
+
+:function:`cli_integration` A variant of the main fixture that keeps persistent
+ artifact and source caches. It also does not use
+ the click test runner to avoid deadlock issues when
+ running `bst shell`, but unfortunately cannot produce
+ nice stacktraces.
+
+"""
+
+
+import os
+import re
+import sys
+import shutil
+import tempfile
+import itertools
+import traceback
+from contextlib import contextmanager, ExitStack
+from ruamel import yaml
+import pytest
+
+# XXX Using pytest private internals here
+#
+# We use pytest internals to capture the stdout/stderr during
+# a run of the buildstream CLI. We do this because click's
+# CliRunner convenience API (click.testing module) does not support
+# separation of stdout/stderr.
+#
+from _pytest.capture import MultiCapture, FDCapture, FDCaptureBinary
+
+# Import the main cli entrypoint
+from buildstream._frontend import cli as bst_cli
+from buildstream import _yaml
+from buildstream._cas import CASCache
+from buildstream.element import _get_normal_name, _compose_artifact_name
+
+# Special private exception accessor, for test case purposes
+from buildstream._exceptions import BstError, get_last_exception, get_last_task_error
+from buildstream._protos.buildstream.v2 import artifact_pb2
+
+
+# Wrapper for the click.testing result
+class Result():
+
+ def __init__(self,
+ exit_code=None,
+ exception=None,
+ exc_info=None,
+ output=None,
+ stderr=None):
+ self.exit_code = exit_code
+ self.exc = exception
+ self.exc_info = exc_info
+ self.output = output
+ self.stderr = stderr
+ self.unhandled_exception = False
+
+ # The last exception/error state is stored at exception
+ # creation time in BstError(), but this breaks down with
+ # recoverable errors where code blocks ignore some errors
+ # and fallback to alternative branches.
+ #
+ # For this reason, we just ignore the exception and errors
+ # in the case that the exit code reported is 0 (success).
+ #
+ if self.exit_code != 0:
+
+            # Check if buildstream failed to handle an
+            # exception; the toplevel CLI exit should always
+            # be a SystemExit exception.
+ #
+ if not isinstance(exception, SystemExit):
+ self.unhandled_exception = True
+
+ self.exception = get_last_exception()
+ self.task_error_domain, \
+ self.task_error_reason = get_last_task_error()
+ else:
+ self.exception = None
+ self.task_error_domain = None
+ self.task_error_reason = None
+
+ # assert_success()
+ #
+ # Asserts that the buildstream session completed successfully
+ #
+ # Args:
+ # fail_message (str): An optional message to override the automatic
+ # assertion error messages
+ # Raises:
+ # (AssertionError): If the session did not complete successfully
+ #
+ def assert_success(self, fail_message=''):
+ assert self.exit_code == 0, fail_message
+ assert self.exc is None, fail_message
+ assert self.exception is None, fail_message
+ assert self.unhandled_exception is False
+
+ # assert_main_error()
+ #
+ # Asserts that the buildstream session failed, and that
+ # the main process error report is as expected
+ #
+ # Args:
+ # error_domain (ErrorDomain): The domain of the error which occurred
+ # error_reason (any): The reason field of the error which occurred
+ # fail_message (str): An optional message to override the automatic
+ # assertion error messages
+ # debug (bool): If true, prints information regarding the exit state of the result()
+ # Raises:
+ # (AssertionError): If any of the assertions fail
+ #
+ def assert_main_error(self,
+ error_domain,
+ error_reason,
+ fail_message='',
+ *, debug=False):
+ if debug:
+ print(
+ """
+ Exit code: {}
+ Exception: {}
+ Domain: {}
+ Reason: {}
+ """.format(
+ self.exit_code,
+ self.exception,
+ self.exception.domain,
+ self.exception.reason
+ ))
+ assert self.exit_code == -1, fail_message
+ assert self.exc is not None, fail_message
+ assert self.exception is not None, fail_message
+ assert isinstance(self.exception, BstError), fail_message
+ assert self.unhandled_exception is False
+
+ assert self.exception.domain == error_domain, fail_message
+ assert self.exception.reason == error_reason, fail_message
+
+ # assert_task_error()
+ #
+ # Asserts that the buildstream session failed, and that
+ # the child task error which caused buildstream to exit
+ # is as expected.
+ #
+ # Args:
+ # error_domain (ErrorDomain): The domain of the error which occurred
+ # error_reason (any): The reason field of the error which occurred
+ # fail_message (str): An optional message to override the automatic
+ # assertion error messages
+ # Raises:
+ # (AssertionError): If any of the assertions fail
+ #
+ def assert_task_error(self,
+ error_domain,
+ error_reason,
+ fail_message=''):
+
+ assert self.exit_code == -1, fail_message
+ assert self.exc is not None, fail_message
+ assert self.exception is not None, fail_message
+ assert isinstance(self.exception, BstError), fail_message
+ assert self.unhandled_exception is False
+
+ assert self.task_error_domain == error_domain, fail_message
+ assert self.task_error_reason == error_reason, fail_message
+
+ # assert_shell_error()
+ #
+ # Asserts that the buildstream created a shell and that the task in the
+ # shell failed.
+ #
+ # Args:
+ # fail_message (str): An optional message to override the automatic
+ # assertion error messages
+ # Raises:
+ # (AssertionError): If any of the assertions fail
+ #
+ def assert_shell_error(self, fail_message=''):
+ assert self.exit_code == 1, fail_message
+
+ # get_start_order()
+ #
+ # Gets the list of elements processed in a given queue, in the
+ # order of their first appearances in the session.
+ #
+ # Args:
+ # activity (str): The queue activity name (like 'fetch')
+ #
+ # Returns:
+ # (list): A list of element names in the order which they first appeared in the result
+ #
+ def get_start_order(self, activity):
+ results = re.findall(r'\[\s*{}:(\S+)\s*\]\s*START\s*.*\.log'.format(activity), self.stderr)
+ if results is None:
+ return []
+ return list(results)
+
+ # get_tracked_elements()
+ #
+ # Produces a list of element names on which tracking occurred
+ # during the session.
+ #
+ # This is done by parsing the buildstream stderr log
+ #
+ # Returns:
+ # (list): A list of element names
+ #
+ def get_tracked_elements(self):
+ tracked = re.findall(r'\[\s*track:(\S+)\s*]', self.stderr)
+ if tracked is None:
+ return []
+
+ return list(tracked)
+
+ def get_pushed_elements(self):
+ pushed = re.findall(r'\[\s*push:(\S+)\s*\]\s*INFO\s*Pushed artifact', self.stderr)
+ if pushed is None:
+ return []
+
+ return list(pushed)
+
+ def get_pulled_elements(self):
+ pulled = re.findall(r'\[\s*pull:(\S+)\s*\]\s*INFO\s*Pulled artifact', self.stderr)
+ if pulled is None:
+ return []
+
+ return list(pulled)
+
+
+class Cli():
+
+ def __init__(self, directory, verbose=True, default_options=None):
+ self.directory = directory
+ self.config = None
+ self.verbose = verbose
+ self.artifact = TestArtifact()
+
+ if default_options is None:
+ default_options = []
+
+ self.default_options = default_options
+
+ # configure():
+ #
+ # Serializes a user configuration into a buildstream.conf
+ # to use for this test cli.
+ #
+ # Args:
+ # config (dict): The user configuration to use
+ #
+ def configure(self, config):
+ if self.config is None:
+ self.config = {}
+
+ for key, val in config.items():
+ self.config[key] = val
+
+ # remove_artifact_from_cache():
+ #
+ # Remove given element artifact from artifact cache
+ #
+ # Args:
+ # project (str): The project path under test
+ # element_name (str): The name of the element artifact
+ # cache_dir (str): Specific cache dir to remove artifact from
+ #
+ def remove_artifact_from_cache(self, project, element_name,
+ *, cache_dir=None):
+ # Read configuration to figure out where artifacts are stored
+ if not cache_dir:
+ default = os.path.join(project, 'cache')
+
+ if self.config is not None:
+ cache_dir = self.config.get('cachedir', default)
+ else:
+ cache_dir = default
+
+ self.artifact.remove_artifact_from_cache(cache_dir, element_name)
+
+ # run():
+ #
+ # Runs buildstream with the given arguments, additionally
+ # also passes some global options to buildstream in order
+ # to stay contained in the testing environment.
+ #
+ # Args:
+ # configure (bool): Whether to pass a --config argument
+ # project (str): An optional path to a project
+ # silent (bool): Whether to pass --no-verbose
+    #    env (dict): Environment variables to temporarily set during the test
+    #    cwd (str): An optional working directory to run the command from
+    #    options (list): An optional list of (option, value) tuples to pass as --option
+ # args (list): A list of arguments to pass buildstream
+ # binary_capture (bool): Whether to capture the stdout/stderr as binary
+ #
+ def run(self, configure=True, project=None, silent=False, env=None,
+ cwd=None, options=None, args=None, binary_capture=False):
+ if args is None:
+ args = []
+ if options is None:
+ options = []
+
+ # We may have been passed e.g. pathlib.Path or py.path
+ args = [str(x) for x in args]
+ project = str(project)
+
+ options = self.default_options + options
+
+ with ExitStack() as stack:
+ bst_args = ['--no-colors']
+
+ if silent:
+ bst_args += ['--no-verbose']
+
+ if configure:
+ config_file = stack.enter_context(
+ configured(self.directory, self.config)
+ )
+ bst_args += ['--config', config_file]
+
+ if project:
+ bst_args += ['--directory', project]
+
+ for option, value in options:
+ bst_args += ['--option', option, value]
+
+ bst_args += args
+
+ if cwd is not None:
+ stack.enter_context(chdir(cwd))
+
+ if env is not None:
+ stack.enter_context(environment(env))
+
+ # Ensure we have a working stdout - required to work
+ # around a bug that appears to cause AIX to close
+ # sys.__stdout__ after setup.py
+ try:
+ sys.__stdout__.fileno()
+ except ValueError:
+ sys.__stdout__ = open('/dev/stdout', 'w')
+
+ result = self._invoke(bst_cli, bst_args, binary_capture=binary_capture)
+
+ # Some informative stdout we can observe when anything fails
+ if self.verbose:
+ command = "bst " + " ".join(bst_args)
+ print("BuildStream exited with code {} for invocation:\n\t{}"
+ .format(result.exit_code, command))
+ if result.output:
+ print("Program output was:\n{}".format(result.output))
+ if result.stderr:
+ print("Program stderr was:\n{}".format(result.stderr))
+
+ if result.exc_info and result.exc_info[0] != SystemExit:
+ traceback.print_exception(*result.exc_info)
+
+ return result
+
+ def _invoke(self, cli_object, args=None, binary_capture=False):
+ exc_info = None
+ exception = None
+ exit_code = 0
+
+ # Temporarily redirect sys.stdin to /dev/null to ensure that
+ # Popen doesn't attempt to read pytest's dummy stdin.
+ old_stdin = sys.stdin
+ with open(os.devnull) as devnull:
+ sys.stdin = devnull
+ capture_kind = FDCaptureBinary if binary_capture else FDCapture
+ capture = MultiCapture(out=True, err=True, in_=False, Capture=capture_kind)
+ capture.start_capturing()
+
+ try:
+ cli_object.main(args=args or (), prog_name=cli_object.name)
+ except SystemExit as e:
+ if e.code != 0:
+ exception = e
+
+ exc_info = sys.exc_info()
+
+ exit_code = e.code
+ if not isinstance(exit_code, int):
+ sys.stdout.write('Program exit code was not an integer: ')
+ sys.stdout.write(str(exit_code))
+ sys.stdout.write('\n')
+ exit_code = 1
+ except Exception as e: # pylint: disable=broad-except
+ exception = e
+ exit_code = -1
+ exc_info = sys.exc_info()
+ finally:
+ sys.stdout.flush()
+
+ sys.stdin = old_stdin
+ out, err = capture.readouterr()
+ capture.stop_capturing()
+
+ return Result(exit_code=exit_code,
+ exception=exception,
+ exc_info=exc_info,
+ output=out,
+ stderr=err)
+
+ # Fetch an element state by name by
+ # invoking bst show on the project with the CLI
+ #
+ # If you need to get the states of multiple elements,
+ # then use get_element_states(s) instead.
+ #
+ def get_element_state(self, project, element_name):
+ result = self.run(project=project, silent=True, args=[
+ 'show',
+ '--deps', 'none',
+ '--format', '%{state}',
+ element_name
+ ])
+ result.assert_success()
+ return result.output.strip()
+
+ # Fetch the states of elements for a given target / deps
+ #
+ # Returns a dictionary with the element names as keys
+ #
+ def get_element_states(self, project, targets, deps='all'):
+ result = self.run(project=project, silent=True, args=[
+ 'show',
+ '--deps', deps,
+ '--format', '%{name}||%{state}',
+ *targets
+ ])
+ result.assert_success()
+ lines = result.output.splitlines()
+ states = {}
+ for line in lines:
+ split = line.split(sep='||')
+ states[split[0]] = split[1]
+ return states
+
+ # Fetch an element's cache key by invoking bst show
+ # on the project with the CLI
+ #
+ def get_element_key(self, project, element_name):
+ result = self.run(project=project, silent=True, args=[
+ 'show',
+ '--deps', 'none',
+ '--format', '%{full-key}',
+ element_name
+ ])
+ result.assert_success()
+ return result.output.strip()
+
+ # Get the decoded config of an element.
+ #
+ def get_element_config(self, project, element_name):
+ result = self.run(project=project, silent=True, args=[
+ 'show',
+ '--deps', 'none',
+ '--format', '%{config}',
+ element_name
+ ])
+
+ result.assert_success()
+ return yaml.safe_load(result.output)
+
+ # Fetch the elements that would be in the pipeline with the given
+ # arguments.
+ #
+ def get_pipeline(self, project, elements, except_=None, scope='plan'):
+ if except_ is None:
+ except_ = []
+
+ args = ['show', '--deps', scope, '--format', '%{name}']
+ args += list(itertools.chain.from_iterable(zip(itertools.repeat('--except'), except_)))
+
+ result = self.run(project=project, silent=True, args=args + elements)
+ result.assert_success()
+ return result.output.splitlines()
+
+ # Fetch an element's complete artifact name, cache_key will be generated
+ # if not given.
+ #
+ def get_artifact_name(self, project, project_name, element_name, cache_key=None):
+ if not cache_key:
+ cache_key = self.get_element_key(project, element_name)
+
+ # Replace path separator and chop off the .bst suffix for normal name
+ normal_name = _get_normal_name(element_name)
+ return _compose_artifact_name(project_name, normal_name, cache_key)
+
+
+class CliIntegration(Cli):
+
+ # run()
+ #
+ # This supports the same arguments as Cli.run() and additionally
+ # it supports the project_config keyword argument.
+ #
+ # This will first load the project.conf file from the specified
+ # project directory ('project' keyword argument) and perform substitutions
+ # of any {project_dir} specified in the existing project.conf.
+ #
+ # If the project_config parameter is specified, it is expected to
+ # be a dictionary of additional project configuration options, and
+ # will be composited on top of the already loaded project.conf
+ #
+ def run(self, *args, project_config=None, **kwargs):
+
+ # First load the project.conf and substitute {project_dir}
+ #
+ # Save the original project.conf, because we will run more than
+ # once in the same temp directory
+ #
+ project_directory = kwargs['project']
+ project_filename = os.path.join(project_directory, 'project.conf')
+ project_backup = os.path.join(project_directory, 'project.conf.backup')
+ project_load_filename = project_filename
+
+ if not os.path.exists(project_backup):
+ shutil.copy(project_filename, project_backup)
+ else:
+ project_load_filename = project_backup
+
+ with open(project_load_filename) as f:
+ config = f.read()
+ config = config.format(project_dir=project_directory)
+
+ if project_config is not None:
+
+ # If a custom project configuration dictionary was
+ # specified, composite it on top of the already
+ # substituted base project configuration
+ #
+ base_config = _yaml.load_data(config)
+
+ # In order to leverage _yaml.composite_dict(), both
+ # dictionaries need to be loaded via _yaml.load_data() first
+ #
+ with tempfile.TemporaryDirectory(dir=project_directory) as scratchdir:
+
+ temp_project = os.path.join(scratchdir, 'project.conf')
+ with open(temp_project, 'w') as f:
+ yaml.safe_dump(project_config, f)
+
+ project_config = _yaml.load(temp_project)
+
+ _yaml.composite_dict(base_config, project_config)
+
+ base_config = _yaml.node_sanitize(base_config)
+ _yaml.dump(base_config, project_filename)
+
+ else:
+
+ # Otherwise, just dump it as is
+ with open(project_filename, 'w') as f:
+ f.write(config)
+
+ return super().run(*args, **kwargs)
+
+
+class CliRemote(CliIntegration):
+
+ # ensure_services():
+ #
+ # Make sure that required services are configured and that
+ # non-required ones are not.
+ #
+ # Args:
+ # actions (bool): Whether to use the 'action-cache' service
+ # artifacts (bool): Whether to use the 'artifact-cache' service
+ # execution (bool): Whether to use the 'execution' service
+ # sources (bool): Whether to use the 'source-cache' service
+ # storage (bool): Whether to use the 'storage' service
+ #
+    # Returns a list of configured services (by name).
+ #
+ def ensure_services(self, actions=True, execution=True, storage=True,
+ artifacts=False, sources=False):
+ # Build a list of configured services by name:
+ configured_services = []
+ if not self.config:
+ return configured_services
+
+ if 'remote-execution' in self.config:
+ rexec_config = self.config['remote-execution']
+
+ if 'action-cache-service' in rexec_config:
+ if actions:
+ configured_services.append('action-cache')
+ else:
+ rexec_config.pop('action-cache-service')
+
+ if 'execution-service' in rexec_config:
+ if execution:
+ configured_services.append('execution')
+ else:
+ rexec_config.pop('execution-service')
+
+ if 'storage-service' in rexec_config:
+ if storage:
+ configured_services.append('storage')
+ else:
+ rexec_config.pop('storage-service')
+
+ if 'artifacts' in self.config:
+ if artifacts:
+ configured_services.append('artifact-cache')
+ else:
+ self.config.pop('artifacts')
+
+ if 'source-caches' in self.config:
+ if sources:
+ configured_services.append('source-cache')
+ else:
+ self.config.pop('source-caches')
+
+ return configured_services
+
+
+class TestArtifact():
+
+ # remove_artifact_from_cache():
+ #
+ # Remove given element artifact from artifact cache
+ #
+ # Args:
+ # cache_dir (str): Specific cache dir to remove artifact from
+ # element_name (str): The name of the element artifact
+ #
+ def remove_artifact_from_cache(self, cache_dir, element_name):
+
+ cache_dir = os.path.join(cache_dir, 'artifacts', 'refs')
+
+ normal_name = element_name.replace(os.sep, '-')
+ cache_dir = os.path.splitext(os.path.join(cache_dir, 'test', normal_name))[0]
+ shutil.rmtree(cache_dir)
+
+ # is_cached():
+ #
+ # Check if given element has a cached artifact
+ #
+ # Args:
+ # cache_dir (str): Specific cache dir to check
+ # element (Element): The element object
+ # element_key (str): The element's cache key
+ #
+ # Returns:
+ # (bool): If the cache contains the element's artifact
+ #
+ def is_cached(self, cache_dir, element, element_key):
+
+ # cas = CASCache(str(cache_dir))
+ artifact_ref = element.get_artifact_name(element_key)
+ return os.path.exists(os.path.join(cache_dir, 'artifacts', 'refs', artifact_ref))
+
+ # get_digest():
+ #
+ # Get the digest for a given element's artifact files
+ #
+ # Args:
+ # cache_dir (str): Specific cache dir to check
+ # element (Element): The element object
+ # element_key (str): The element's cache key
+ #
+ # Returns:
+ # (Digest): The digest stored in the ref
+ #
+ def get_digest(self, cache_dir, element, element_key):
+
+ artifact_ref = element.get_artifact_name(element_key)
+ artifact_dir = os.path.join(cache_dir, 'artifacts', 'refs')
+ artifact_proto = artifact_pb2.Artifact()
+ with open(os.path.join(artifact_dir, artifact_ref), 'rb') as f:
+ artifact_proto.ParseFromString(f.read())
+ return artifact_proto.files
+
+ # extract_buildtree():
+ #
+ # Context manager for extracting an elements artifact buildtree for
+ # inspection.
+ #
+    # Args:
+    #    cache_dir (str): Specific cache dir to check
+    #    tmpdir (LocalPath): pytest fixture for the tests tmp dir
+    #    ref (str): The artifact ref whose buildtree to extract
+    #
+    # Yields:
+    #    (str): path to the extracted buildtree directory; existence is not
+    #           guaranteed.
+ @contextmanager
+ def extract_buildtree(self, cache_dir, tmpdir, ref):
+ artifact = artifact_pb2.Artifact()
+ try:
+ with open(os.path.join(cache_dir, 'artifacts', 'refs', ref), 'rb') as f:
+ artifact.ParseFromString(f.read())
+ except FileNotFoundError:
+ yield None
+ else:
+ if str(artifact.buildtree):
+ with self._extract_subdirectory(tmpdir, artifact.buildtree) as f:
+ yield f
+ else:
+ yield None
+
+ # _extract_subdirectory():
+ #
+ # Context manager for extracting an element artifact for inspection,
+ # providing an expected path for a given subdirectory
+ #
+    # Args:
+    #    tmpdir (LocalPath): pytest fixture for the tests tmp dir
+    #    digest (Digest): The element directory digest to extract
+    #
+    # Yields:
+    #    (str): path to the extracted directory; existence is not
+    #           guaranteed.
+ @contextmanager
+ def _extract_subdirectory(self, tmpdir, digest):
+ with tempfile.TemporaryDirectory() as extractdir:
+ try:
+ cas = CASCache(str(tmpdir))
+ cas.checkout(extractdir, digest)
+ yield extractdir
+ except FileNotFoundError:
+ yield None
+
+
+# Main fixture
+#
+# Use result = cli.run([arg1, arg2]) to run buildstream commands
+#
+@pytest.fixture()
+def cli(tmpdir):
+ directory = os.path.join(str(tmpdir), 'cache')
+ os.makedirs(directory)
+ return Cli(directory)
+
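+# A minimal usage sketch (illustrative only, deliberately not named test_* so
+# pytest does not collect it): how a test might use the 'cli' fixture. The
+# element name 'hello.bst' and the 'datafiles' project fixture are hypothetical.
+def _example_build_test(cli, datafiles):
+    project = str(datafiles)
+    result = cli.run(project=project, args=['build', 'hello.bst'])
+    result.assert_success()
+    assert cli.get_element_state(project, 'hello.bst') == 'cached'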
+
+# A variant of the main fixture that keeps persistent artifact and
+# source caches.
+#
+# It also does not use the click test runner to avoid deadlock issues
+# when running `bst shell`, but unfortunately cannot produce nice
+# stacktraces.
+@pytest.fixture()
+def cli_integration(tmpdir, integration_cache):
+ directory = os.path.join(str(tmpdir), 'cache')
+ os.makedirs(directory)
+
+ if os.environ.get('BST_FORCE_BACKEND') == 'unix':
+ fixture = CliIntegration(directory, default_options=[('linux', 'False')])
+ else:
+ fixture = CliIntegration(directory)
+
+ # We want to cache sources for integration tests more permanently,
+ # to avoid downloading the huge base-sdk repeatedly
+ fixture.configure({
+ 'cachedir': integration_cache.cachedir,
+ 'sourcedir': integration_cache.sources,
+ })
+
+ yield fixture
+
+    # Remove the following directories if present
+ try:
+ shutil.rmtree(os.path.join(integration_cache.cachedir, 'build'))
+ except FileNotFoundError:
+ pass
+ try:
+ shutil.rmtree(os.path.join(integration_cache.cachedir, 'tmp'))
+ except FileNotFoundError:
+ pass
+
+
+# A variant of the main fixture that is configured for remote-execution.
+#
+# It also does not use the click test runner to avoid deadlock issues
+# when running `bst shell`, but unfortunately cannot produce nice
+# stacktraces.
+@pytest.fixture()
+def cli_remote_execution(tmpdir, remote_services):
+ directory = os.path.join(str(tmpdir), 'cache')
+ os.makedirs(directory)
+
+ fixture = CliRemote(directory)
+
+ if remote_services.artifact_service:
+ fixture.configure({'artifacts': [{
+ 'url': remote_services.artifact_service,
+ }]})
+
+ remote_execution = {}
+ if remote_services.action_service:
+ remote_execution['action-cache-service'] = {
+ 'url': remote_services.action_service,
+ }
+ if remote_services.exec_service:
+ remote_execution['execution-service'] = {
+ 'url': remote_services.exec_service,
+ }
+ if remote_services.storage_service:
+ remote_execution['storage-service'] = {
+ 'url': remote_services.storage_service,
+ }
+ if remote_execution:
+ fixture.configure({'remote-execution': remote_execution})
+
+ if remote_services.source_service:
+ fixture.configure({'source-caches': [{
+ 'url': remote_services.source_service,
+ }]})
+
+ return fixture
+
+
+@contextmanager
+def chdir(directory):
+ old_dir = os.getcwd()
+ os.chdir(directory)
+ yield
+ os.chdir(old_dir)
+
+
+@contextmanager
+def environment(env):
+
+ old_env = {}
+ for key, value in env.items():
+ old_env[key] = os.environ.get(key)
+ if value is None:
+ os.environ.pop(key, None)
+ else:
+ os.environ[key] = value
+
+ yield
+
+ for key, value in old_env.items():
+ if value is None:
+ os.environ.pop(key, None)
+ else:
+ os.environ[key] = value
+
+
+@contextmanager
+def configured(directory, config=None):
+
+ # Ensure we've at least relocated the caches to a temp directory
+ if not config:
+ config = {}
+
+ if not config.get('sourcedir', False):
+ config['sourcedir'] = os.path.join(directory, 'sources')
+ if not config.get('cachedir', False):
+ config['cachedir'] = directory
+ if not config.get('logdir', False):
+ config['logdir'] = os.path.join(directory, 'logs')
+
+ # Dump it and yield the filename for test scripts to feed it
+    # to buildstream as an argument
+ filename = os.path.join(directory, "buildstream.conf")
+ _yaml.dump(config, filename)
+
+ yield filename
diff --git a/src/buildstream/types.py b/src/buildstream/types.py
new file mode 100644
index 000000000..d54bf0b6e
--- /dev/null
+++ b/src/buildstream/types.py
@@ -0,0 +1,177 @@
+#
+# Copyright (C) 2018 Bloomberg LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+# Jim MacArthur <jim.macarthur@codethink.co.uk>
+# Benjamin Schubert <bschubert15@bloomberg.net>
+
+"""
+Foundation types
+================
+
+"""
+
+from enum import Enum
+import heapq
+
+
+class Scope(Enum):
+ """Defines the scope of dependencies to include for a given element
+ when iterating over the dependency graph in APIs like
+ :func:`Element.dependencies() <buildstream.element.Element.dependencies>`
+ """
+
+ ALL = 1
+ """All elements which the given element depends on, following
+ all elements required for building. Including the element itself.
+ """
+
+ BUILD = 2
+ """All elements required for building the element, including their
+ respective run dependencies. Not including the given element itself.
+ """
+
+ RUN = 3
+ """All elements required for running the element. Including the element
+ itself.
+ """
+
+ NONE = 4
+ """Just the element itself, no dependencies.
+
+ *Since: 1.4*
+ """
+
+
+class Consistency():
+ """Defines the various consistency states of a :class:`.Source`.
+ """
+
+ INCONSISTENT = 0
+ """Inconsistent
+
+ Inconsistent sources have no explicit reference set. They cannot
+ produce a cache key, be fetched or staged. They can only be tracked.
+ """
+
+ RESOLVED = 1
+ """Resolved
+
+ Resolved sources have a reference and can produce a cache key and
+ be fetched, however they cannot be staged.
+ """
+
+ CACHED = 2
+ """Cached
+
+ Sources have a cached unstaged copy in the source directory.
+ """
+
+
+class CoreWarnings():
+ """CoreWarnings()
+
+ Some common warnings which are raised by core functionalities within BuildStream are found in this class.
+ """
+
+ OVERLAPS = "overlaps"
+ """
+ This warning will be produced when buildstream detects an overlap on an element
+ which is not whitelisted. See :ref:`Overlap Whitelist <public_overlap_whitelist>`
+ """
+
+ REF_NOT_IN_TRACK = "ref-not-in-track"
+ """
+ This warning will be produced when a source is configured with a reference
+ which is found to be invalid based on the configured track
+ """
+
+ BAD_ELEMENT_SUFFIX = "bad-element-suffix"
+ """
+ This warning will be produced when an element whose name does not end in .bst
+ is referenced either on the command line or by another element
+ """
+
+ BAD_CHARACTERS_IN_NAME = "bad-characters-in-name"
+ """
+    This warning will be produced when the filename of a target contains
+    invalid characters.
+ """
+
+
+# _KeyStrength():
+#
+# Strength of cache key
+#
+class _KeyStrength(Enum):
+
+ # Includes strong cache keys of all build dependencies and their
+ # runtime dependencies.
+ STRONG = 1
+
+ # Includes names of direct build dependencies but does not include
+ # cache keys of dependencies.
+ WEAK = 2
+
+
+# _UniquePriorityQueue():
+#
+# Implements a priority queue that adds each key only once.
+#
+# The queue stores (key, item) tuples and orders them by key.
+#
+class _UniquePriorityQueue:
+
+ def __init__(self):
+ self._items = set()
+ self._heap = []
+
+ # push():
+ #
+ # Push a new item in the queue.
+ #
+ # If the item is already present in the queue as identified by the key,
+ # this is a noop.
+ #
+ # Args:
+ # key (hashable, comparable): unique key to use for checking for
+ # the object's existence and used for
+ # ordering
+ # item (any): item to push to the queue
+ #
+ def push(self, key, item):
+ if key not in self._items:
+ self._items.add(key)
+ heapq.heappush(self._heap, (key, item))
+
+ # pop():
+ #
+ # Pop the next item from the queue, by priority order.
+ #
+ # Returns:
+ # (any): the next item
+ #
+    # Raises:
+ # IndexError: when the list is empty
+ #
+ def pop(self):
+ key, item = heapq.heappop(self._heap)
+ self._items.remove(key)
+ return item
+
+ def __len__(self):
+ return len(self._heap)
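+
+
+# A minimal usage sketch (illustrative only): keys are compared for ordering,
+# and pushing a key that is already queued is a no-op.
+def _example_unique_priority_queue():
+    queue = _UniquePriorityQueue()
+    queue.push(2, 'second')
+    queue.push(1, 'first')
+    queue.push(2, 'duplicate')  # ignored, key 2 is already queued
+    assert queue.pop() == 'first'
+    assert queue.pop() == 'second'
+    assert len(queue) == 0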
diff --git a/src/buildstream/utils.py b/src/buildstream/utils.py
new file mode 100644
index 000000000..ade593750
--- /dev/null
+++ b/src/buildstream/utils.py
@@ -0,0 +1,1293 @@
+#
+# Copyright (C) 2016-2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+"""
+Utilities
+=========
+"""
+
+import calendar
+import errno
+import hashlib
+import os
+import re
+import shutil
+import signal
+import stat
+from stat import S_ISDIR
+import string
+import subprocess
+import tempfile
+import itertools
+from contextlib import contextmanager
+
+import psutil
+
+from . import _signals
+from ._exceptions import BstError, ErrorDomain
+from ._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+
+# The magic number for timestamps: 2011-11-11 11:11:11
+_magic_timestamp = calendar.timegm([2011, 11, 11, 11, 11, 11])
+
+
+# The separator we use for user specified aliases
+_ALIAS_SEPARATOR = ':'
+_URI_SCHEMES = ["http", "https", "ftp", "file", "git", "sftp", "ssh"]
+
+
+class UtilError(BstError):
+ """Raised by utility functions when system calls fail.
+
+ This will be handled internally by the BuildStream core,
+ if you need to handle this error, then it should be reraised,
+ or either of the :class:`.ElementError` or :class:`.SourceError`
+ exceptions should be raised from this error.
+ """
+ def __init__(self, message, reason=None):
+ super().__init__(message, domain=ErrorDomain.UTIL, reason=reason)
+
+
+class ProgramNotFoundError(BstError):
+ """Raised if a required program is not found.
+
+    It is normally not necessary to handle this exception from plugin code.
+ """
+ def __init__(self, message, reason=None):
+ super().__init__(message, domain=ErrorDomain.PROG_NOT_FOUND, reason=reason)
+
+
+class DirectoryExistsError(OSError):
+    """Raised when an `os.rename` is attempted but the destination is an existing directory.
+ """
+
+
+class FileListResult():
+ """An object which stores the result of one of the operations
+ which run on a list of files.
+ """
+
+ def __init__(self):
+
+ self.overwritten = []
+ """List of files which were overwritten in the target directory"""
+
+ self.ignored = []
+ """List of files which were ignored, because they would have
+ replaced a non empty directory"""
+
+ self.failed_attributes = []
+ """List of files for which attributes could not be copied over"""
+
+ self.files_written = []
+ """List of files that were written."""
+
+ def combine(self, other):
+ """Create a new FileListResult that contains the results of both.
+ """
+ ret = FileListResult()
+
+ ret.overwritten = self.overwritten + other.overwritten
+ ret.ignored = self.ignored + other.ignored
+ ret.failed_attributes = self.failed_attributes + other.failed_attributes
+ ret.files_written = self.files_written + other.files_written
+
+ return ret
+
+
+def list_relative_paths(directory):
+ """A generator for walking directory relative paths
+
+ This generator is useful for checking the full manifest of
+ a directory.
+
+ Symbolic links will not be followed, but will be included
+ in the manifest.
+
+ Args:
+ directory (str): The directory to list files in
+
+ Yields:
+ Relative filenames in `directory`
+ """
+ for (dirpath, dirnames, filenames) in os.walk(directory):
+
+        # os.walk does not descend into symlink directories, which
+ # makes sense because otherwise we might have redundant
+ # directories, or end up descending into directories outside
+ # of the walk() directory.
+ #
+ # But symlinks to directories are still identified as
+ # subdirectories in the walked `dirpath`, so we extract
+ # these symlinks from `dirnames` and add them to `filenames`.
+ #
+ for d in dirnames:
+ fullpath = os.path.join(dirpath, d)
+ if os.path.islink(fullpath):
+ filenames.append(d)
+
+        # Sorting dirnames in place ensures that os.walk()
+        # visits subdirectories in a deterministic order.
+ dirnames.sort()
+ filenames.sort()
+
+ relpath = os.path.relpath(dirpath, directory)
+
+ # We don't want "./" pre-pended to all the entries in the root of
+ # `directory`, prefer to have no prefix in that case.
+ basepath = relpath if relpath != '.' and dirpath != directory else ''
+
+ # First yield the walked directory itself, except for the root
+ if basepath != '':
+ yield basepath
+
+ # List the filenames in the walked directory
+ for f in filenames:
+ yield os.path.join(basepath, f)
+
+
+# pylint: disable=anomalous-backslash-in-string
+def glob(paths, pattern):
+ """A generator to yield paths which match the glob pattern
+
+ Args:
+ paths (iterable): The paths to check
+ pattern (str): A glob pattern
+
+ This generator will iterate over the passed *paths* and
+ yield only the filenames which matched the provided *pattern*.
+
+ +--------+------------------------------------------------------------------+
+ | Meta | Description |
+ +========+==================================================================+
+ | \* | Zero or more of any character, excepting path separators |
+ +--------+------------------------------------------------------------------+
+ | \** | Zero or more of any character, including path separators |
+ +--------+------------------------------------------------------------------+
+ | ? | One of any character, except for path separators |
+ +--------+------------------------------------------------------------------+
+ | [abc] | One of any of the specified characters |
+ +--------+------------------------------------------------------------------+
+ | [a-z] | One of the characters in the specified range |
+ +--------+------------------------------------------------------------------+
+ | [!abc] | Any single character, except the specified characters |
+ +--------+------------------------------------------------------------------+
+ | [!a-z] | Any single character, except those in the specified range |
+ +--------+------------------------------------------------------------------+
+
+ .. note::
+
+ Escaping of the metacharacters is not possible
+
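+    **Example:**
+
+    A minimal sketch, assuming *directory* is a populated staging area:
+
+    .. code:: python
+
+        manifest = list_relative_paths(directory)
+        shared_libraries = list(glob(manifest, '**/*.so'))
+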
+ """
+ # Ensure leading slash, just because we want patterns
+ # to match file lists regardless of whether the patterns
+ # or file lists had a leading slash or not.
+ if not pattern.startswith(os.sep):
+ pattern = os.sep + pattern
+
+ expression = _glob2re(pattern)
+ regexer = re.compile(expression)
+
+ for filename in paths:
+ filename_try = filename
+ if not filename_try.startswith(os.sep):
+ filename_try = os.sep + filename_try
+
+ if regexer.match(filename_try):
+ yield filename
+
+
+def sha256sum(filename):
+ """Calculate the sha256sum of a file
+
+ Args:
+ filename (str): A path to a file on disk
+
+ Returns:
+ (str): An sha256 checksum string
+
+ Raises:
+ UtilError: In the case there was an issue opening
+ or reading `filename`
+ """
+ try:
+ h = hashlib.sha256()
+ with open(filename, "rb") as f:
+ for chunk in iter(lambda: f.read(65536), b""):
+ h.update(chunk)
+
+ except OSError as e:
+ raise UtilError("Failed to get a checksum of file '{}': {}"
+ .format(filename, e)) from e
+
+ return h.hexdigest()
+
+
+def safe_copy(src, dest, *, result=None):
+ """Copy a file while preserving attributes
+
+ Args:
+ src (str): The source filename
+ dest (str): The destination filename
+ result (:class:`~.FileListResult`): An optional collective result
+
+ Raises:
+ UtilError: In the case of unexpected system call failures
+
+ This is almost the same as shutil.copy2(), except that
+ we unlink *dest* before overwriting it if it exists, just
+    in case *dest* is a hardlink to a different file.
+ """
+ # First unlink the target if it exists
+ try:
+ os.unlink(dest)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise UtilError("Failed to remove destination file '{}': {}"
+ .format(dest, e)) from e
+
+ shutil.copyfile(src, dest)
+ try:
+ shutil.copystat(src, dest)
+ except PermissionError:
+        # If we failed to copy over some file stats, don't treat
+ # it as an unrecoverable error, but provide some feedback
+ # we can use for a warning.
+ #
+ # This has a tendency of happening when attempting to copy
+ # over extended file attributes.
+ if result:
+ result.failed_attributes.append(dest)
+
+ except shutil.Error as e:
+ raise UtilError("Failed to copy '{} -> {}': {}"
+ .format(src, dest, e)) from e
+
+
+def safe_link(src, dest, *, result=None, _unlink=False):
+ """Try to create a hardlink, but resort to copying in the case of cross device links.
+
+ Args:
+ src (str): The source filename
+ dest (str): The destination filename
+ result (:class:`~.FileListResult`): An optional collective result
+
+ Raises:
+ UtilError: In the case of unexpected system call failures
+ """
+
+ if _unlink:
+ # First unlink the target if it exists
+ try:
+ os.unlink(dest)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise UtilError("Failed to remove destination file '{}': {}"
+ .format(dest, e)) from e
+
+ # If we can't link it due to cross-device hardlink, copy
+ try:
+ os.link(src, dest)
+ except OSError as e:
+ if e.errno == errno.EEXIST and not _unlink:
+ # Target exists already, unlink and try again
+ safe_link(src, dest, result=result, _unlink=True)
+ elif e.errno == errno.EXDEV:
+ safe_copy(src, dest)
+ else:
+ raise UtilError("Failed to link '{} -> {}': {}"
+ .format(src, dest, e)) from e
+
+
+def safe_remove(path):
+ """Removes a file or directory
+
+ This will remove a file if it exists, and will
+ remove a directory if the directory is empty.
+
+ Args:
+ path (str): The path to remove
+
+ Returns:
+ True if `path` was removed or did not exist, False
+ if `path` was a non empty directory.
+
+ Raises:
+ UtilError: In the case of unexpected system call failures
+ """
+ try:
+ if S_ISDIR(os.lstat(path).st_mode):
+ os.rmdir(path)
+ else:
+ os.unlink(path)
+
+ # File removed/unlinked successfully
+ return True
+
+ except OSError as e:
+ if e.errno == errno.ENOTEMPTY:
+ # Path is non-empty directory
+ return False
+ elif e.errno == errno.ENOENT:
+ # Path does not exist
+ return True
+
+ raise UtilError("Failed to remove '{}': {}"
+ .format(path, e))
+
+
+def copy_files(src, dest, *, filter_callback=None, ignore_missing=False, report_written=False):
+ """Copy files from source to destination.
+
+ Args:
+ src (str): The source file or directory
+ dest (str): The destination directory
+ filter_callback (callable): Optional filter callback. Called with the relative path as
+ argument for every file in the source directory. The file is
+ copied only if the callable returns True. If no filter callback
+ is specified, all files will be copied.
+       ignore_missing (bool): Don't raise any error if a source file is missing
+ report_written (bool): Add to the result object the full list of files written
+
+ Returns:
+ (:class:`~.FileListResult`): The result describing what happened during this file operation
+
+ Raises:
+ UtilError: In the case of unexpected system call failures
+
+ .. note::
+
+ Directories in `dest` are replaced with files from `src`,
+ unless the existing directory in `dest` is not empty in which
+ case the path will be reported in the return value.
+
+ UNIX domain socket files from `src` are ignored.
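+
+    **Example:**
+
+    A minimal sketch; the staging and install paths are placeholders:
+
+    .. code:: python
+
+        result = copy_files('/path/to/staging', '/path/to/install')
+        if result.ignored:
+            print("Skipped non-empty directories: {}".format(result.ignored))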
+ """
+ result = FileListResult()
+ try:
+ _process_list(src, dest, safe_copy, result,
+ filter_callback=filter_callback,
+ ignore_missing=ignore_missing,
+ report_written=report_written)
+ except OSError as e:
+ raise UtilError("Failed to copy '{} -> {}': {}"
+ .format(src, dest, e))
+ return result
+
+
+def link_files(src, dest, *, filter_callback=None, ignore_missing=False, report_written=False):
+ """Hardlink files from source to destination.
+
+ Args:
+ src (str): The source file or directory
+ dest (str): The destination directory
+ filter_callback (callable): Optional filter callback. Called with the relative path as
+ argument for every file in the source directory. The file is
+ hardlinked only if the callable returns True. If no filter
+ callback is specified, all files will be hardlinked.
+       ignore_missing (bool): Don't raise any error if a source file is missing
+ report_written (bool): Add to the result object the full list of files written
+
+ Returns:
+ (:class:`~.FileListResult`): The result describing what happened during this file operation
+
+ Raises:
+ UtilError: In the case of unexpected system call failures
+
+ .. note::
+
+ Directories in `dest` are replaced with files from `src`,
+ unless the existing directory in `dest` is not empty in which
+ case the path will be reported in the return value.
+
+ .. note::
+
+ If a hardlink cannot be created due to crossing filesystems,
+ then the file will be copied instead.
+
+ UNIX domain socket files from `src` are ignored.
+ """
+ result = FileListResult()
+ try:
+ _process_list(src, dest, safe_link, result,
+ filter_callback=filter_callback,
+ ignore_missing=ignore_missing,
+ report_written=report_written)
+ except OSError as e:
+ raise UtilError("Failed to link '{} -> {}': {}"
+ .format(src, dest, e))
+
+ return result
+
+
+def get_host_tool(name):
+ """Get the full path of a host tool
+
+ Args:
+ name (str): The name of the program to search for
+
+ Returns:
+ The full path to the program, if found
+
+ Raises:
+ :class:`.ProgramNotFoundError`
+ """
+ search_path = os.environ.get('PATH')
+ program_path = shutil.which(name, path=search_path)
+
+ if not program_path:
+ raise ProgramNotFoundError("Did not find '{}' in PATH: {}".format(name, search_path))
+
+ return program_path
+
+
+def url_directory_name(url):
+ """Normalizes a url into a directory name
+
+ Args:
+ url (str): A url string
+
+ Returns:
+ A string which can be used as a directory name
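+
+    **Example:**
+
+    For illustration, any character outside letters, digits, ``%`` and ``_``
+    becomes an underscore:
+
+    .. code:: python
+
+        url_directory_name('https://example.com/repo.git')
+        # -> 'https___example_com_repo_git'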
+ """
+ valid_chars = string.digits + string.ascii_letters + '%_'
+
+ def transl(x):
+ return x if x in valid_chars else '_'
+
+ return ''.join([transl(x) for x in url])
+
+
+def get_bst_version():
+ """Gets the major, minor release portion of the
+ BuildStream version.
+
+ Returns:
+ (int): The major version
+ (int): The minor version
+ """
+ # Import this only conditionally, it's not resolved at bash complete time
+ from . import __version__ # pylint: disable=cyclic-import
+ versions = __version__.split('.')[:2]
+
+ if versions[0] == '0+untagged':
+ raise UtilError("Your git repository has no tags - BuildStream can't "
+ "determine its version. Please run `git fetch --tags`.")
+
+ try:
+ return (int(versions[0]), int(versions[1]))
+ except IndexError:
+ raise UtilError("Cannot detect Major and Minor parts of the version\n"
+ "Version: {} not in XX.YY.whatever format"
+ .format(__version__))
+ except ValueError:
+ raise UtilError("Cannot convert version to integer numbers\n"
+ "Version: {} not in Integer.Integer.whatever format"
+ .format(__version__))
+
+
+def move_atomic(source, destination, *, ensure_parents=True):
+ """Move the source to the destination using atomic primitives.
+
+ This uses `os.rename` to move a file or directory to a new destination.
+ It wraps some of the `OSError`s thrown by `os.rename` so that they are
+ handled consistently.
+
+ The main reason for this function to exist is that rename can throw
+ different errors for the same symptom
+ (https://www.unix.com/man-page/POSIX/3posix/rename/) when moving a directory.
+
+ We are especially interested here in the case when the destination already
+ exists, is a directory and is not empty. In this case, either EEXIST or
+ ENOTEMPTY can be thrown.
+
+ In order to ensure consistent handling of these exceptions, this function
+ should be used instead of `os.rename`.
+
+ Args:
+ source (str or Path): source to rename
+ destination (str or Path): destination to which to move the source
+ ensure_parents (bool): Whether or not to create the destination's parent
+ directories (default: True)
+ Raises:
+ DirectoryExistsError: if the destination directory already exists and is
+ not empty
+ OSError: if another filesystem level error occurred
+ """
+ if ensure_parents:
+ os.makedirs(os.path.dirname(str(destination)), exist_ok=True)
+
+ try:
+ os.rename(str(source), str(destination))
+ except OSError as exc:
+ if exc.errno in (errno.EEXIST, errno.ENOTEMPTY):
+ raise DirectoryExistsError(*exc.args) from exc
+ raise
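+
+
+# Illustrative sketch, not part of the original module: committing a staging
+# directory into place with move_atomic() and tolerating a concurrent writer
+# that got there first. The paths are hypothetical.
+#
+def _example_commit_staging(staging_dir, final_dir):
+    try:
+        move_atomic(staging_dir, final_dir)
+    except DirectoryExistsError:
+        # Another process already populated final_dir; discard our copy
+        shutil.rmtree(staging_dir)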
+
+
+@contextmanager
+def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
+ errors=None, newline=None, closefd=True, opener=None, tempdir=None):
+ """Save a file with a temporary name and rename it into place when ready.
+
+ This is a context manager which is meant for saving data to files.
+ The data is written to a temporary file, which gets renamed to the target
+ name when the context is closed. This prevents readers of the file from
+ seeing an incomplete file.
+
+ **Example:**
+
+ .. code:: python
+
+ with save_file_atomic('/path/to/foo', 'w') as f:
+ f.write(stuff)
+
+ The file will be called something like ``tmpCAFEBEEF`` until the
+ context block ends, at which point it gets renamed to ``foo``. The
+ temporary file will be created in the same directory as the output file
+ unless ``tempdir`` is specified. The ``filename`` parameter must be an
+ absolute path.
+
+ If an exception occurs or the process is terminated, the temporary file will
+ be deleted.
+ """
+ # This feature has been proposed for upstream Python in the past, e.g.:
+ # https://bugs.python.org/issue8604
+
+ assert os.path.isabs(filename), "The utils.save_file_atomic() parameter ``filename`` must be an absolute path"
+ if tempdir is None:
+ tempdir = os.path.dirname(filename)
+ fd, tempname = tempfile.mkstemp(dir=tempdir)
+ os.close(fd)
+
+ f = open(tempname, mode=mode, buffering=buffering, encoding=encoding,
+ errors=errors, newline=newline, closefd=closefd, opener=opener)
+
+ def cleanup_tempfile():
+ f.close()
+ try:
+ os.remove(tempname)
+ except FileNotFoundError:
+ pass
+ except OSError as e:
+ raise UtilError("Failed to cleanup temporary file {}: {}".format(tempname, e)) from e
+
+ try:
+ with _signals.terminator(cleanup_tempfile):
+ f.real_filename = filename
+ yield f
+ f.close()
+ # This operation is atomic, at least on platforms we care about:
+ # https://bugs.python.org/issue8828
+ os.replace(tempname, filename)
+ except Exception:
+ cleanup_tempfile()
+ raise
+
+
+# _get_dir_size():
+#
+# Get the disk usage of a given directory in bytes.
+#
+# This function assumes that files do not inadvertently
+# disappear while this function is running.
+#
+# Arguments:
+# (str) The path whose size to check.
+#
+# Returns:
+# (int) The size on disk in bytes.
+#
+def _get_dir_size(path):
+ path = os.path.abspath(path)
+
+ def get_size(path):
+ total = 0
+
+ for f in os.scandir(path):
+ total += f.stat(follow_symlinks=False).st_size
+
+ if f.is_dir(follow_symlinks=False):
+ total += get_size(f.path)
+
+ return total
+
+ return get_size(path)
+
+
+# _get_volume_size():
+#
+# Gets the total size and available space of a mounted filesystem in bytes.
+#
+# Args:
+# path (str): The path to check
+#
+# Returns:
+# (int): The total number of bytes on the volume
+# (int): The number of available bytes on the volume
+#
+def _get_volume_size(path):
+ try:
+ stat_ = os.statvfs(path)
+ except OSError as e:
+ raise UtilError("Failed to retrieve stats on volume for path '{}': {}"
+ .format(path, e)) from e
+
+ return stat_.f_bsize * stat_.f_blocks, stat_.f_bsize * stat_.f_bavail
+
+
+# _parse_size():
+#
+# Convert a string representing data size to a number of
+# bytes. E.g. "2K" -> 2048.
+#
+# This uses the same format as systemd's
+# [resource-control](https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#).
+#
+# Arguments:
+# size (str) The string to parse
+# volume (str) A path on the volume to consider for percentage
+# specifications
+#
+# Returns:
+# (int|None) The number of bytes, or None if 'infinity' was specified.
+#
+# Raises:
+# UtilError if the string is not a valid data size.
+#
+def _parse_size(size, volume):
+ if size == 'infinity':
+ return None
+
+ matches = re.fullmatch(r'([0-9]+\.?[0-9]*)([KMGT%]?)', size)
+ if matches is None:
+ raise UtilError("{} is not a valid data size.".format(size))
+
+ num, unit = matches.groups()
+
+ if unit == '%':
+ num = float(num)
+ if num > 100:
+ raise UtilError("{}% is not a valid percentage value.".format(num))
+
+ disk_size, _ = _get_volume_size(volume)
+
+ return disk_size * (num / 100)
+
+ units = ('', 'K', 'M', 'G', 'T')
+ return int(num) * 1024**units.index(unit)
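+
+
+# Illustrative sketch, not part of the original module: sample values that
+# _parse_size() produces. '/tmp' stands in for any path on the volume that
+# percentage quotas would be measured against.
+#
+def _example_parse_sizes():
+    assert _parse_size('2K', '/tmp') == 2048
+    assert _parse_size('infinity', '/tmp') is None
+    # '50%' would return half of the total size of the volume, in bytes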
+
+
+# _pretty_size()
+#
+# Converts a number of bytes into a string representation in KiB, MiB, GiB, TiB
+# represented as K, M, G, T etc.
+#
+# Args:
+# size (int): The size to convert in bytes.
+# dec_places (int): The number of decimal places to output to.
+#
+# Returns:
+# (str): The string representation of the number of bytes in the largest appropriate unit.
+def _pretty_size(size, dec_places=0):
+ psize = size
+ unit = 'B'
+ units = ('B', 'K', 'M', 'G', 'T')
+ for unit in units:
+ if psize < 1024:
+ break
+ elif unit != units[-1]:
+ psize /= 1024
+ return "{size:g}{unit}".format(size=round(psize, dec_places), unit=unit)
+
+
+# Main process pid
+_main_pid = os.getpid()
+
+
+# _is_main_process()
+#
+# Return whether we are in the main process or not.
+#
+def _is_main_process():
+ assert _main_pid is not None
+ return os.getpid() == _main_pid
+
+
+# Recursively remove directories, ignoring file permissions as much as
+# possible.
+def _force_rmtree(rootpath, **kwargs):
+ for root, dirs, _ in os.walk(rootpath):
+ for d in dirs:
+ path = os.path.join(root, d.lstrip('/'))
+ if os.path.exists(path) and not os.path.islink(path):
+ try:
+ os.chmod(path, 0o755)
+ except OSError as e:
+ raise UtilError("Failed to ensure write permission on file '{}': {}"
+ .format(path, e))
+
+ try:
+ shutil.rmtree(rootpath, **kwargs)
+ except OSError as e:
+ raise UtilError("Failed to remove cache directory '{}': {}"
+ .format(rootpath, e))
+
+
+# Recursively make directories in target area
+def _copy_directories(srcdir, destdir, target):
+ this_dir = os.path.dirname(target)
+ new_dir = os.path.join(destdir, this_dir)
+
+ if not os.path.lexists(new_dir):
+ if this_dir:
+ yield from _copy_directories(srcdir, destdir, this_dir)
+
+ old_dir = os.path.join(srcdir, this_dir)
+ if os.path.lexists(old_dir):
+ dir_stat = os.lstat(old_dir)
+ mode = dir_stat.st_mode
+
+ if stat.S_ISDIR(mode) or stat.S_ISLNK(mode):
+ os.makedirs(new_dir)
+ yield (new_dir, mode)
+ else:
+ raise UtilError('Source directory tree has file where '
+ 'directory expected: {}'.format(old_dir))
+
+
+# _ensure_real_directory()
+#
+# Ensure `path` is a real directory and there are no symlink components.
+#
+# Symlink components are allowed in `root`.
+#
+def _ensure_real_directory(root, path):
+ destpath = root
+ for name in os.path.split(path):
+ destpath = os.path.join(destpath, name)
+ try:
+ deststat = os.lstat(destpath)
+ if not stat.S_ISDIR(deststat.st_mode):
+ relpath = destpath[len(root):]
+
+ if stat.S_ISLNK(deststat.st_mode):
+ filetype = 'symlink'
+ elif stat.S_ISREG(deststat.st_mode):
+ filetype = 'regular file'
+ else:
+ filetype = 'special file'
+
+ raise UtilError('Destination is a {}, not a directory: {}'.format(filetype, relpath))
+ except FileNotFoundError:
+ os.makedirs(destpath)
+
+
+# _process_list()
+#
+# Internal helper for copying/moving/linking file lists
+#
+# This will handle directories, symlinks and special files
+# internally, the `actionfunc` will only be called for regular files.
+#
+# Args:
+# srcdir: The source base directory
+# destdir: The destination base directory
+# actionfunc: The function to call for regular files
+# result: The FileListResult
+# filter_callback: Optional callback to invoke for every directory entry
+# ignore_missing: Don't raise any error if a source file is missing
+#
+#
+def _process_list(srcdir, destdir, actionfunc, result,
+ filter_callback=None,
+ ignore_missing=False, report_written=False):
+
+ # Keep track of directory permissions, since these need to be set
+ # *after* files have been written.
+ permissions = []
+
+ filelist = list_relative_paths(srcdir)
+
+ if filter_callback:
+ filelist = [path for path in filelist if filter_callback(path)]
+
+ # Now walk the list
+ for path in filelist:
+ srcpath = os.path.join(srcdir, path)
+ destpath = os.path.join(destdir, path)
+
+ # Ensure that the parent of the destination path exists without symlink
+ # components.
+ _ensure_real_directory(destdir, os.path.dirname(path))
+
+ # Add to the results the list of files written
+ if report_written:
+ result.files_written.append(path)
+
+ # Collect overlaps
+ if os.path.lexists(destpath) and not os.path.isdir(destpath):
+ result.overwritten.append(path)
+
+ # The destination directory may not have been created separately
+ permissions.extend(_copy_directories(srcdir, destdir, path))
+
+ try:
+ file_stat = os.lstat(srcpath)
+ mode = file_stat.st_mode
+
+ except FileNotFoundError as e:
+ # Skip this missing file
+ if ignore_missing:
+ continue
+ else:
+ raise UtilError("Source file is missing: {}".format(srcpath)) from e
+
+ if stat.S_ISDIR(mode):
+ # Ensure directory exists in destination
+ _ensure_real_directory(destdir, path)
+ permissions.append((destpath, os.stat(srcpath).st_mode))
+
+ elif stat.S_ISLNK(mode):
+ if not safe_remove(destpath):
+ result.ignored.append(path)
+ continue
+
+ target = os.readlink(srcpath)
+ os.symlink(target, destpath)
+
+ elif stat.S_ISREG(mode):
+ # Process the file.
+ if not safe_remove(destpath):
+ result.ignored.append(path)
+ continue
+
+ actionfunc(srcpath, destpath, result=result)
+
+ elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
+ # Block or character device: recreate the node with mknod() using st_rdev.
+ if not safe_remove(destpath):
+ result.ignored.append(path)
+ continue
+
+ if os.path.lexists(destpath):
+ os.remove(destpath)
+ os.mknod(destpath, file_stat.st_mode, file_stat.st_rdev)
+ os.chmod(destpath, file_stat.st_mode)
+
+ elif stat.S_ISFIFO(mode):
+ os.mkfifo(destpath, mode)
+
+ elif stat.S_ISSOCK(mode):
+ # We can't duplicate the process serving the socket anyway
+ pass
+
+ else:
+ # Unsupported type.
+ raise UtilError('Cannot extract {} into staging-area. Unsupported type.'.format(srcpath))
+
+ # Write directory permissions now that all files have been written
+ for d, perms in permissions:
+ os.chmod(d, perms)
+
+
+# _set_deterministic_user()
+#
+# Set the uid/gid for every file in a directory tree to the process'
+# euid/egid.
+#
+# Args:
+# directory (str): The directory to recursively set the uid/gid on
+#
+def _set_deterministic_user(directory):
+ user = os.geteuid()
+ group = os.getegid()
+
+ for root, dirs, files in os.walk(directory.encode("utf-8"), topdown=False):
+ for filename in files:
+ os.chown(os.path.join(root, filename), user, group, follow_symlinks=False)
+
+ for dirname in dirs:
+ os.chown(os.path.join(root, dirname), user, group, follow_symlinks=False)
+
+
+# _set_deterministic_mtime()
+#
+# Set the mtime for every file in a directory tree to the same.
+#
+# Args:
+# directory (str): The directory to recursively set the mtime on
+#
+def _set_deterministic_mtime(directory):
+ for dirname, _, filenames in os.walk(directory.encode("utf-8"), topdown=False):
+ for filename in filenames:
+ pathname = os.path.join(dirname, filename)
+
+ # Python's os.utime only ever modifies the timestamp of the
+ # link's target; it is not acceptable to set the target's
+ # timestamp here, because if we are staging the link target we
+ # will also set its timestamp at that point.
+ #
+ # We should however find a way to modify the actual link's
+ # timestamp; this outdated python bug report claims that
+ # it is impossible:
+ #
+ # http://bugs.python.org/issue623782
+ #
+ # However, nowadays it is possible at least on gnuish systems
+ # with the lutimes glibc function.
+ if not os.path.islink(pathname):
+ os.utime(pathname, (_magic_timestamp, _magic_timestamp))
+
+ os.utime(dirname, (_magic_timestamp, _magic_timestamp))
+
+
+# _tempdir()
+#
+# A context manager for doing work in a temporary directory.
+#
+# Args:
+# dir (str): A path to a parent directory for the temporary directory
+# suffix (str): A suffix for the temporary directory name
+# prefix (str): A prefix for the temporary directory name
+#
+# Yields:
+# (str): The temporary directory
+#
+# In addition to the functionality provided by python's
+# tempfile.TemporaryDirectory() context manager, this one additionally
+# supports cleaning up the temp directory on SIGTERM.
+#
+@contextmanager
+def _tempdir(suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin
+ tempdir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
+
+ def cleanup_tempdir():
+ if os.path.isdir(tempdir):
+ _force_rmtree(tempdir)
+
+ try:
+ with _signals.terminator(cleanup_tempdir):
+ yield tempdir
+ finally:
+ cleanup_tempdir()
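+
+
+# Illustrative sketch, not part of the original module: doing scratch work
+# in a SIGTERM-safe temporary directory. The file name is arbitrary.
+#
+def _example_scratch_work():
+    with _tempdir(prefix='example-') as scratch:
+        path = os.path.join(scratch, 'notes.txt')
+        with open(path, 'w') as f:
+            f.write('scratch data\n')
+    # The directory is cleaned up here, even on SIGTERM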
+
+
+# _tempnamedfile()
+#
+# A context manager for doing work on an open temporary file
+# which is guaranteed to be named and have an entry in the filesystem.
+#
+# Args:
+# dir (str): A path to a parent directory for the temporary file
+# suffix (str): A suffix for the temporary file name
+# prefix (str): A prefix for the temporary file name
+#
+# Yields:
+# (file): The temporary file handle
+#
+# Do not use tempfile.NamedTemporaryFile() directly, as this will
+# leak files on the filesystem when BuildStream exits a process
+# on SIGTERM.
+#
+@contextmanager
+def _tempnamedfile(suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin
+ temp = None
+
+ def close_tempfile():
+ if temp is not None:
+ temp.close()
+
+ with _signals.terminator(close_tempfile), \
+ tempfile.NamedTemporaryFile(suffix=suffix, prefix=prefix, dir=dir) as temp:
+ yield temp
+
+
+# _kill_process_tree()
+#
+# Brutally murder a process and all of its children
+#
+# Args:
+# pid (int): Process ID
+#
+def _kill_process_tree(pid):
+ proc = psutil.Process(pid)
+ children = proc.children(recursive=True)
+
+ def kill_proc(p):
+ try:
+ p.kill()
+ except psutil.AccessDenied:
+ # Ignore this error, it can happen with
+ # some setuid bwrap processes.
+ pass
+ except psutil.NoSuchProcess:
+ # It is certain that this has already been sent
+ # SIGTERM, so there is a window where the process
+ # could have exited already.
+ pass
+
+ # Bloody Murder
+ for child in children:
+ kill_proc(child)
+ kill_proc(proc)
+
+
+# _call()
+#
+# A wrapper for subprocess.call() supporting suspend and resume
+#
+# Args:
+# popenargs (list): Popen() arguments
+# terminate (bool): Whether to attempt graceful termination before killing
+# rest_of_args (kwargs): Remaining arguments to subprocess.call()
+#
+# Returns:
+# (int): The process exit code.
+# (str): The program output.
+#
+def _call(*popenargs, terminate=False, **kwargs):
+
+ kwargs['start_new_session'] = True
+
+ process = None
+
+ old_preexec_fn = kwargs.get('preexec_fn')
+ if 'preexec_fn' in kwargs:
+ del kwargs['preexec_fn']
+
+ def preexec_fn():
+ os.umask(stat.S_IWGRP | stat.S_IWOTH)
+ if old_preexec_fn is not None:
+ old_preexec_fn()
+
+ # Handle termination, suspend and resume
+ def kill_proc():
+ if process:
+
+ # Some callers know that their subprocess can be
+ # gracefully terminated, make an attempt first
+ if terminate:
+ proc = psutil.Process(process.pid)
+ proc.terminate()
+
+ try:
+ proc.wait(20)
+ except psutil.TimeoutExpired:
+ # Did not terminate within the timeout: murder
+ _kill_process_tree(process.pid)
+
+ else:
+ # FIXME: This is a brutal but reliable approach
+ #
+ # Other variations I've tried which try SIGTERM first
+ # and then wait for child processes to exit gracefully
+ # have not reliably cleaned up process trees and have
+ # left orphaned git or ssh processes alive.
+ #
+ # This cleans up the subprocesses reliably but may
+ # cause side effects such as possibly leaving stale
+ # locks behind. Hopefully this should not be an issue
+ # as long as any child processes only interact with
+ # the temp directories which we control and cleanup
+ # ourselves.
+ #
+ _kill_process_tree(process.pid)
+
+ def suspend_proc():
+ if process:
+ group_id = os.getpgid(process.pid)
+ os.killpg(group_id, signal.SIGSTOP)
+
+ def resume_proc():
+ if process:
+ group_id = os.getpgid(process.pid)
+ os.killpg(group_id, signal.SIGCONT)
+
+ with _signals.suspendable(suspend_proc, resume_proc), _signals.terminator(kill_proc):
+ process = subprocess.Popen( # pylint: disable=subprocess-popen-preexec-fn
+ *popenargs, preexec_fn=preexec_fn, universal_newlines=True, **kwargs)
+ output, _ = process.communicate()
+ exit_code = process.poll()
+
+ return (exit_code, output)
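+
+
+# Illustrative sketch, not part of the original module: running a host
+# command through _call() and checking its result. The command itself is
+# an arbitrary example.
+#
+def _example_run_command():
+    exit_code, output = _call(['ls', '/'], stdout=subprocess.PIPE, terminate=True)
+    if exit_code != 0:
+        raise UtilError("Command failed with exit code {}".format(exit_code))
+    return output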
+
+
+# _glob2re()
+#
+# Function to translate a glob style pattern into a regex
+#
+# Args:
+# pat (str): The glob pattern
+#
+# This is a modified version of the python standard library's
+# fnmatch.translate() function which supports path like globbing
+# a bit more correctly, and additionally supports recursive glob
+# patterns with double asterisk.
+#
+# Note that this will only support the most basic of standard
+# glob patterns, and additionally the recursive double asterisk.
+#
+# Support includes:
+#
+# * Match any pattern except a path separator
+# ** Match any pattern, including path separators
+# ? Match any single character
+# [abc] Match one of the specified characters
+# [A-Z] Match one of the characters in the specified range
+# [!abc] Match any single character, except the specified characters
+# [!A-Z] Match any single character, except those in the specified range
+#
+def _glob2re(pat):
+ i, n = 0, len(pat)
+ res = '(?ms)'
+ while i < n:
+ c = pat[i]
+ i = i + 1
+ if c == '*':
+ # fnmatch.translate() simply uses the '.*' separator here,
+ # we only want that for double asterisk (bash 'globstar' behavior)
+ #
+ if i < n and pat[i] == '*':
+ res = res + '.*'
+ i = i + 1
+ else:
+ res = res + '[^/]*'
+ elif c == '?':
+ # fnmatch.translate() simply uses the '.' wildcard here, but
+ # we don't want to match path separators here
+ res = res + '[^/]'
+ elif c == '[':
+ j = i
+ if j < n and pat[j] == '!':
+ j = j + 1
+ if j < n and pat[j] == ']':
+ j = j + 1
+ while j < n and pat[j] != ']':
+ j = j + 1
+ if j >= n:
+ res = res + '\\['
+ else:
+ stuff = pat[i:j].replace('\\', '\\\\')
+ i = j + 1
+ if stuff[0] == '!':
+ stuff = '^' + stuff[1:]
+ elif stuff[0] == '^':
+ stuff = '\\' + stuff
+ res = '{}[{}]'.format(res, stuff)
+ else:
+ res = res + re.escape(c)
+ return res + r'\Z'
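+
+
+# Illustrative sketch, not part of the original module: matching paths
+# against a translated glob. The pattern and paths are examples only.
+#
+def _example_glob_match():
+    regex = re.compile(_glob2re('usr/lib/**'))
+    assert regex.match('usr/lib/libfoo.so')
+    assert not regex.match('usr/bin/foo')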
+
+
+# _deduplicate()
+#
+# Remove duplicate entries in a list or other iterable.
+#
+# Copied verbatim from the unique_everseen() example at
+# https://docs.python.org/3/library/itertools.html#itertools-recipes
+#
+# Args:
+# iterable (iterable): What to deduplicate
+# key (callable): Optional function to map from list entry to value
+#
+# Returns:
+# (generator): Generator that produces a deduplicated version of 'iterable'
+#
+def _deduplicate(iterable, key=None):
+ seen = set()
+ seen_add = seen.add
+ if key is None:
+ for element in itertools.filterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
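+
+
+# Illustrative sketch, not part of the original module: deduplicating while
+# preserving the original order.
+#
+def _example_deduplicate():
+    assert list(_deduplicate(['a', 'b', 'a', 'c', 'b'])) == ['a', 'b', 'c']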
+
+
+# Like os.path.getmtime(), but returns the mtime of a link rather than
+# the target, if the filesystem supports that.
+#
+def _get_link_mtime(path):
+ path_stat = os.lstat(path)
+ return path_stat.st_mtime
+
+
+# _message_digest()
+#
+# Args:
+# message_buffer (str): String to create digest of
+#
+# Returns:
+# (remote_execution_pb2.Digest): Content digest
+#
+def _message_digest(message_buffer):
+ sha = hashlib.sha256(message_buffer)
+ digest = remote_execution_pb2.Digest()
+ digest.hash = sha.hexdigest()
+ digest.size_bytes = len(message_buffer)
+ return digest
+
+
+# _search_upward_for_files()
+#
+# Searches upwards (from directory, then directory's parent directory...)
+# for any of the files listed in `filenames`.
+#
+# If multiple filenames are specified, and present in the same directory,
+# the first filename in the list will be returned.
+#
+# Args:
+# directory (str): The directory to begin searching for files from
+# filenames (list of str): The names of files to search for
+#
+# Returns:
+# (str): The directory a file was found in, or None
+# (str): The name of the first file that was found in that directory, or None
+#
+def _search_upward_for_files(directory, filenames):
+ directory = os.path.abspath(directory)
+ while True:
+ for filename in filenames:
+ file_path = os.path.join(directory, filename)
+ if os.path.isfile(file_path):
+ return directory, filename
+
+ parent_dir = os.path.dirname(directory)
+ if directory == parent_dir:
+ # i.e. we've reached the root of the filesystem
+ return None, None
+ directory = parent_dir
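+
+
+# Illustrative sketch, not part of the original module: locating a project
+# configuration file somewhere above the current directory. The file name
+# 'project.conf' is used purely as an example.
+#
+def _example_find_project_conf():
+    directory, filename = _search_upward_for_files(os.getcwd(), ['project.conf'])
+    if directory is None:
+        raise UtilError("No project.conf found above {}".format(os.getcwd()))
+    return os.path.join(directory, filename)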
+
+
+# _deterministic_umask()
+#
+# Context manager to apply a umask to a section that may be affected by the
+# user's umask. Restores the old mask afterwards.
+#
+@contextmanager
+def _deterministic_umask():
+ old_umask = os.umask(0o022)
+
+ try:
+ yield
+ finally:
+ os.umask(old_umask)
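+
+
+# Illustrative sketch, not part of the original module: creating a directory
+# with predictable permissions regardless of the caller's umask. The path is
+# hypothetical.
+#
+def _example_create_with_umask(path):
+    with _deterministic_umask():
+        os.makedirs(path, exist_ok=True)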