author    Chandan Singh <csingh43@bloomberg.net>  2019-04-24 22:53:19 +0100
committer Chandan Singh <csingh43@bloomberg.net>  2019-05-21 12:41:18 +0100
commit    070d053e5cc47e572e9f9e647315082bd7a15c63 (patch)
tree      7fb0fdff52f9b5f8a18ec8fe9c75b661f9e0839e /src/buildstream/sandbox
parent    6c59e7901a52be961c2a1b671cf2b30f90bc4d0a (diff)
download  buildstream-070d053e5cc47e572e9f9e647315082bd7a15c63.tar.gz
Move source from 'buildstream' to 'src/buildstream'
This was discussed in #1008. Fixes #1009.
Diffstat (limited to 'src/buildstream/sandbox')
-rw-r--r--  src/buildstream/sandbox/__init__.py         22
-rw-r--r--  src/buildstream/sandbox/_config.py          62
-rw-r--r--  src/buildstream/sandbox/_mount.py          149
-rw-r--r--  src/buildstream/sandbox/_mounter.py        147
-rw-r--r--  src/buildstream/sandbox/_sandboxbwrap.py   433
-rw-r--r--  src/buildstream/sandbox/_sandboxchroot.py  325
-rw-r--r--  src/buildstream/sandbox/_sandboxdummy.py    36
-rw-r--r--  src/buildstream/sandbox/_sandboxremote.py  577
-rw-r--r--  src/buildstream/sandbox/sandbox.py         717
9 files changed, 2468 insertions, 0 deletions
diff --git a/src/buildstream/sandbox/__init__.py b/src/buildstream/sandbox/__init__.py
new file mode 100644
index 000000000..5966d194f
--- /dev/null
+++ b/src/buildstream/sandbox/__init__.py
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+from .sandbox import Sandbox, SandboxFlags, SandboxCommandError
+from ._sandboxremote import SandboxRemote
+from ._sandboxdummy import SandboxDummy
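The package's public surface is intentionally small: only Sandbox, SandboxFlags and SandboxCommandError come from the public .sandbox module, while the concrete implementations stay private. As a quick illustration (a sketch, assuming an installed buildstream package), the flags are plain bitmask values and combine the way the implementations below test them:

    # Illustrative only: combining sandbox flags as a bitmask
    from buildstream.sandbox import SandboxFlags

    flags = SandboxFlags.ROOT_READ_ONLY | SandboxFlags.NETWORK_ENABLED

    if flags & SandboxFlags.ROOT_READ_ONLY:
        print("root will be remounted read-only")
    if not flags & SandboxFlags.NETWORK_ENABLED:
        print("the network namespace will be unshared")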
diff --git a/src/buildstream/sandbox/_config.py b/src/buildstream/sandbox/_config.py
new file mode 100644
index 000000000..457f92b3c
--- /dev/null
+++ b/src/buildstream/sandbox/_config.py
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2018 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jim MacArthur <jim.macarthur@codethink.co.uk>
+
+
+# SandboxConfig
+#
+# A container for sandbox configuration data. We want the internals
+# of this to be opaque, hence putting it in its own private file.
+class SandboxConfig():
+ def __init__(self, build_uid, build_gid, build_os=None, build_arch=None):
+ self.build_uid = build_uid
+ self.build_gid = build_gid
+ self.build_os = build_os
+ self.build_arch = build_arch
+
+ # get_unique_key():
+ #
+ # This returns the SandboxConfig's contribution
+ # to an element's cache key.
+ #
+ # Returns:
+ # (dict): A dictionary to add to an element's cache key
+ #
+ def get_unique_key(self):
+
+ # Currently operating system and machine architecture
+ # are not configurable and we have no sandbox implementation
+ # which can conform to such configurations.
+ #
+ # However this should be the right place to support
+ # such configurations in the future.
+ #
+ unique_key = {
+ 'os': self.build_os,
+ 'arch': self.build_arch
+ }
+
+ # Avoid breaking cache key calculation with
+ # the addition of configurable build uid/gid
+ if self.build_uid != 0:
+ unique_key['build-uid'] = self.build_uid
+
+ if self.build_gid != 0:
+ unique_key['build-gid'] = self.build_gid
+
+ return unique_key
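A quick sketch of the uid/gid special-casing above, using only the class defined in this file: the 'build-uid' and 'build-gid' keys are only added when they differ from the default of 0, so enabling this configuration does not invalidate existing cache keys.

    config = SandboxConfig(build_uid=0, build_gid=0, build_os='linux', build_arch='x86_64')
    print(config.get_unique_key())
    # {'os': 'linux', 'arch': 'x86_64'}

    config = SandboxConfig(build_uid=1000, build_gid=1000, build_os='linux', build_arch='x86_64')
    print(config.get_unique_key())
    # adds 'build-uid': 1000 and 'build-gid': 1000 to the key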
diff --git a/src/buildstream/sandbox/_mount.py b/src/buildstream/sandbox/_mount.py
new file mode 100644
index 000000000..c0f26c8d7
--- /dev/null
+++ b/src/buildstream/sandbox/_mount.py
@@ -0,0 +1,149 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+import os
+from collections import OrderedDict
+from contextlib import contextmanager, ExitStack
+
+from .. import utils
+from .._fuse import SafeHardlinks
+
+
+# Mount()
+#
+# Helper data object representing a single mount point in the mount map
+#
+class Mount():
+ def __init__(self, sandbox, mount_point, safe_hardlinks, fuse_mount_options=None):
+ # Getting _get_underlying_directory() here is acceptable as
+ # we're part of the sandbox code. This will fail if our
+ # directory is CAS-based.
+ root_directory = sandbox.get_virtual_directory()._get_underlying_directory()
+
+ self.mount_point = mount_point
+ self.safe_hardlinks = safe_hardlinks
+ self._fuse_mount_options = {} if fuse_mount_options is None else fuse_mount_options
+
+ # FIXME: When the criteria for mounting something and its parent
+ # mount are identical, there is no need to mount an additional
+ # fuse layer (i.e. if the root is read-write and there is a directory
+ # marked for staged artifacts directly within the rootfs, they can
+ # safely share the same fuse layer).
+ #
+ # In these cases it would be saner to redirect the sub-mount to
+ # a regular mount point within the parent's redirected mount.
+ #
+ if self.safe_hardlinks:
+ scratch_directory = sandbox._get_scratch_directory()
+ # Redirected mount
+ self.mount_origin = os.path.join(root_directory, mount_point.lstrip(os.sep))
+ self.mount_base = os.path.join(scratch_directory, utils.url_directory_name(mount_point))
+ self.mount_source = os.path.join(self.mount_base, 'mount')
+ self.mount_tempdir = os.path.join(self.mount_base, 'temp')
+ os.makedirs(self.mount_origin, exist_ok=True)
+ os.makedirs(self.mount_tempdir, exist_ok=True)
+ else:
+ # No redirection needed
+ self.mount_source = os.path.join(root_directory, mount_point.lstrip(os.sep))
+
+ external_mount_sources = sandbox._get_mount_sources()
+ external_mount_source = external_mount_sources.get(mount_point)
+
+ if external_mount_source is None:
+ os.makedirs(self.mount_source, exist_ok=True)
+ else:
+ if os.path.isdir(external_mount_source):
+ os.makedirs(self.mount_source, exist_ok=True)
+ else:
+ # When mounting a regular file, ensure the parent
+ # directory exists in the sandbox, and that an empty
+ # file is created at the mount location.
+ parent_dir = os.path.dirname(self.mount_source.rstrip('/'))
+ os.makedirs(parent_dir, exist_ok=True)
+ if not os.path.exists(self.mount_source):
+ with open(self.mount_source, 'w'):
+ pass
+
+ @contextmanager
+ def mounted(self, sandbox):
+ if self.safe_hardlinks:
+ mount = SafeHardlinks(self.mount_origin, self.mount_tempdir, self._fuse_mount_options)
+ with mount.mounted(self.mount_source):
+ yield
+ else:
+ # Nothing to mount here
+ yield
+
+
+# MountMap()
+#
+# Helper object for mapping of the sandbox mountpoints
+#
+# Args:
+# sandbox (Sandbox): The sandbox object
+# root_readonly (bool): Whether the sandbox root is readonly
+#
+class MountMap():
+
+ def __init__(self, sandbox, root_readonly, fuse_mount_options=None):
+ # We will be doing the mounts in the order in which they were declared.
+ self.mounts = OrderedDict()
+
+ if fuse_mount_options is None:
+ fuse_mount_options = {}
+
+ # We want safe hardlinks on rootfs whenever root is not readonly
+ self.mounts['/'] = Mount(sandbox, '/', not root_readonly, fuse_mount_options)
+
+ for mark in sandbox._get_marked_directories():
+ directory = mark['directory']
+ artifact = mark['artifact']
+
+ # We want safe hardlinks for any non-root directory where
+ # artifacts will be staged to
+ self.mounts[directory] = Mount(sandbox, directory, artifact, fuse_mount_options)
+
+ # get_mount_source()
+ #
+ # Gets the host directory where the mountpoint in the
+ # sandbox should be bind mounted from
+ #
+ # Args:
+ # mountpoint (str): The absolute mountpoint path inside the sandbox
+ #
+ # Returns:
+ # The host path to be mounted at the mount point
+ #
+ def get_mount_source(self, mountpoint):
+ return self.mounts[mountpoint].mount_source
+
+ # mounted()
+ #
+ # A context manager which ensures all the mount sources
+ # are mounted, with any fuse layers which may be needed.
+ #
+ # Args:
+ # sandbox (Sandbox): The sandbox
+ #
+ @contextmanager
+ def mounted(self, sandbox):
+ with ExitStack() as stack:
+ for _, mount in self.mounts.items():
+ stack.enter_context(mount.mounted(sandbox))
+ yield
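MountMap.mounted() leans on ExitStack: each Mount's context is entered in declaration order (hence the OrderedDict), and everything is unwound in reverse when the with-block exits, even on error. A self-contained sketch of that pattern, with a hypothetical layer() standing in for Mount.mounted() and made-up mount points:

    from contextlib import contextmanager, ExitStack

    @contextmanager
    def layer(name):
        # Hypothetical stand-in for Mount.mounted(): "mount" on enter, "unmount" on exit
        print("mount", name)
        try:
            yield
        finally:
            print("unmount", name)

    with ExitStack() as stack:
        for name in ['/', '/buildstream/install']:
            stack.enter_context(layer(name))
        print("run the build command here")
    # Unwinds in reverse: /buildstream/install is unmounted before /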
diff --git a/src/buildstream/sandbox/_mounter.py b/src/buildstream/sandbox/_mounter.py
new file mode 100644
index 000000000..e6054c20d
--- /dev/null
+++ b/src/buildstream/sandbox/_mounter.py
@@ -0,0 +1,147 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+
+import sys
+from contextlib import contextmanager
+
+from .._exceptions import SandboxError
+from .. import utils, _signals
+
+
+# A class to wrap the `mount` and `umount` system commands
+class Mounter():
+ @classmethod
+ def _mount(cls, dest, src=None, mount_type=None,
+ stdout=sys.stdout, stderr=sys.stderr, options=None,
+ flags=None):
+
+ argv = [utils.get_host_tool('mount')]
+ if mount_type:
+ argv.extend(['-t', mount_type])
+ if options:
+ argv.extend(['-o', options])
+ if flags:
+ argv.extend(flags)
+
+ if src is not None:
+ argv += [src]
+ argv += [dest]
+
+ status, _ = utils._call(
+ argv,
+ terminate=True,
+ stdout=stdout,
+ stderr=stderr
+ )
+
+ if status != 0:
+ raise SandboxError('`{}` failed with exit code {}'
+ .format(' '.join(argv), status))
+
+ return dest
+
+ @classmethod
+ def _umount(cls, path, stdout=sys.stdout, stderr=sys.stderr):
+
+ cmd = [utils.get_host_tool('umount'), '-R', path]
+ status, _ = utils._call(
+ cmd,
+ terminate=True,
+ stdout=stdout,
+ stderr=stderr
+ )
+
+ if status != 0:
+ raise SandboxError('`{}` failed with exit code {}'
+ .format(' '.join(cmd), status))
+
+ # mount()
+ #
+ # A wrapper for the `mount` command. The device is unmounted when
+ # the context is left.
+ #
+ # Args:
+ # dest (str) - The directory to mount to
+ # src (str) - The directory to mount
+ # stdout (file) - stdout
+ # stderr (file) - stderr
+ # mount_type (str|None) - The mount type (can be omitted or None)
+ # kwargs - Arguments to pass to the mount command, such as `ro=True`
+ #
+ # Yields:
+ # (str) The path to the destination
+ #
+ @classmethod
+ @contextmanager
+ def mount(cls, dest, src=None, stdout=sys.stdout,
+ stderr=sys.stderr, mount_type=None, **kwargs):
+
+ def kill_proc():
+ cls._umount(dest, stdout, stderr)
+
+ options = ','.join([key for key, val in kwargs.items() if val])
+
+ path = cls._mount(dest, src, mount_type, stdout=stdout, stderr=stderr, options=options)
+ try:
+ with _signals.terminator(kill_proc):
+ yield path
+ finally:
+ cls._umount(dest, stdout, stderr)
+
+ # bind_mount()
+ #
+ # Mount a directory to a different location (a hardlink for all
+ # intents and purposes). The directory is unmounted when the
+ # context is left.
+ #
+ # Args:
+ # dest (str) - The directory to mount to
+ # src (str) - The directory to mount
+ # stdout (file) - stdout
+ # stderr (file) - stderr
+ # kwargs - Arguments to pass to the mount command, such as `ro=True`
+ #
+ # Yields:
+ # (str) The path to the destination
+ #
+ # While this is equivalent to `mount --rbind`, this option may not
+ # exist and can be dangerous, requiring careful cleanup. It is
+ # recommended to use this function over a manual mount invocation.
+ #
+ @classmethod
+ @contextmanager
+ def bind_mount(cls, dest, src=None, stdout=sys.stdout,
+ stderr=sys.stderr, **kwargs):
+
+ def kill_proc():
+ cls._umount(dest, stdout, stderr)
+
+ kwargs['rbind'] = True
+ options = ','.join([key for key, val in kwargs.items() if val])
+
+ path = cls._mount(dest, src, None, stdout, stderr, options)
+
+ try:
+ with _signals.terminator(kill_proc):
+ # Make the rbind a slave to avoid unmounting vital devices in
+ # /proc
+ cls._mount(dest, flags=['--make-rslave'])
+ yield path
+ finally:
+ cls._umount(dest, stdout, stderr)
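The kwargs handling in mount() and bind_mount() is the part worth internalising: every truthy keyword becomes part of a comma-separated `-o` option string, and bind_mount() always forces `rbind` in. A usage sketch (the paths are invented, and actually running this needs root plus real mount/umount binaries):

    # How keyword arguments become mount options:
    kwargs = {'ro': True, 'rbind': True, 'noexec': False}
    print(','.join(key for key, val in kwargs.items() if val))   # 'ro,rbind'

    # Typical use: bind /dev into a sysroot read-only; unmounted when the block exits
    # with Mounter.bind_mount('/path/to/sysroot/dev', src='/dev', ro=True) as dest:
    #     ...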
diff --git a/src/buildstream/sandbox/_sandboxbwrap.py b/src/buildstream/sandbox/_sandboxbwrap.py
new file mode 100644
index 000000000..d2abc33d0
--- /dev/null
+++ b/src/buildstream/sandbox/_sandboxbwrap.py
@@ -0,0 +1,433 @@
+#
+# Copyright (C) 2016 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Andrew Leeming <andrew.leeming@codethink.co.uk>
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+import collections
+import json
+import os
+import sys
+import time
+import errno
+import signal
+import subprocess
+import shutil
+from contextlib import ExitStack, suppress
+from tempfile import TemporaryFile
+
+import psutil
+
+from .._exceptions import SandboxError
+from .. import utils, _signals
+from ._mount import MountMap
+from . import Sandbox, SandboxFlags
+
+
+# SandboxBwrap()
+#
+# Default bubblewrap based sandbox implementation.
+#
+class SandboxBwrap(Sandbox):
+
+ # Minimal set of devices for the sandbox
+ DEVICES = [
+ '/dev/full',
+ '/dev/null',
+ '/dev/urandom',
+ '/dev/random',
+ '/dev/zero'
+ ]
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.user_ns_available = kwargs['user_ns_available']
+ self.die_with_parent_available = kwargs['die_with_parent_available']
+ self.json_status_available = kwargs['json_status_available']
+ self.linux32 = kwargs['linux32']
+
+ def _run(self, command, flags, *, cwd, env):
+ stdout, stderr = self._get_output()
+
+ # Accessing the underlying storage is acceptable here, as we're part of the sandbox code
+ root_directory = self.get_virtual_directory()._get_underlying_directory()
+
+ if not self._has_command(command[0], env):
+ raise SandboxError("Staged artifacts do not provide command "
+ "'{}'".format(command[0]),
+ reason='missing-command')
+
+ # Create the mount map, this will tell us where
+ # each mount point needs to be mounted from and to
+ mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY)
+ root_mount_source = mount_map.get_mount_source('/')
+
+ # start command with linux32 if needed
+ if self.linux32:
+ bwrap_command = [utils.get_host_tool('linux32')]
+ else:
+ bwrap_command = []
+
+ # Grab the full path of the bwrap binary
+ bwrap_command += [utils.get_host_tool('bwrap')]
+
+ for k, v in env.items():
+ bwrap_command += ['--setenv', k, v]
+ for k in os.environ.keys() - env.keys():
+ bwrap_command += ['--unsetenv', k]
+
+ # Create a new pid namespace, this also ensures that any subprocesses
+ # are cleaned up when the bwrap process exits.
+ bwrap_command += ['--unshare-pid']
+
+ # Ensure subprocesses are cleaned up when the bwrap parent dies.
+ if self.die_with_parent_available:
+ bwrap_command += ['--die-with-parent']
+
+ # Add in the root filesystem stuff first.
+ #
+ # The rootfs is mounted as RW initially so that further mounts can be
+ # placed on top. If a RO root is required, after all other mounts are
+ # complete, root is remounted as RO
+ bwrap_command += ["--bind", root_mount_source, "/"]
+
+ if not flags & SandboxFlags.NETWORK_ENABLED:
+ bwrap_command += ['--unshare-net']
+ bwrap_command += ['--unshare-uts', '--hostname', 'buildstream']
+ bwrap_command += ['--unshare-ipc']
+
+ # Give it a proc and tmpfs
+ bwrap_command += [
+ '--proc', '/proc',
+ '--tmpfs', '/tmp'
+ ]
+
+ # In interactive mode, we want a complete devpts inside
+ # the container, so there is a /dev/console and such. In
+ # the regular non-interactive sandbox, we want to hand pick
+ # a minimal set of devices to expose to the sandbox.
+ #
+ if flags & SandboxFlags.INTERACTIVE:
+ bwrap_command += ['--dev', '/dev']
+ else:
+ for device in self.DEVICES:
+ bwrap_command += ['--dev-bind', device, device]
+
+ # Add bind mounts to any marked directories
+ marked_directories = self._get_marked_directories()
+ mount_source_overrides = self._get_mount_sources()
+ for mark in marked_directories:
+ mount_point = mark['directory']
+ if mount_point in mount_source_overrides: # pylint: disable=consider-using-get
+ mount_source = mount_source_overrides[mount_point]
+ else:
+ mount_source = mount_map.get_mount_source(mount_point)
+
+ # Use --dev-bind for all mounts, this is simply a bind mount which is
+ # not restrictive about devices.
+ #
+ # While it's important for users to be able to mount devices
+ # into the sandbox for `bst shell` testing purposes, it is
+ # harmless to do in a build environment where the directories
+ # we mount just never contain device files.
+ #
+ bwrap_command += ['--dev-bind', mount_source, mount_point]
+
+ if flags & SandboxFlags.ROOT_READ_ONLY:
+ bwrap_command += ["--remount-ro", "/"]
+
+ if cwd is not None:
+ bwrap_command += ['--dir', cwd]
+ bwrap_command += ['--chdir', cwd]
+
+ # Set UID and GID
+ if self.user_ns_available:
+ bwrap_command += ['--unshare-user']
+ if not flags & SandboxFlags.INHERIT_UID:
+ uid = self._get_config().build_uid
+ gid = self._get_config().build_gid
+ bwrap_command += ['--uid', str(uid), '--gid', str(gid)]
+
+ with ExitStack() as stack:
+ pass_fds = ()
+ # Improve error reporting with json-status if available
+ if self.json_status_available:
+ json_status_file = stack.enter_context(TemporaryFile())
+ pass_fds = (json_status_file.fileno(),)
+ bwrap_command += ['--json-status-fd', str(json_status_file.fileno())]
+
+ # Add the command
+ bwrap_command += command
+
+ # bwrap might create some directories while running setuid
+ # root and may give them root's gid. If it does, we'll want
+ # to clean them up afterwards, so record what already existed
+ # there, so that we can safely clean up the debris.
+ #
+ existing_basedirs = {
+ directory: os.path.exists(os.path.join(root_directory, directory))
+ for directory in ['tmp', 'dev', 'proc']
+ }
+
+ # Use the MountMap context manager to ensure that any redirected
+ # mounts through fuse layers are in context and ready for bwrap
+ # to mount them from.
+ #
+ stack.enter_context(mount_map.mounted(self))
+
+ # If we're interactive, we want to inherit our stdin,
+ # otherwise redirect to /dev/null, ensuring the process
+ # is disconnected from the terminal.
+ if flags & SandboxFlags.INTERACTIVE:
+ stdin = sys.stdin
+ else:
+ stdin = stack.enter_context(open(os.devnull, "r"))
+
+ # Run bubblewrap !
+ exit_code = self.run_bwrap(bwrap_command, stdin, stdout, stderr,
+ (flags & SandboxFlags.INTERACTIVE), pass_fds)
+
+ # Cleanup things which bwrap might have left behind, while
+ # everything is still mounted, because bwrap may have created
+ # the devices on the fuse mount, so we should remove them there.
+ if not flags & SandboxFlags.INTERACTIVE:
+ for device in self.DEVICES:
+ device_path = os.path.join(root_mount_source, device.lstrip('/'))
+
+ # This will remove the device in a loop, allowing some
+ # retries in case the device file leaked by bubblewrap is still busy
+ self.try_remove_device(device_path)
+
+ # Remove /tmp, this is a bwrap owned thing we want to be sure
+ # never ends up in an artifact
+ for basedir in ['tmp', 'dev', 'proc']:
+
+ # Skip removal of directories which already existed before
+ # launching bwrap
+ if existing_basedirs[basedir]:
+ continue
+
+ base_directory = os.path.join(root_mount_source, basedir)
+
+ if flags & SandboxFlags.INTERACTIVE:
+ # Be more lenient in interactive mode here.
+ #
+ # In interactive mode; it's possible that the project shell
+ # configuration has mounted some things below the base
+ # directories, such as /dev/dri, and in this case it's less
+ # important to consider cleanup, as we won't be collecting
+ # this build result and creating an artifact.
+ #
+ # Note: Ideally; we should instead fix upstream bubblewrap to
+ # cleanup any debris it creates at startup time, and do
+ # the same ourselves for any directories we explicitly create.
+ #
+ shutil.rmtree(base_directory, ignore_errors=True)
+ else:
+ try:
+ os.rmdir(base_directory)
+ except FileNotFoundError:
+ # ignore this, if bwrap cleaned up properly then it's not a problem.
+ #
+ # If the directory was not empty on the other hand, then this is clearly
+ # a bug: bwrap mounted a tmpfs here and when it exits, that had better be empty.
+ pass
+
+ if self.json_status_available:
+ json_status_file.seek(0, 0)
+ child_exit_code = None
+ # The JSON status file's output is a JSON object per line
+ # with the keys present identifying the type of message.
+ # The only message relevant to us now is the exit-code of the subprocess.
+ for line in json_status_file:
+ with suppress(json.decoder.JSONDecodeError):
+ o = json.loads(line)
+ if isinstance(o, collections.abc.Mapping) and 'exit-code' in o:
+ child_exit_code = o['exit-code']
+ break
+ if child_exit_code is None:
+ raise SandboxError("`bwrap' terminated during sandbox setup with exitcode {}".format(exit_code),
+ reason="bwrap-sandbox-fail")
+ exit_code = child_exit_code
+
+ self._vdir._mark_changed()
+ return exit_code
+
+ def run_bwrap(self, argv, stdin, stdout, stderr, interactive, pass_fds):
+ # Wrapper around subprocess.Popen() with common settings.
+ #
+ # This function blocks until the subprocess has terminated.
+ #
+ # It then returns a tuple of (exit code, stdout output, stderr output).
+ # If stdout was not equal to subprocess.PIPE, stdout will be None. Same for
+ # stderr.
+
+ # Fetch the process actually launched inside the bwrap sandbox, or the
+ # intermediate control bwrap processes.
+ #
+ # NOTE:
+ # The main bwrap process itself is setuid root and as such we cannot
+ # send it any signals. Since we launch bwrap with --unshare-pid, its
+ # direct child is another bwrap process which retains ownership of the
+ # pid namespace. This is the right process to kill when terminating.
+ #
+ # The grandchild is the binary which we asked bwrap to launch on our
+ # behalf, whatever this binary is, it is the right process to use
+ # for suspending and resuming. In the case that this is a shell, the
+ # shell will be group leader and all build scripts will stop/resume
+ # with that shell.
+ #
+ def get_user_proc(bwrap_pid, grand_child=False):
+ bwrap_proc = psutil.Process(bwrap_pid)
+ bwrap_children = bwrap_proc.children()
+ if bwrap_children:
+ if grand_child:
+ bwrap_grand_children = bwrap_children[0].children()
+ if bwrap_grand_children:
+ return bwrap_grand_children[0]
+ else:
+ return bwrap_children[0]
+ return None
+
+ def terminate_bwrap():
+ if process:
+ user_proc = get_user_proc(process.pid)
+ if user_proc:
+ user_proc.kill()
+
+ def suspend_bwrap():
+ if process:
+ user_proc = get_user_proc(process.pid, grand_child=True)
+ if user_proc:
+ group_id = os.getpgid(user_proc.pid)
+ os.killpg(group_id, signal.SIGSTOP)
+
+ def resume_bwrap():
+ if process:
+ user_proc = get_user_proc(process.pid, grand_child=True)
+ if user_proc:
+ group_id = os.getpgid(user_proc.pid)
+ os.killpg(group_id, signal.SIGCONT)
+
+ with ExitStack() as stack:
+
+ # We want to launch bwrap in a new session in non-interactive
+ # mode so that we handle the SIGTERM and SIGTSTP signals separately
+ # from the nested bwrap process, but in interactive mode this
+ # causes launched shells to lack job control (we don't really
+ # know why that is).
+ #
+ if interactive:
+ new_session = False
+ else:
+ new_session = True
+ stack.enter_context(_signals.suspendable(suspend_bwrap, resume_bwrap))
+ stack.enter_context(_signals.terminator(terminate_bwrap))
+
+ process = subprocess.Popen(
+ argv,
+ # The default is to share file descriptors from the parent process
+ # to the subprocess, which is rarely good for sandboxing.
+ close_fds=True,
+ pass_fds=pass_fds,
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ start_new_session=new_session
+ )
+
+ # Wait for the child process to finish, ensuring that
+ # a SIGINT has exactly the effect the user probably
+ # expects (i.e. let the child process handle it).
+ try:
+ while True:
+ try:
+ _, status = os.waitpid(process.pid, 0)
+ # If the process exits due to a signal, we
+ # brutally murder it to avoid zombies
+ if not os.WIFEXITED(status):
+ user_proc = get_user_proc(process.pid)
+ if user_proc:
+ utils._kill_process_tree(user_proc.pid)
+
+ # If we receive a KeyboardInterrupt we continue
+ # waiting for the process since we are in the same
+ # process group and it should also have received
+ # the SIGINT.
+ except KeyboardInterrupt:
+ continue
+
+ break
+ # If we can't find the process, it has already died of its
+ # own accord, and therefore we don't need to check or kill
+ # anything.
+ except psutil.NoSuchProcess:
+ pass
+
+ # Return the exit code - see the documentation for
+ # os.WEXITSTATUS to see why this is required.
+ if os.WIFEXITED(status):
+ exit_code = os.WEXITSTATUS(status)
+ else:
+ exit_code = -1
+
+ if interactive and stdin.isatty():
+ # Make this process the foreground process again, otherwise the
+ # next read() on stdin will trigger SIGTTIN and stop the process.
+ # This is required because the sandboxed process does not have
+ # permission to do this on its own (running in separate PID namespace).
+ #
+ # tcsetpgrp() will trigger SIGTTOU when called from a background
+ # process, so ignore it temporarily.
+ handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
+ os.tcsetpgrp(0, os.getpid())
+ signal.signal(signal.SIGTTOU, handler)
+
+ return exit_code
+
+ def try_remove_device(self, device_path):
+
+ # Put some upper limit on the tries here
+ max_tries = 1000
+ tries = 0
+
+ while True:
+ try:
+ os.unlink(device_path)
+ except OSError as e:
+ if e.errno == errno.EBUSY:
+ # This happens on some machines; it seems there is sometimes a race
+ # after bubblewrap returns, where the device files it bind-mounted
+ # have not finished unmounting.
+ #
+ if tries < max_tries:
+ tries += 1
+ time.sleep(1 / 100)
+ continue
+ else:
+ # We've reached the upper limit of tries, bail out now
+ # because something must have gone wrong
+ #
+ raise
+ elif e.errno == errno.ENOENT:
+ # Bubblewrap cleaned it up for us, no problem if we can't remove it
+ break
+ else:
+ # Something unexpected, reraise this error
+ raise
+ else:
+ # Successfully removed the device file
+ break
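The --json-status-fd plumbing above deserves a standalone look: bwrap emits one JSON object per line on the file descriptor it is given, and the only message _run() cares about is the one carrying 'exit-code'. A minimal sketch of that parse, with invented sample output:

    import json
    from collections import abc
    from contextlib import suppress

    # Made-up bwrap status lines for illustration
    sample = b'{"child-pid": 10414}\n{"exit-code": 7}\n'

    child_exit_code = None
    for line in sample.splitlines():
        with suppress(json.decoder.JSONDecodeError):
            o = json.loads(line)
            if isinstance(o, abc.Mapping) and 'exit-code' in o:
                child_exit_code = o['exit-code']
                break
    print(child_exit_code)   # 7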
diff --git a/src/buildstream/sandbox/_sandboxchroot.py b/src/buildstream/sandbox/_sandboxchroot.py
new file mode 100644
index 000000000..7266a00e3
--- /dev/null
+++ b/src/buildstream/sandbox/_sandboxchroot.py
@@ -0,0 +1,325 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Tristan Maat <tristan.maat@codethink.co.uk>
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+
+import os
+import sys
+import stat
+import signal
+import subprocess
+from contextlib import contextmanager, ExitStack
+import psutil
+
+from .._exceptions import SandboxError
+from .. import utils
+from .. import _signals
+from ._mounter import Mounter
+from ._mount import MountMap
+from . import Sandbox, SandboxFlags
+
+
+class SandboxChroot(Sandbox):
+
+ _FUSE_MOUNT_OPTIONS = {'dev': True}
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ uid = self._get_config().build_uid
+ gid = self._get_config().build_gid
+ if uid != 0 or gid != 0:
+ raise SandboxError("Chroot sandboxes cannot specify a non-root uid/gid "
+ "({},{} were supplied via config)".format(uid, gid))
+
+ self.mount_map = None
+
+ def _run(self, command, flags, *, cwd, env):
+
+ if not self._has_command(command[0], env):
+ raise SandboxError("Staged artifacts do not provide command "
+ "'{}'".format(command[0]),
+ reason='missing-command')
+
+ stdout, stderr = self._get_output()
+
+ # Create the mount map, this will tell us where
+ # each mount point needs to be mounted from and to
+ self.mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY,
+ self._FUSE_MOUNT_OPTIONS)
+
+ # Create a sysroot and run the command inside it
+ with ExitStack() as stack:
+ os.makedirs('/var/run/buildstream', exist_ok=True)
+
+ # FIXME: While we do not currently do anything to prevent
+ # network access, we also don't copy /etc/resolv.conf to
+ # the new rootfs.
+ #
+ # This effectively disables network access, since DNS will
+ # never resolve, so anything a normal process wants to do
+ # will fail. Malicious processes could gain rights to
+ # anything anyway.
+ #
+ # Nonetheless a better solution could perhaps be found.
+
+ rootfs = stack.enter_context(utils._tempdir(dir='/var/run/buildstream'))
+ stack.enter_context(self.create_devices(self._root, flags))
+ stack.enter_context(self.mount_dirs(rootfs, flags, stdout, stderr))
+
+ if flags & SandboxFlags.INTERACTIVE:
+ stdin = sys.stdin
+ else:
+ stdin = stack.enter_context(open(os.devnull, 'r'))
+
+ # Ensure the cwd exists
+ if cwd is not None:
+ workdir = os.path.join(rootfs, cwd.lstrip(os.sep))
+ os.makedirs(workdir, exist_ok=True)
+ status = self.chroot(rootfs, command, stdin, stdout,
+ stderr, cwd, env, flags)
+
+ self._vdir._mark_changed()
+ return status
+
+ # chroot()
+ #
+ # A helper function to chroot into the rootfs.
+ #
+ # Args:
+ # rootfs (str): The path of the sysroot to chroot into
+ # command (list): The command to execute in the chroot env
+ # stdin (file): The stdin
+ # stdout (file): The stdout
+ # stderr (file): The stderr
+ # cwd (str): The current working directory
+ # env (dict): The environment variables to use while executing the command
+ # flags (:class:`SandboxFlags`): The flags to enable on the sandbox
+ #
+ # Returns:
+ # (int): The exit code of the executed command
+ #
+ def chroot(self, rootfs, command, stdin, stdout, stderr, cwd, env, flags):
+ def kill_proc():
+ if process:
+ # First attempt to gracefully terminate
+ proc = psutil.Process(process.pid)
+ proc.terminate()
+
+ try:
+ proc.wait(20)
+ except psutil.TimeoutExpired:
+ utils._kill_process_tree(process.pid)
+
+ def suspend_proc():
+ group_id = os.getpgid(process.pid)
+ os.killpg(group_id, signal.SIGSTOP)
+
+ def resume_proc():
+ group_id = os.getpgid(process.pid)
+ os.killpg(group_id, signal.SIGCONT)
+
+ try:
+ with _signals.suspendable(suspend_proc, resume_proc), _signals.terminator(kill_proc):
+ process = subprocess.Popen( # pylint: disable=subprocess-popen-preexec-fn
+ command,
+ close_fds=True,
+ cwd=os.path.join(rootfs, cwd.lstrip(os.sep)),
+ env=env,
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ # If you try to put gtk dialogs here Tristan (either)
+ # will personally scald you
+ preexec_fn=lambda: (os.chroot(rootfs), os.chdir(cwd)),
+ start_new_session=flags & SandboxFlags.INTERACTIVE
+ )
+
+ # Wait for the child process to finish, ensuring that
+ # a SIGINT has exactly the effect the user probably
+ # expects (i.e. let the child process handle it).
+ try:
+ while True:
+ try:
+ _, status = os.waitpid(process.pid, 0)
+ # If the process exits due to a signal, we
+ # brutally murder it to avoid zombies
+ if not os.WIFEXITED(status):
+ utils._kill_process_tree(process.pid)
+
+ # Unlike in the bwrap case, here only the main
+ # process seems to receive the SIGINT. We pass
+ # on the signal to the child and then continue
+ # to wait.
+ except KeyboardInterrupt:
+ process.send_signal(signal.SIGINT)
+ continue
+
+ break
+ # If we can't find the process, it has already died of
+ # its own accord, and therefore we don't need to check
+ # or kill anything.
+ except psutil.NoSuchProcess:
+ pass
+
+ # Return the exit code - see the documentation for
+ # os.WEXITSTATUS to see why this is required.
+ if os.WIFEXITED(status):
+ code = os.WEXITSTATUS(status)
+ else:
+ code = -1
+
+ except subprocess.SubprocessError as e:
+ # Exceptions in preexec_fn are simply reported as
+ # 'Exception occurred in preexec_fn', turn these into
+ # a more readable message.
+ if str(e) == 'Exception occurred in preexec_fn.':
+ raise SandboxError('Could not chroot into {} or chdir into {}. '
+ 'Ensure you are root and that the relevant directory exists.'
+ .format(rootfs, cwd)) from e
+ else:
+ raise SandboxError('Could not run command {}: {}'.format(command, e)) from e
+
+ return code
+
+ # create_devices()
+ #
+ # Create the nodes in /dev/ usually required for builds (null,
+ # none, etc.)
+ #
+ # Args:
+ # rootfs (str): The path of the sysroot to prepare
+ # flags (:class:`.SandboxFlags`): The sandbox flags
+ #
+ @contextmanager
+ def create_devices(self, rootfs, flags):
+
+ devices = []
+ # When we are interactive, we'd rather mount /dev due to the
+ # sheer number of devices
+ if not flags & SandboxFlags.INTERACTIVE:
+
+ for device in Sandbox.DEVICES:
+ location = os.path.join(rootfs, device.lstrip(os.sep))
+ os.makedirs(os.path.dirname(location), exist_ok=True)
+ try:
+ if os.path.exists(location):
+ os.remove(location)
+
+ devices.append(self.mknod(device, location))
+ except OSError as err:
+ if err.errno == 1:
+ raise SandboxError("Permission denied while creating device node: {}.".format(err) +
+ "BuildStream reqiures root permissions for these setttings.")
+ else:
+ raise
+
+ yield
+
+ for device in devices:
+ os.remove(device)
+
+ # mount_dirs()
+ #
+ # Mount paths required for the command.
+ #
+ # Args:
+ # rootfs (str): The path of the sysroot to prepare
+ # flags (:class:`.SandboxFlags`): The sandbox flags
+ # stdout (file): The stdout
+ # stderr (file): The stderr
+ #
+ @contextmanager
+ def mount_dirs(self, rootfs, flags, stdout, stderr):
+
+ # FIXME: This should probably keep track of potentially
+ # already existing files a la _sandboxbwrap.py:239
+
+ @contextmanager
+ def mount_point(point, **kwargs):
+ mount_source_overrides = self._get_mount_sources()
+ if point in mount_source_overrides: # pylint: disable=consider-using-get
+ mount_source = mount_source_overrides[point]
+ else:
+ mount_source = self.mount_map.get_mount_source(point)
+ mount_point = os.path.join(rootfs, point.lstrip(os.sep))
+
+ with Mounter.bind_mount(mount_point, src=mount_source, stdout=stdout, stderr=stderr, **kwargs):
+ yield
+
+ @contextmanager
+ def mount_src(src, **kwargs):
+ mount_point = os.path.join(rootfs, src.lstrip(os.sep))
+ os.makedirs(mount_point, exist_ok=True)
+
+ with Mounter.bind_mount(mount_point, src=src, stdout=stdout, stderr=stderr, **kwargs):
+ yield
+
+ with ExitStack() as stack:
+ stack.enter_context(self.mount_map.mounted(self))
+
+ stack.enter_context(mount_point('/'))
+
+ if flags & SandboxFlags.INTERACTIVE:
+ stack.enter_context(mount_src('/dev'))
+
+ stack.enter_context(mount_src('/tmp'))
+ stack.enter_context(mount_src('/proc'))
+
+ for mark in self._get_marked_directories():
+ stack.enter_context(mount_point(mark['directory']))
+
+ # Remount root RO if necessary
+ if flags & SandboxFlags.ROOT_READ_ONLY:
+ root_mount = Mounter.mount(rootfs, stdout=stdout, stderr=stderr, remount=True, ro=True, bind=True)
+ # Since the exit stack has already registered a mount
+ # for this path, we do not need to register another
+ # umount call.
+ root_mount.__enter__()
+
+ yield
+
+ # mknod()
+ #
+ # Create a device node equivalent to the given source node
+ #
+ # Args:
+ # source (str): Path of the device to mimic (e.g. '/dev/null')
+ # target (str): Location to create the new device in
+ #
+ # Returns:
+ # target (str): The location of the created node
+ #
+ def mknod(self, source, target):
+ try:
+ dev = os.stat(source)
+ major = os.major(dev.st_rdev)
+ minor = os.minor(dev.st_rdev)
+
+ target_dev = os.makedev(major, minor)
+
+ os.mknod(target, mode=stat.S_IFCHR | dev.st_mode, device=target_dev)
+
+ except PermissionError as e:
+ raise SandboxError('Could not create device {}, ensure that you have root permissions: {}'
+ .format(target, e)) from e
+
+ except OSError as e:
+ raise SandboxError('Could not create device {}: {}'
+ .format(target, e)) from e
+
+ return target
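mknod() above recreates a host device by copying its major/minor numbers out of st_rdev. The same steps in isolation (stat'ing /dev/null is harmless; actually creating the node needs CAP_MKNOD, normally root, so that call is left commented with a made-up target path):

    import os
    import stat

    dev = os.stat('/dev/null')
    major, minor = os.major(dev.st_rdev), os.minor(dev.st_rdev)
    print(major, minor)                      # typically "1 3" on Linux
    target_dev = os.makedev(major, minor)

    # Requires root:
    # os.mknod('/tmp/sysroot/dev/null', mode=stat.S_IFCHR | dev.st_mode, device=target_dev)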
diff --git a/src/buildstream/sandbox/_sandboxdummy.py b/src/buildstream/sandbox/_sandboxdummy.py
new file mode 100644
index 000000000..750ddb05d
--- /dev/null
+++ b/src/buildstream/sandbox/_sandboxdummy.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+
+from .._exceptions import SandboxError
+from .sandbox import Sandbox
+
+
+class SandboxDummy(Sandbox):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._reason = kwargs.get("dummy_reason", "no reason given")
+
+ def _run(self, command, flags, *, cwd, env):
+
+ if not self._has_command(command[0], env):
+ raise SandboxError("Staged artifacts do not provide command "
+ "'{}'".format(command[0]),
+ reason='missing-command')
+
+ raise SandboxError("This platform does not support local builds: {}".format(self._reason),
+ reason="unavailable-local-sandbox")
diff --git a/src/buildstream/sandbox/_sandboxremote.py b/src/buildstream/sandbox/_sandboxremote.py
new file mode 100644
index 000000000..2cb7e2538
--- /dev/null
+++ b/src/buildstream/sandbox/_sandboxremote.py
@@ -0,0 +1,577 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2018 Bloomberg LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Jim MacArthur <jim.macarthur@codethink.co.uk>
+
+import os
+import shlex
+from collections import namedtuple
+from urllib.parse import urlparse
+from functools import partial
+
+import grpc
+
+from .. import utils
+from .._message import Message, MessageType
+from .sandbox import Sandbox, SandboxCommandError, _SandboxBatch
+from ..storage.directory import VirtualDirectoryError
+from ..storage._casbaseddirectory import CasBasedDirectory
+from .. import _signals
+from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
+from .._protos.google.rpc import code_pb2
+from .._exceptions import BstError, SandboxError
+from .. import _yaml
+from .._protos.google.longrunning import operations_pb2, operations_pb2_grpc
+from .._cas import CASRemote, CASRemoteSpec
+
+
+class RemoteExecutionSpec(namedtuple('RemoteExecutionSpec', 'exec_service storage_service action_service')):
+ pass
+
+
+# SandboxRemote()
+#
+# This isn't really a sandbox, it's a stub which sends all the sources and build
+# commands to a remote server and retrieves the results from it.
+#
+class SandboxRemote(Sandbox):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ self._output_files_required = kwargs.get('output_files_required', True)
+
+ config = kwargs['specs'] # This should be a RemoteExecutionSpec
+ if config is None:
+ return
+
+ self.storage_url = config.storage_service['url']
+ self.exec_url = config.exec_service['url']
+
+ exec_certs = {}
+ for key in ['client-cert', 'client-key', 'server-cert']:
+ if key in config.exec_service:
+ with open(config.exec_service[key], 'rb') as f:
+ exec_certs[key] = f.read()
+
+ self.exec_credentials = grpc.ssl_channel_credentials(
+ root_certificates=exec_certs.get('server-cert'),
+ private_key=exec_certs.get('client-key'),
+ certificate_chain=exec_certs.get('client-cert'))
+
+ action_certs = {}
+ for key in ['client-cert', 'client-key', 'server-cert']:
+ if key in config.action_service:
+ with open(config.action_service[key], 'rb') as f:
+ action_certs[key] = f.read()
+
+ if config.action_service:
+ self.action_url = config.action_service['url']
+ self.action_instance = config.action_service.get('instance-name', None)
+ self.action_credentials = grpc.ssl_channel_credentials(
+ root_certificates=action_certs.get('server-cert'),
+ private_key=action_certs.get('client-key'),
+ certificate_chain=action_certs.get('client-cert'))
+ else:
+ self.action_url = None
+ self.action_instance = None
+ self.action_credentials = None
+
+ self.exec_instance = config.exec_service.get('instance-name', None)
+ self.storage_instance = config.storage_service.get('instance-name', None)
+
+ self.storage_remote_spec = CASRemoteSpec(self.storage_url, push=True,
+ server_cert=config.storage_service.get('server-cert'),
+ client_key=config.storage_service.get('client-key'),
+ client_cert=config.storage_service.get('client-cert'),
+ instance_name=self.storage_instance)
+ self.operation_name = None
+
+ def info(self, msg):
+ self._get_context().message(Message(None, MessageType.INFO, msg))
+
+ @staticmethod
+ def specs_from_config_node(config_node, basedir=None):
+
+ def require_node(config, keyname):
+ val = _yaml.node_get(config, dict, keyname, default_value=None)
+ if val is None:
+ provenance = _yaml.node_get_provenance(remote_config, key=keyname)
+ raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA,
+ "{}: '{}' was not present in the remote "
+ "execution configuration (remote-execution). "
+ .format(str(provenance), keyname))
+ return val
+
+ remote_config = _yaml.node_get(config_node, dict, 'remote-execution', default_value=None)
+ if remote_config is None:
+ return None
+
+ service_keys = ['execution-service', 'storage-service', 'action-cache-service']
+
+ _yaml.node_validate(remote_config, ['url', *service_keys])
+
+ exec_config = require_node(remote_config, 'execution-service')
+ storage_config = require_node(remote_config, 'storage-service')
+ action_config = _yaml.node_get(remote_config, dict, 'action-cache-service', default_value={})
+
+ tls_keys = ['client-key', 'client-cert', 'server-cert']
+
+ _yaml.node_validate(exec_config, ['url', 'instance-name', *tls_keys])
+ _yaml.node_validate(storage_config, ['url', 'instance-name', *tls_keys])
+ if action_config:
+ _yaml.node_validate(action_config, ['url', 'instance-name', *tls_keys])
+
+ # Maintain some backwards compatibility with older configs, in which
+ # 'url' was the only valid key for remote-execution:
+ if 'url' in remote_config:
+ if 'execution-service' not in remote_config:
+ exec_config = _yaml.new_node_from_dict({'url': remote_config['url']})
+ else:
+ provenance = _yaml.node_get_provenance(remote_config, key='url')
+ raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA,
+ "{}: 'url' and 'execution-service' keys were found in the remote "
+ "execution configuration (remote-execution). "
+ "You can only specify one of these."
+ .format(str(provenance)))
+
+ service_configs = [exec_config, storage_config, action_config]
+
+ def resolve_path(path):
+ if basedir and path:
+ return os.path.join(basedir, path)
+ else:
+ return path
+
+ for config_key, config in zip(service_keys, service_configs):
+ # Either both or none of the TLS client key/cert pair must be specified:
+ if ('client-key' in config) != ('client-cert' in config):
+ provenance = _yaml.node_get_provenance(remote_config, key=config_key)
+ raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA,
+ "{}: TLS client key/cert pair is incomplete. "
+ "You must specify both 'client-key' and 'client-cert' "
+ "for authenticated HTTPS connections."
+ .format(str(provenance)))
+
+ for tls_key in tls_keys:
+ if tls_key in config:
+ _yaml.node_set(config, tls_key, resolve_path(_yaml.node_get(config, str, tls_key)))
+
+ return RemoteExecutionSpec(*[_yaml.node_sanitize(conf) for conf in service_configs])
+
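For orientation, specs_from_config_node() below accepts a 'remote-execution' mapping with an execution-service, a storage-service and an optional action-cache-service, each validated against the same small key set. Sketched here as the equivalent Python mapping (in practice this is YAML in the user or project configuration; the hosts, ports and certificate paths are invented):

    remote_execution = {
        'execution-service': {
            'url': 'https://execution.example.com:50051',
            'instance-name': 'main',
            'client-key': 'certs/client.key',    # key and cert must be given together
            'client-cert': 'certs/client.crt',
            'server-cert': 'certs/server.crt',   # relative paths resolve against basedir
        },
        'storage-service': {
            'url': 'https://storage.example.com:11002',
            'instance-name': 'main',
        },
        'action-cache-service': {                # optional section
            'url': 'https://cache.example.com:50052',
        },
    }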
+ def run_remote_command(self, channel, action_digest):
+ # Sends an execution request to the remote execution server.
+ #
+ # This function blocks until it gets a response from the server.
+
+ # Try to create a communication channel to the BuildGrid server.
+ stub = remote_execution_pb2_grpc.ExecutionStub(channel)
+ request = remote_execution_pb2.ExecuteRequest(instance_name=self.exec_instance,
+ action_digest=action_digest,
+ skip_cache_lookup=False)
+
+ def __run_remote_command(stub, execute_request=None, running_operation=None):
+ try:
+ last_operation = None
+ if execute_request is not None:
+ operation_iterator = stub.Execute(execute_request)
+ else:
+ request = remote_execution_pb2.WaitExecutionRequest(name=running_operation.name)
+ operation_iterator = stub.WaitExecution(request)
+
+ for operation in operation_iterator:
+ if not self.operation_name:
+ self.operation_name = operation.name
+ if operation.done:
+ return operation
+ else:
+ last_operation = operation
+
+ except grpc.RpcError as e:
+ status_code = e.code()
+ if status_code == grpc.StatusCode.UNAVAILABLE:
+ raise SandboxError("Failed contacting remote execution server at {}."
+ .format(self.exec_url))
+
+ elif status_code in (grpc.StatusCode.INVALID_ARGUMENT,
+ grpc.StatusCode.FAILED_PRECONDITION,
+ grpc.StatusCode.RESOURCE_EXHAUSTED,
+ grpc.StatusCode.INTERNAL,
+ grpc.StatusCode.DEADLINE_EXCEEDED):
+ raise SandboxError("{} ({}).".format(e.details(), status_code.name))
+
+ elif running_operation and status_code == grpc.StatusCode.UNIMPLEMENTED:
+ raise SandboxError("Failed trying to recover from connection loss: "
+ "server does not support operation status polling recovery.")
+
+ return last_operation
+
+ # Set up signal handler to trigger cancel_operation on SIGTERM
+ operation = None
+ with self._get_context().timed_activity("Waiting for the remote build to complete"), \
+ _signals.terminator(partial(self.cancel_operation, channel)):
+ operation = __run_remote_command(stub, execute_request=request)
+ if operation is None:
+ return None
+ elif operation.done:
+ return operation
+ while operation is not None and not operation.done:
+ operation = __run_remote_command(stub, running_operation=operation)
+
+ return operation
+
+ def cancel_operation(self, channel):
+ # If we don't have the operation name, we can't send the request.
+ if self.operation_name is None:
+ return
+
+ stub = operations_pb2_grpc.OperationsStub(channel)
+ request = operations_pb2.CancelOperationRequest(
+ name=str(self.operation_name))
+
+ try:
+ stub.CancelOperation(request)
+ except grpc.RpcError as e:
+ if (e.code() == grpc.StatusCode.UNIMPLEMENTED or
+ e.code() == grpc.StatusCode.INVALID_ARGUMENT):
+ pass
+ else:
+ raise SandboxError("Failed trying to send CancelOperation request: "
+ "{} ({})".format(e.details(), e.code().name))
+
+ def process_job_output(self, output_directories, output_files, *, failure):
+ # Reads the remote execution server response to an execution request.
+ #
+ # output_directories is an array of OutputDirectory objects.
+ # output_files is an array of OutputFile objects.
+ #
+ # We only specify one output_directory, so it's an error
+ # for there to be any output files or more than one directory at the moment.
+ #
+ if output_files:
+ raise SandboxError("Output files were returned when we didn't request any.")
+ elif not output_directories:
+ error_text = "No output directory was returned from the build server."
+ raise SandboxError(error_text)
+ elif len(output_directories) > 1:
+ error_text = "More than one output directory was returned from the build server: {}."
+ raise SandboxError(error_text.format(output_directories))
+
+ tree_digest = output_directories[0].tree_digest
+ if tree_digest is None or not tree_digest.hash:
+ raise SandboxError("Output directory structure had no digest attached.")
+
+ context = self._get_context()
+ project = self._get_project()
+ cascache = context.get_cascache()
+ artifactcache = context.artifactcache
+ casremote = CASRemote(self.storage_remote_spec)
+
+ # Now do a pull to ensure we have the full directory structure.
+ dir_digest = cascache.pull_tree(casremote, tree_digest)
+ if dir_digest is None or not dir_digest.hash or not dir_digest.size_bytes:
+ raise SandboxError("Output directory structure pulling from remote failed.")
+
+ # At the moment, we will get the whole directory back in the first directory argument and we need
+ # to replace the sandbox's virtual directory with that. Creating a new virtual directory object
+ # from another hash will be interesting, though...
+
+ new_dir = CasBasedDirectory(context.artifactcache.cas, digest=dir_digest)
+ self._set_virtual_directory(new_dir)
+
+ # Fetch the file blobs if needed
+ if self._output_files_required or artifactcache.has_push_remotes():
+ required_blobs = []
+ directories = []
+
+ directories.append(self._output_directory)
+ if self._build_directory and (self._build_directory_always or failure):
+ directories.append(self._build_directory)
+
+ for directory in directories:
+ try:
+ vdir = new_dir.descend(*directory.strip(os.sep).split(os.sep))
+ dir_digest = vdir._get_digest()
+ required_blobs += cascache.required_blobs_for_directory(dir_digest)
+ except VirtualDirectoryError:
+ # If the directory does not exist, there is no need to
+ # download file blobs.
+ pass
+
+ local_missing_blobs = cascache.local_missing_blobs(required_blobs)
+ if local_missing_blobs:
+ if self._output_files_required:
+ # Fetch all blobs from Remote Execution CAS server
+ blobs_to_fetch = local_missing_blobs
+ else:
+ # Output files are not required in the local cache,
+ # however, artifact push remotes will need them.
+ # Only fetch blobs that are missing on one or multiple
+ # artifact servers.
+ blobs_to_fetch = artifactcache.find_missing_blobs(project, local_missing_blobs)
+
+ remote_missing_blobs = cascache.fetch_blobs(casremote, blobs_to_fetch)
+ if remote_missing_blobs:
+ raise SandboxError("{} output files are missing on the CAS server"
+ .format(len(remote_missing_blobs)))
+
+ def _run(self, command, flags, *, cwd, env):
+ stdout, stderr = self._get_output()
+
+ context = self._get_context()
+ project = self._get_project()
+ cascache = context.get_cascache()
+ artifactcache = context.artifactcache
+
+ # Set up the virtual directory
+ upload_vdir = self.get_virtual_directory()
+
+ # Create directories for all marked directories. This emulates
+ # some of the behaviour of other sandboxes, which create these
+ # to use as mount points.
+ for mark in self._get_marked_directories():
+ directory = mark['directory']
+ # Create each marked directory
+ upload_vdir.descend(*directory.split(os.path.sep), create=True)
+
+ # Generate action_digest first
+ input_root_digest = upload_vdir._get_digest()
+ command_proto = self._create_command(command, cwd, env)
+ command_digest = utils._message_digest(command_proto.SerializeToString())
+ action = remote_execution_pb2.Action(command_digest=command_digest,
+ input_root_digest=input_root_digest)
+ action_digest = utils._message_digest(action.SerializeToString())
+
+ # Next, try to create a communication channel to the BuildGrid server.
+ url = urlparse(self.exec_url)
+ if not url.port:
+ raise SandboxError("You must supply a protocol and port number in the execution-service url, "
+ "for example: http://buildservice:50051.")
+ if url.scheme == 'http':
+ channel = grpc.insecure_channel('{}:{}'.format(url.hostname, url.port))
+ elif url.scheme == 'https':
+ channel = grpc.secure_channel('{}:{}'.format(url.hostname, url.port), self.exec_credentials)
+ else:
+ raise SandboxError("Remote execution currently only supports the 'http' protocol "
+ "and '{}' was supplied.".format(url.scheme))
+
+ # check action cache download and download if there
+ action_result = self._check_action_cache(action_digest)
+
+ if not action_result:
+ casremote = CASRemote(self.storage_remote_spec)
+ try:
+ casremote.init()
+ except grpc.RpcError as e:
+ raise SandboxError("Failed to contact remote execution CAS endpoint at {}: {}"
+ .format(self.storage_url, e)) from e
+
+ # Determine blobs missing on remote
+ try:
+ missing_blobs = cascache.remote_missing_blobs_for_directory(casremote, input_root_digest)
+ except grpc.RpcError as e:
+ raise SandboxError("Failed to determine missing blobs: {}".format(e)) from e
+
+ # Check if any blobs are also missing locally (partial artifact)
+ # and pull them from the artifact cache.
+ try:
+ local_missing_blobs = cascache.local_missing_blobs(missing_blobs)
+ if local_missing_blobs:
+ artifactcache.fetch_missing_blobs(project, local_missing_blobs)
+ except (grpc.RpcError, BstError) as e:
+ raise SandboxError("Failed to pull missing blobs from artifact cache: {}".format(e)) from e
+
+ # Now, push the missing blobs to the remote.
+ try:
+ cascache.send_blobs(casremote, missing_blobs)
+ except grpc.RpcError as e:
+ raise SandboxError("Failed to push source directory to remote: {}".format(e)) from e
+
+ # Push command and action
+ try:
+ casremote.push_message(command_proto)
+ except grpc.RpcError as e:
+ raise SandboxError("Failed to push command to remote: {}".format(e))
+
+ try:
+ casremote.push_message(action)
+ except grpc.RpcError as e:
+ raise SandboxError("Failed to push action to remote: {}".format(e))
+
+ # Now request to execute the action
+ operation = self.run_remote_command(channel, action_digest)
+ action_result = self._extract_action_result(operation)
+
+ # Get output of build
+ self.process_job_output(action_result.output_directories, action_result.output_files,
+ failure=action_result.exit_code != 0)
+
+ if stdout:
+ if action_result.stdout_raw:
+ stdout.write(str(action_result.stdout_raw, 'utf-8', errors='ignore'))
+ if stderr:
+ if action_result.stderr_raw:
+ stderr.write(str(action_result.stderr_raw, 'utf-8', errors='ignore'))
+
+ if action_result.exit_code != 0:
+ # A normal error during the build: the remote execution system
+ # has worked correctly but the command failed.
+ return action_result.exit_code
+
+ return 0
+
+ def _check_action_cache(self, action_digest):
+ # Checks the action cache to see if this artifact has already been built
+ #
+ # Returns the cached action result, or None if not found; raises
+ # SandboxError if any other grpc error occurs.
+ if not self.action_url:
+ return None
+ url = urlparse(self.action_url)
+ if not url.port:
+ raise SandboxError("You must supply a protocol and port number in the action-cache-service url, "
+ "for example: http://buildservice:50051.")
+ if url.scheme == 'http':
+ channel = grpc.insecure_channel('{}:{}'.format(url.hostname, url.port))
+ elif url.scheme == 'https':
+ channel = grpc.secure_channel('{}:{}'.format(url.hostname, url.port), self.action_credentials)
+ else:
+ raise SandboxError("The action-cache-service url currently only supports the 'http' "
+ "and 'https' protocols, and '{}' was supplied.".format(url.scheme))
+
+ request = remote_execution_pb2.GetActionResultRequest(instance_name=self.action_instance,
+ action_digest=action_digest)
+ stub = remote_execution_pb2_grpc.ActionCacheStub(channel)
+ try:
+ result = stub.GetActionResult(request)
+ except grpc.RpcError as e:
+ if e.code() != grpc.StatusCode.NOT_FOUND:
+ raise SandboxError("Failed to query action cache: {} ({})"
+ .format(e.code(), e.details()))
+ else:
+ return None
+ else:
+ self.info("Action result found in action cache")
+ return result
+
+ def _create_command(self, command, working_directory, environment):
+ # Creates a command proto
+ environment_variables = [remote_execution_pb2.Command.
+ EnvironmentVariable(name=k, value=v)
+ for (k, v) in environment.items()]
+
+ # Request the whole directory tree as output
+ output_directory = os.path.relpath(os.path.sep, start=working_directory)
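+ # (For example, os.path.relpath('/', start='/buildstream-build')
+ # yields '..', i.e. the sandbox root expressed relative to the
+ # working directory; the directory name here is illustrative.)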
+
+ return remote_execution_pb2.Command(arguments=command,
+ working_directory=working_directory,
+ environment_variables=environment_variables,
+ output_files=[],
+ output_directories=[output_directory],
+ platform=None)
+
+ @staticmethod
+ def _extract_action_result(operation):
+ if operation is None:
+ # Failure of remote execution, usually due to an error in BuildStream
+ raise SandboxError("No response returned from server")
+
+ assert not operation.HasField('error') and operation.HasField('response')
+
+ execution_response = remote_execution_pb2.ExecuteResponse()
+ # The response is expected to be an ExecutionResponse message
+ assert operation.response.Is(execution_response.DESCRIPTOR)
+
+ operation.response.Unpack(execution_response)
+
+ if execution_response.status.code != code_pb2.OK:
+ # An unexpected error during execution: the remote execution
+ # system failed at processing the execution request.
+ if execution_response.status.message:
+ raise SandboxError(execution_response.status.message)
+ else:
+ raise SandboxError("Remote server failed at executing the build request.")
+
+ return execution_response.result
+
+ def _create_batch(self, main_group, flags, *, collect=None):
+ return _SandboxRemoteBatch(self, main_group, flags, collect=collect)
+
+ def _use_cas_based_directory(self):
+ # Always use CasBasedDirectory for remote execution
+ return True
+
+
+# _SandboxRemoteBatch()
+#
+# Command batching by shell script generation.
+#
+class _SandboxRemoteBatch(_SandboxBatch):
+
+ def __init__(self, sandbox, main_group, flags, *, collect=None):
+ super().__init__(sandbox, main_group, flags, collect=collect)
+
+ self.script = None
+ self.first_command = None
+ self.cwd = None
+ self.env = None
+
+ def execute(self):
+ self.script = ""
+
+ self.main_group.execute(self)
+
+ first = self.first_command
+ if first and self.sandbox.run(['sh', '-c', '-e', self.script], self.flags, cwd=first.cwd, env=first.env) != 0:
+ raise SandboxCommandError("Command execution failed", collect=self.collect)
+
+ def execute_group(self, group):
+ group.execute_children(self)
+
+ def execute_command(self, command):
+ if self.first_command is None:
+ # First command in batch
+ # Initial working directory and environment of script already matches
+ # the command configuration.
+ self.first_command = command
+ else:
+ # Change working directory for this command
+ if command.cwd != self.cwd:
+ self.script += "mkdir -p {}\n".format(command.cwd)
+ self.script += "cd {}\n".format(command.cwd)
+
+ # Update environment for this command
+ for key in self.env.keys():
+ if key not in command.env:
+ self.script += "unset {}\n".format(key)
+ for key, value in command.env.items():
+ if key not in self.env or self.env[key] != value:
+ self.script += "export {}={}\n".format(key, shlex.quote(value))
+
+ # Keep track of current working directory and environment
+ self.cwd = command.cwd
+ self.env = command.env
+
+ # Actual command execution
+ cmdline = ' '.join(shlex.quote(cmd) for cmd in command.command)
+ self.script += "(set -ex; {})".format(cmdline)
+
+ # Error handling
+ label = command.label or cmdline
+ quoted_label = shlex.quote("'{}'".format(label))
+ self.script += " || (echo Command {} failed with exitcode $? >&2 ; exit 1)\n".format(quoted_label)
+
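+ # As an illustration, a hypothetical batch of ['./configure'] followed
+ # by ['make'] in a different working directory generates roughly:
+ #
+ #     (set -ex; ./configure) || (echo Command '...' failed with exitcode $? >&2 ; exit 1)
+ #     mkdir -p /buildstream-build/tree
+ #     cd /buildstream-build/tree
+ #     (set -ex; make) || (echo Command '...' failed with exitcode $? >&2 ; exit 1)
+ #
+ # No cd/export preamble is emitted for the first command; its cwd and
+ # env are passed directly to sandbox.run() in execute() above.
+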
+ def execute_call(self, call):
+ raise SandboxError("SandboxRemote does not support callbacks in command batches")
diff --git a/src/buildstream/sandbox/sandbox.py b/src/buildstream/sandbox/sandbox.py
new file mode 100644
index 000000000..c96ccb57b
--- /dev/null
+++ b/src/buildstream/sandbox/sandbox.py
@@ -0,0 +1,717 @@
+#
+# Copyright (C) 2017 Codethink Limited
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# Andrew Leeming <andrew.leeming@codethink.co.uk>
+# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+"""
+Sandbox - The build sandbox
+===========================
+:class:`.Element` plugins which want to interface with the sandbox
+need only understand this interface; while they may be handed different
+sandbox implementations, any implementation they are given will conform
+to this interface.
+
+See also: :ref:`sandboxing`.
+"""
+
+import os
+import shlex
+import contextlib
+from contextlib import contextmanager
+
+from .._exceptions import ImplError, BstError, SandboxError
+from .._message import Message, MessageType
+from ..storage._filebaseddirectory import FileBasedDirectory
+from ..storage._casbaseddirectory import CasBasedDirectory
+
+
+class SandboxFlags():
+ """Flags indicating how the sandbox should be run.
+ """
+
+ NONE = 0
+ """Use default sandbox configuration.
+ """
+
+ ROOT_READ_ONLY = 0x01
+ """The root filesystem is read only.
+
+ This is normally true except when running integration commands
+ on staged dependencies, where we have to update caches and run
+ things such as ldconfig.
+ """
+
+ NETWORK_ENABLED = 0x02
+ """Whether to expose host network.
+
+ This should not be set when running builds, but can
+ be allowed for running a shell in a sandbox.
+ """
+
+ INTERACTIVE = 0x04
+ """Whether to run the sandbox interactively
+
+ This determines if the sandbox should attempt to connect
+ the terminal through to the calling process, or detach
+ the terminal entirely.
+ """
+
+ INHERIT_UID = 0x08
+ """Whether to use the user id and group id from the host environment
+
+ This determines if processes in the sandbox should run with the
+ same user id and group id as BuildStream itself. By default,
+ processes run with user id and group id 0, protected by a user
+ namespace where available.
+ """
+
+
+class SandboxCommandError(SandboxError):
+ """Raised by :class:`.Sandbox` implementations when a command fails.
+
+ Args:
+ message (str): The error message to report to the user
+ detail (str): The detailed error string
+ collect (str): An optional directory containing partial install contents
+ """
+ def __init__(self, message, *, detail=None, collect=None):
+ super().__init__(message, detail=detail, reason='command-failed')
+
+ self.collect = collect
+
+
+class Sandbox():
+ """Sandbox()
+
+ Sandbox programming interface for :class:`.Element` plugins.
+ """
+
+ # Minimal set of devices for the sandbox
+ DEVICES = [
+ '/dev/urandom',
+ '/dev/random',
+ '/dev/zero',
+ '/dev/null'
+ ]
+
+ def __init__(self, context, project, directory, **kwargs):
+ self.__context = context
+ self.__project = project
+ self.__directories = []
+ self.__cwd = None
+ self.__env = None
+ self.__mount_sources = {}
+ self.__allow_real_directory = kwargs['allow_real_directory']
+ self.__allow_run = True
+
+ # Plugin ID for logging
+ plugin = kwargs.get('plugin', None)
+ if plugin:
+ self.__plugin_id = plugin._unique_id
+ else:
+ self.__plugin_id = None
+
+ # Configuration from kwargs common to all subclasses
+ self.__config = kwargs['config']
+ self.__stdout = kwargs['stdout']
+ self.__stderr = kwargs['stderr']
+ self.__bare_directory = kwargs['bare_directory']
+
+ # Setup the directories. Root and output_directory should be
+ # available to subclasses, hence being single-underscore. The
+ # others are private to this class.
+ # A bare directory is used as the sandbox root directly, so no
+ # scratch directory is needed.
+ if self.__bare_directory:
+ self._root = directory
+ self.__scratch = None
+ os.makedirs(self._root, exist_ok=True)
+ else:
+ self._root = os.path.join(directory, 'root')
+ self.__scratch = os.path.join(directory, 'scratch')
+ for directory_ in [self._root, self.__scratch]:
+ os.makedirs(directory_, exist_ok=True)
+
+ self._output_directory = None
+ self._build_directory = None
+ self._build_directory_always = None
+ self._vdir = None
+ self._usebuildtree = False
+
+ # This is set if anyone requests access to the underlying
+ # directory via get_directory.
+ self._never_cache_vdirs = False
+
+ # Pending command batch
+ self.__batch = None
+
+ def get_directory(self):
+ """Fetches the sandbox root directory
+
+ The root directory is where artifacts for the base
+ runtime environment should be staged. Only works if the sandbox
+ was created to allow access to a real directory.
+
+ Returns:
+ (str): The sandbox root directory
+
+ """
+ if self.__allow_real_directory:
+ self._never_cache_vdirs = True
+ return self._root
+ else:
+ raise BstError("You can't use get_directory")
+
+ def get_virtual_directory(self):
+ """Fetches the sandbox root directory as a virtual Directory.
+
+ The root directory is where artifacts for the base
+ runtime environment should be staged.
+
+ Use caution if you use both get_directory and get_virtual_directory.
+ If you alter the contents of the directory returned by get_directory,
+ all objects returned by get_virtual_directory or derived from them
+ become invalid, and you must call get_virtual_directory again to get
+ a new copy.
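+
+ A brief sketch of typical use (the directory name is illustrative)::
+
+ vdir = sandbox.get_virtual_directory()
+ install_vdir = vdir.descend('buildstream-install', create=True)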
+
+ Returns:
+ (Directory): The sandbox root directory
+
+ """
+ if self._vdir is None or self._never_cache_vdirs:
+ if self._use_cas_based_directory():
+ cascache = self.__context.get_cascache()
+ self._vdir = CasBasedDirectory(cascache)
+ else:
+ self._vdir = FileBasedDirectory(self._root)
+ return self._vdir
+
+ def _set_virtual_directory(self, virtual_directory):
+ """ Sets virtual directory. Useful after remote execution
+ has rewritten the working directory.
+ """
+ self._vdir = virtual_directory
+
+ def set_environment(self, environment):
+ """Sets the environment variables for the sandbox
+
+ Args:
+ environment (dict): The environment variables to use in the sandbox
+ """
+ self.__env = environment
+
+ def set_work_directory(self, directory):
+ """Sets the work directory for commands run in the sandbox
+
+ Args:
+ directory (str): An absolute path within the sandbox
+ """
+ self.__cwd = directory
+
+ def set_output_directory(self, directory):
+ """Sets the output directory - the directory which is preserved
+ as an artifact after assembly.
+
+ Args:
+ directory (str): An absolute path within the sandbox
+ """
+ self._output_directory = directory
+
+ def mark_directory(self, directory, *, artifact=False):
+ """Marks a sandbox directory and ensures it will exist
+
+ Args:
+ directory (str): An absolute path within the sandbox to mark
+ artifact (bool): Whether the content staged at this location
+ contains artifacts
+
+ .. note::
+ Any marked directories will be read-write in the sandboxed
+ environment, only the root directory is allowed to be readonly.
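+
+ A one-line sketch (the path is illustrative)::
+
+ sandbox.mark_directory('/buildstream-install', artifact=True)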
+ """
+ self.__directories.append({
+ 'directory': directory,
+ 'artifact': artifact
+ })
+
+ def run(self, command, flags, *, cwd=None, env=None, label=None):
+ """Run a command in the sandbox.
+
+ If this is called outside a batch context, the command is immediately
+ executed.
+
+ If this is called in a batch context, the command is added to the batch
+ for later execution. If the command fails, later commands will not be
+ executed. Command flags must match batch flags.
+
+ Args:
+ command (list): The command to run in the sandboxed environment, as a list
+ of strings starting with the binary to run.
+ flags (:class:`.SandboxFlags`): The flags for running this command.
+ cwd (str): The sandbox relative working directory in which to run the command.
+ env (dict): A dictionary of string key, value pairs to set as environment
+ variables inside the sandbox environment.
+ label (str): An optional label for the command, used for logging. (*Since: 1.4*)
+
+ Returns:
+ (int|None): The program exit code, or None if running in batch context.
+
+ Raises:
+ (:class:`.ProgramNotFoundError`): If a host tool which the given sandbox
+ implementation requires is not found.
+
+ .. note::
+
+ The optional *cwd* argument will default to the value set with
+ :func:`~buildstream.sandbox.Sandbox.set_work_directory` and this
+ function must make sure the directory will be created if it does
+ not exist yet, even if a workspace is being used.
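+
+ A short sketch of typical use (command, path and label are illustrative)::
+
+ exitcode = sandbox.run(['make', 'install'],
+ SandboxFlags.ROOT_READ_ONLY,
+ cwd='/buildstream-build',
+ label='Installing')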
+ """
+
+ if not self.__allow_run:
+ raise SandboxError("Sandbox.run() has been disabled")
+
+ # Fallback to the sandbox default settings for
+ # the cwd and env.
+ #
+ cwd = self._get_work_directory(cwd=cwd)
+ env = self._get_environment(cwd=cwd, env=env)
+
+ # Convert single-string argument to a list
+ if isinstance(command, str):
+ command = [command]
+
+ if self.__batch:
+ assert flags == self.__batch.flags, \
+ "Inconsistent sandbox flags in single command batch"
+
+ batch_command = _SandboxBatchCommand(command, cwd=cwd, env=env, label=label)
+
+ current_group = self.__batch.current_group
+ current_group.append(batch_command)
+ return None
+ else:
+ return self._run(command, flags, cwd=cwd, env=env)
+
+ @contextmanager
+ def batch(self, flags, *, label=None, collect=None):
+ """Context manager for command batching
+
+ This provides a batch context that defers execution of commands until
+ the end of the context. If a command fails, the batch will be aborted
+ and subsequent commands will not be executed.
+
+ Command batches may be nested. Execution will start only when the top
+ level batch context ends.
+
+ Args:
+ flags (:class:`.SandboxFlags`): The flags for this command batch.
+ label (str): An optional label for the batch group, used for logging.
+ collect (str): An optional directory containing partial install contents
+ on command failure.
+
+ Raises:
+ (:class:`.SandboxCommandError`): If a command fails.
+
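+ A short sketch of a nested batch (the commands are illustrative)::
+
+ with sandbox.batch(SandboxFlags.ROOT_READ_ONLY, label='Building'):
+ sandbox.run(['./configure'], SandboxFlags.ROOT_READ_ONLY)
+ with sandbox.batch(SandboxFlags.ROOT_READ_ONLY, label='Compiling'):
+ sandbox.run(['make'], SandboxFlags.ROOT_READ_ONLY)
+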
+ *Since: 1.4*
+ """
+
+ group = _SandboxBatchGroup(label=label)
+
+ if self.__batch:
+ # Nested batch
+ assert flags == self.__batch.flags, \
+ "Inconsistent sandbox flags in nested command batch"
+
+ parent_group = self.__batch.current_group
+ parent_group.append(group)
+ self.__batch.current_group = group
+ try:
+ yield
+ finally:
+ self.__batch.current_group = parent_group
+ else:
+ # Top-level batch
+ batch = self._create_batch(group, flags, collect=collect)
+
+ self.__batch = batch
+ try:
+ yield
+ finally:
+ self.__batch = None
+
+ batch.execute()
+
+ #####################################################
+ # Abstract Methods for Sandbox implementations #
+ #####################################################
+
+ # _run()
+ #
+ # Abstract method for running a single command
+ #
+ # Args:
+ # command (list): The command to run in the sandboxed environment, as a list
+ # of strings starting with the binary to run.
+ # flags (:class:`.SandboxFlags`): The flags for running this command.
+ # cwd (str): The sandbox relative working directory in which to run the command.
+ # env (dict): A dictionary of string key, value pairs to set as environment
+ # variables inside the sandbox environment.
+ #
+ # Returns:
+ # (int): The program exit code.
+ #
+ def _run(self, command, flags, *, cwd, env):
+ raise ImplError("Sandbox of type '{}' does not implement _run()"
+ .format(type(self).__name__))
+
+ # _create_batch()
+ #
+ # Abstract method for creating a batch object. Subclasses can override
+ # this method to instantiate a subclass of _SandboxBatch.
+ #
+ # Args:
+ # main_group (:class:`_SandboxBatchGroup`): The top level batch group.
+ # flags (:class:`.SandboxFlags`): The flags for commands in this batch.
+ # collect (str): An optional directory containing partial install contents
+ # on command failure.
+ #
+ def _create_batch(self, main_group, flags, *, collect=None):
+ return _SandboxBatch(self, main_group, flags, collect=collect)
+
+ # _use_cas_based_directory()
+ #
+ # Whether to use CasBasedDirectory as sandbox root. If this returns `False`,
+ # FileBasedDirectory will be used.
+ #
+ # Returns:
+ # (bool): Whether to use CasBasedDirectory
+ #
+ def _use_cas_based_directory(self):
+ # Use CasBasedDirectory as sandbox root if neither Sandbox.get_directory()
+ # nor Sandbox.run() are required. This allows faster staging.
+ if not self.__allow_real_directory and not self.__allow_run:
+ return True
+
+ return 'BST_CAS_DIRECTORIES' in os.environ
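+ # (Only the presence of the variable is checked, so for example
+ # exporting BST_CAS_DIRECTORIES=1 before invoking BuildStream opts
+ # in to CAS-based staging.)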
+
+ ################################################
+ # Private methods #
+ ################################################
+ # _get_context()
+ #
+ # Fetches the context BuildStream was launched with.
+ #
+ # Returns:
+ # (Context): The context of this BuildStream invocation
+ def _get_context(self):
+ return self.__context
+
+ # _get_project()
+ #
+ # Fetches the Project this sandbox was created to build for.
+ #
+ # Returns:
+ # (Project): The project this sandbox was created for.
+ def _get_project(self):
+ return self.__project
+
+ # _get_marked_directories()
+ #
+ # Fetches the marked directories in the sandbox
+ #
+ # Returns:
+ # (list): A list of directory mark objects.
+ #
+ # The returned objects are dictionaries with the following attributes:
+ # directory: The absolute path within the sandbox
+ # artifact: Whether the path will contain artifacts or not
+ #
+ def _get_marked_directories(self):
+ return self.__directories
+
+ # _get_mount_sources()
+ #
+ # Fetches the list of mount sources
+ #
+ # Returns:
+ # (dict): A dictionary where keys are mount points and values are the mount sources
+ def _get_mount_sources(self):
+ return self.__mount_sources
+
+ # _set_mount_source()
+ #
+ # Sets the mount source for a given mountpoint
+ #
+ # Args:
+ # mountpoint (str): The absolute mountpoint path inside the sandbox
+ # mount_source (str): the host path to be mounted at the mount point
+ def _set_mount_source(self, mountpoint, mount_source):
+ self.__mount_sources[mountpoint] = mount_source
+
+ # _get_environment()
+ #
+ # Fetches the environment variables for running commands
+ # in the sandbox.
+ #
+ # Args:
+ # cwd (str): The working directory the command has been requested to run in, if any.
+ # env (dict): The environment the command has been requested to run in, if any.
+ #
+ # Returns:
+ # (dict): The environment variables to use when running commands in the sandbox
+ def _get_environment(self, *, cwd=None, env=None):
+ cwd = self._get_work_directory(cwd=cwd)
+ if env is None:
+ env = self.__env
+
+ # Naive getcwd implementations can break when bind-mounts to different
+ # paths on the same filesystem are present. Letting the command know
+ # what directory it is in makes it unnecessary to call the faulty
+ # getcwd.
+ env = dict(env)
+ env['PWD'] = cwd
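+ # (For example, a POSIX shell's pwd builtin reports an inherited $PWD
+ # when it names the current directory, so commands observe the
+ # sandbox-side path without a getcwd() call.)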
+
+ return env
+
+ # _get_work_directory()
+ #
+ # Fetches the working directory for running commands
+ # in the sandbox.
+ #
+ # Args:
+ # cwd (str): The working directory the command has been requested to run in, if any.
+ #
+ # Returns:
+ # (str): The sandbox work directory
+ def _get_work_directory(self, *, cwd=None):
+ return cwd or self.__cwd or '/'
+
+ # _get_scratch_directory()
+ #
+ # Fetches the sandbox scratch directory, this directory can
+ # be used by the sandbox implementation to cache things or
+ # redirect temporary fuse mounts.
+ #
+ # The scratch directory is guaranteed to be on the same
+ # filesystem as the root directory.
+ #
+ # Returns:
+ # (str): The sandbox scratch directory
+ def _get_scratch_directory(self):
+ assert not self.__bare_directory, "Scratch directories are not available when using a bare directory"
+ return self.__scratch
+
+ # _get_output()
+ #
+ # Fetches the stdout & stderr
+ #
+ # Returns:
+ # (file): The stdout, or None to inherit
+ # (file): The stderr, or None to inherit
+ def _get_output(self):
+ return (self.__stdout, self.__stderr)
+
+ # _get_config()
+ #
+ # Fetches the sandbox configuration object.
+ #
+ # Returns:
+ # (SandboxConfig): An object containing the configuration
+ # data passed in during construction.
+ def _get_config(self):
+ return self.__config
+
+ # _has_command()
+ #
+ # Tests whether a command exists inside the sandbox
+ #
+ # Args:
+ # command (str): The command to test.
+ # env (dict): A dictionary of string key, value pairs to set as environment
+ # variables inside the sandbox environment.
+ # Returns:
+ # (bool): Whether a command exists inside the sandbox.
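+ #
+ # As an illustration, with env={'PATH': '/usr/bin:/bin'} a lookup of
+ # 'gcc' checks '<root>/usr/bin/gcc' and then '<root>/bin/gcc'.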
+ def _has_command(self, command, env=None):
+ if os.path.isabs(command):
+ return os.path.lexists(os.path.join(
+ self._root, command.lstrip(os.sep)))
+
+ # Guard against a missing environment or PATH; without this,
+ # env.get() would raise AttributeError when env is None.
+ if env is None or 'PATH' not in env:
+ return False
+
+ for path in env['PATH'].split(':'):
+ if os.path.lexists(os.path.join(
+ self._root, path.lstrip(os.sep), command)):
+ return True
+
+ return False
+
+ # _get_plugin_id()
+ #
+ # Get the plugin's unique identifier
+ #
+ def _get_plugin_id(self):
+ return self.__plugin_id
+
+ # _callback()
+ #
+ # If this is called outside a batch context, the specified function is
+ # invoked immediately.
+ #
+ # If this is called in a batch context, the function is added to the batch
+ # for later invocation.
+ #
+ # Args:
+ # callback (callable): The function to invoke
+ #
+ def _callback(self, callback):
+ if self.__batch:
+ batch_call = _SandboxBatchCall(callback)
+
+ current_group = self.__batch.current_group
+ current_group.append(batch_call)
+ else:
+ callback()
+
+ # _disable_run()
+ #
+ # Raise exception if `Sandbox.run()` is called. This enables use of
+ # CasBasedDirectory for faster staging when command execution is not
+ # required.
+ #
+ def _disable_run(self):
+ self.__allow_run = False
+
+ # _set_build_directory()
+ #
+ # Sets the build directory - the directory which may be preserved as
+ # buildtree in the artifact.
+ #
+ # Args:
+ # directory (str): An absolute path within the sandbox
+ # always (bool): True if the build directory should always be downloaded,
+ # False if it should be downloaded only on failure
+ #
+ def _set_build_directory(self, directory, *, always):
+ self._build_directory = directory
+ self._build_directory_always = always
+
+
+# _SandboxBatch()
+#
+# A batch of sandbox commands.
+#
+class _SandboxBatch():
+
+ def __init__(self, sandbox, main_group, flags, *, collect=None):
+ self.sandbox = sandbox
+ self.main_group = main_group
+ self.current_group = main_group
+ self.flags = flags
+ self.collect = collect
+
+ def execute(self):
+ self.main_group.execute(self)
+
+ def execute_group(self, group):
+ if group.label:
+ context = self.sandbox._get_context()
+ cm = context.timed_activity(group.label, unique_id=self.sandbox._get_plugin_id())
+ else:
+ cm = contextlib.suppress()
+
+ with cm:
+ group.execute_children(self)
+
+ def execute_command(self, command):
+ if command.label:
+ context = self.sandbox._get_context()
+ message = Message(self.sandbox._get_plugin_id(), MessageType.STATUS,
+ 'Running command', detail=command.label)
+ context.message(message)
+
+ exitcode = self.sandbox._run(command.command, self.flags, cwd=command.cwd, env=command.env)
+ if exitcode != 0:
+ cmdline = ' '.join(shlex.quote(cmd) for cmd in command.command)
+ label = command.label or cmdline
+ raise SandboxCommandError("Command failed with exitcode {}".format(exitcode),
+ detail=label, collect=self.collect)
+
+ def execute_call(self, call):
+ call.callback()
+
+
+# _SandboxBatchItem()
+#
+# An item in a command batch.
+#
+class _SandboxBatchItem():
+
+ def __init__(self, *, label=None):
+ self.label = label
+
+
+# _SandboxBatchCommand()
+#
+# A command item in a command batch.
+#
+class _SandboxBatchCommand(_SandboxBatchItem):
+
+ def __init__(self, command, *, cwd, env, label=None):
+ super().__init__(label=label)
+
+ self.command = command
+ self.cwd = cwd
+ self.env = env
+
+ def execute(self, batch):
+ batch.execute_command(self)
+
+
+# _SandboxBatchGroup()
+#
+# A group in a command batch.
+#
+class _SandboxBatchGroup(_SandboxBatchItem):
+
+ def __init__(self, *, label=None):
+ super().__init__(label=label)
+
+ self.children = []
+
+ def append(self, item):
+ self.children.append(item)
+
+ def execute(self, batch):
+ batch.execute_group(self)
+
+ def execute_children(self, batch):
+ for item in self.children:
+ item.execute(batch)
+
+
+# _SandboxBatchCall()
+#
+# A call item in a command batch.
+#
+class _SandboxBatchCall(_SandboxBatchItem):
+
+ def __init__(self, callback):
+ super().__init__()
+
+ self.callback = callback
+
+ def execute(self, batch):
+ batch.execute_call(self)