author    Tristan van Berkom <tristan.vanberkom@codethink.co.uk>  2020-06-15 17:15:02 +0900
committer Tristan van Berkom <tristan.vanberkom@codethink.co.uk>  2020-06-15 19:11:12 +0900
commit    5daa22ae6f60568849254274fdbf57bde5b919d8 (patch)
tree      5e689c7d7bad75dfa7b3b15a2f726131830573a1
parent    0c14551de6ff5c84e1689d544f2d3ee073ac0a07 (diff)
Completely abolish job pickling. (tristan/nuke-pickle-jobber)
-rw-r--r--  .gitlab-ci.yml                                       12
-rw-r--r--  src/buildstream/_cas/cascache.py                     26
-rw-r--r--  src/buildstream/_frontend/cli.py                     42
-rw-r--r--  src/buildstream/_loader/loader.py                    30
-rw-r--r--  src/buildstream/_messenger.py                        43
-rw-r--r--  src/buildstream/_options/optionpool.py               11
-rw-r--r--  src/buildstream/_platform/platform.py                14
-rw-r--r--  src/buildstream/_pluginfactory/pluginfactory.py      33
-rw-r--r--  src/buildstream/_scheduler/jobs/job.py                8
-rw-r--r--  src/buildstream/_scheduler/jobs/jobpickler.py       202
-rw-r--r--  src/buildstream/_scheduler/scheduler.py              15
-rw-r--r--  src/buildstream/_stream.py                           15
-rw-r--r--  src/buildstream/element.py                           34
-rw-r--r--  src/buildstream/node.pyx                             27
-rw-r--r--  src/buildstream/plugin.py                            21
-rw-r--r--  src/buildstream/sandbox/_sandboxbuildboxrun.py        4
-rw-r--r--  src/buildstream/source.py                            22
-rwxr-xr-x  tests/conftest.py                                    14
-rw-r--r--  tox.ini                                               1
19 files changed, 1 insertion(+), 573 deletions(-)
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index d4f56b39f..a5f2245c9 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -208,18 +208,6 @@ tests-remote-execution:
SOURCE_CACHE_SERVICE: http://docker:50052
PYTEST_ARGS: "--color=yes --remote-execution"
-tests-spawn-multiprocessing-start-method:
- image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:31-${DOCKER_IMAGE_VERSION}
- <<: *tests
- variables:
- BST_FORCE_START_METHOD: "spawn"
- script:
- # FIXME: Until all the tests pass as normal, override which tests will run here.
- - mkdir -p "${INTEGRATION_CACHE}"
- - useradd -Um buildstream
- - chown -R buildstream:buildstream .
- - su buildstream -c "tox -- ${PYTEST_ARGS} tests/{artifactcache,cachekey,elements,format,frontend,internals,plugins,sourcecache}"
-
tests-no-usedevelop:
# Ensure that tests also pass without `--develop` flag.
image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:31-${DOCKER_IMAGE_VERSION}
diff --git a/src/buildstream/_cas/cascache.py b/src/buildstream/_cas/cascache.py
index 6b2f0d1a4..7936121ea 100644
--- a/src/buildstream/_cas/cascache.py
+++ b/src/buildstream/_cas/cascache.py
@@ -94,32 +94,6 @@ class CASCache:
self._casd_channel = self._casd_process_manager.create_channel()
self._cache_usage_monitor = _CASCacheUsageMonitor(self._casd_channel)
- def __getstate__(self):
- # Note that we can't use jobpickler's
- # 'get_state_for_child_job_pickling' protocol here, since CASCache's
- # are passed to subprocesses other than child jobs. e.g.
- # test.utils.ArtifactShare.
-
- state = self.__dict__.copy()
-
- # Child jobs do not need to manage the CASD process, they only need a
- # connection to CASD.
- if state["_casd_process_manager"] is not None:
- state["_casd_process_manager"] = None
- # In order to be pickle-able, the connection must be in the initial
- # 'closed' state.
- state["_casd_channel"] = self._casd_process_manager.create_channel()
-
- # The usage monitor is not pickle-able, but we also don't need it in
- # child processes currently. Make sure that if this changes, we get a
- # bug report, by setting _cache_usage_monitor_forbidden.
- assert "_cache_usage_monitor" in state
- assert "_cache_usage_monitor_forbidden" in state
- state["_cache_usage_monitor"] = None
- state["_cache_usage_monitor_forbidden"] = True
-
- return state
-
# get_cas():
#
# Return ContentAddressableStorage stub for buildbox-casd channel.
diff --git a/src/buildstream/_frontend/cli.py b/src/buildstream/_frontend/cli.py
index 18bb03c74..d5fa47091 100644
--- a/src/buildstream/_frontend/cli.py
+++ b/src/buildstream/_frontend/cli.py
@@ -1,4 +1,3 @@
-import multiprocessing
import os
import sys
from functools import partial
@@ -211,41 +210,6 @@ def validate_output_streams():
sys.exit(-1)
-def handle_bst_force_start_method_env():
- bst_force_start_method_str = "BST_FORCE_START_METHOD"
- if bst_force_start_method_str in os.environ:
- start_method = os.environ[bst_force_start_method_str]
- existing_start_method = multiprocessing.get_start_method(allow_none=True)
- if existing_start_method is None:
- multiprocessing.set_start_method(start_method)
- print(
- bst_force_start_method_str + ": multiprocessing start method forced to:",
- start_method,
- file=sys.stderr,
- flush=True,
- )
- elif existing_start_method == start_method:
- # Note that when testing, we run the buildstream entrypoint
- # multiple times in the same executable, so guard against that
- # here.
- print(
- bst_force_start_method_str + ": multiprocessing start method already set to:",
- existing_start_method,
- file=sys.stderr,
- flush=True,
- )
- else:
- print(
- bst_force_start_method_str + ": cannot set multiprocessing start method to:",
- start_method,
- ", already set to:",
- existing_start_method,
- file=sys.stderr,
- flush=True,
- )
- sys.exit(-1)
-
-
def override_main(self, args=None, prog_name=None, complete_var=None, standalone_mode=True, **extra):
# Hook for the Bash completion. This only activates if the Bash
@@ -271,12 +235,6 @@ def override_main(self, args=None, prog_name=None, complete_var=None, standalone
# in the case that it is non-blocking.
validate_output_streams()
- # We can only set the global multiprocessing start method once; for that
- # reason we're advised to do it inside the entrypoint, where it's more
- # likely that you can ensure the code path is only followed once. In the
- # case of testing, our tests precede our entrypoint, so we do our best.
- handle_bst_force_start_method_env()
-
original_main(self, args=args, prog_name=prog_name, complete_var=None, standalone_mode=standalone_mode, **extra)
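
The handler deleted above leaned on one property of the multiprocessing API: the global start method can be set exactly once per process. A minimal standalone sketch of that guard, keeping the BST_FORCE_START_METHOD variable from the removed code (the function name is illustrative):

    import multiprocessing
    import os
    import sys

    def force_start_method():
        wanted = os.environ.get("BST_FORCE_START_METHOD")
        if wanted is None:
            return
        current = multiprocessing.get_start_method(allow_none=True)
        if current is None:
            # Not decided yet: safe to set, but only this once.
            multiprocessing.set_start_method(wanted)
        elif current != wanted:
            # Already fixed to something else: too late to change it.
            print("cannot force start method to", wanted, file=sys.stderr)
            sys.exit(1)
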
diff --git a/src/buildstream/_loader/loader.py b/src/buildstream/_loader/loader.py
index 5a5d24dc4..fd9e2ef2d 100644
--- a/src/buildstream/_loader/loader.py
+++ b/src/buildstream/_loader/loader.py
@@ -183,36 +183,6 @@ class Loader:
return loader
- # get_state_for_child_job_pickling(self)
- #
- # Return data necessary to reconstruct this object in a child job process.
- #
- # This should be implemented the same as __getstate__(). We define this
- # method instead as it is child job specific.
- #
- # Returns:
- # (dict): This `state` is what we want `self.__dict__` to be restored to
- # after instantiation in the child process.
- #
- def get_state_for_child_job_pickling(self):
- state = self.__dict__.copy()
-
- # When pickling a Loader over to the ChildJob, we don't want to bring
- # the whole Stream over with it. The _fetch_subprojects member is a
- # method of the Stream. We also don't want to remove it in the main
- # process. If we remove it in the child process then we will already be
- # too late. The only time that seems just right is here, when preparing
- # the child process' copy of the Loader.
- #
- del state["_fetch_subprojects"]
-
- # Also there's no gain in pickling over the caches, and they might
- # contain things which are unpleasantly large or unable to pickle.
- del state["_elements"]
- del state["_meta_elements"]
-
- return state
-
# collect_element_no_deps()
#
# Collect a single element, without its dependencies, into a meta_element
diff --git a/src/buildstream/_messenger.py b/src/buildstream/_messenger.py
index 03b2833ec..3a32a2467 100644
--- a/src/buildstream/_messenger.py
+++ b/src/buildstream/_messenger.py
@@ -375,49 +375,6 @@ class Messenger:
self._log_handle.write("{}\n".format(text))
self._log_handle.flush()
- # get_state_for_child_job_pickling(self)
- #
- # Return data necessary to reconstruct this object in a child job process.
- #
- # This should be implemented the same as __getstate__(). We define this
- # method instead as it is child job specific.
- #
- # Returns:
- # (dict): This `state` is what we want `self.__dict__` to be restored to
- # after instantiation in the child process.
- #
- def get_state_for_child_job_pickling(self):
- state = self.__dict__.copy()
-
- # When pickling a Messenger over to the ChildJob, we don't want to bring
- # the whole _message_handler over with it. We also don't want to remove it
- # in the main process. If we remove it in the child process then we will
- # already be too late. The only time that seems just right is here, when
- # preparing the child process' copy of the Messenger.
- #
- # Another approach might be to use a context manager on the Messenger,
- # which removes and restores the _message_handler. This wouldn't require
- # access to private details of Messenger, but it would open up a window
- # where messages wouldn't be handled as expected.
- #
- del state["_message_handler"]
-
- # The render status callback is only used in the main process
- #
- del state["_render_status_cb"]
-
- # The "simple_task" context manager is not needed outside the main
- # process. During testing we override it to something that cannot
- # pickle, so just drop it when pickling to a child job. Note that it
- # will only appear in 'state' if it has been overridden.
- #
- state.pop("simple_task", None)
-
- # The State object is not needed outside the main process
- del state["_state"]
-
- return state
-
# _render_status()
#
# Calls the render status callback set in the messenger, but only if a
diff --git a/src/buildstream/_options/optionpool.py b/src/buildstream/_options/optionpool.py
index c05c55914..b30d1f0ce 100644
--- a/src/buildstream/_options/optionpool.py
+++ b/src/buildstream/_options/optionpool.py
@@ -65,17 +65,6 @@ class OptionPool:
self._environment = None
self._init_environment()
- def __getstate__(self):
- state = self.__dict__.copy()
- # Jinja2 Environments don't appear to be serializable. It is easy
- # enough for us to reconstruct this one anyway, so no need to pickle it.
- del state["_environment"]
- return state
-
- def __setstate__(self, state):
- self.__dict__.update(state)
- self._init_environment()
-
# load()
#
# Loads the options described in the project.conf
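
The two OptionPool methods removed here are the stock pickle protocol for a class holding one member that cannot be serialized: drop it in __getstate__, rebuild it in __setstate__. A self-contained sketch of the same pattern, with a lock standing in for the Jinja2 environment:

    import pickle
    import threading

    class Pool:
        def __init__(self):
            self.options = {}
            self._environment = threading.Lock()  # stand-in: not picklable

        def __getstate__(self):
            state = self.__dict__.copy()
            del state["_environment"]  # cheap to reconstruct, so don't pickle it
            return state

        def __setstate__(self, state):
            self.__dict__.update(state)
            self._environment = threading.Lock()  # rebuilt on unpickle

    restored = pickle.loads(pickle.dumps(Pool()))
    assert restored._environment is not None
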
diff --git a/src/buildstream/_platform/platform.py b/src/buildstream/_platform/platform.py
index f46043cbf..50a1d8e6c 100644
--- a/src/buildstream/_platform/platform.py
+++ b/src/buildstream/_platform/platform.py
@@ -18,7 +18,6 @@
# Authors:
# Tristan Maat <tristan.maat@codethink.co.uk>
-import multiprocessing
import os
import platform
@@ -146,19 +145,6 @@ class Platform:
# Otherwise, use the hardware identifier from uname
return Platform.canonicalize_arch(uname.machine)
- # does_multiprocessing_start_require_pickling():
- #
- # Returns True if the multiprocessing start method will pickle arguments
- # to new processes.
- #
- # Returns:
- # (bool): Whether pickling is required or not
- #
- def does_multiprocessing_start_require_pickling(self):
- # Note that if the start method has not been set before now, it will be
- # set to the platform default by `get_start_method`.
- return multiprocessing.get_start_method() != "fork"
-
##################################################################
# Sandbox functions #
##################################################################
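
The helper removed above encodes a single fact: only the "fork" start method hands the child a copy of the parent's memory, while "spawn" and "forkserver" start a fresh interpreter, so everything passed to the child must survive pickling. Its logic, as a standalone sketch:

    import multiprocessing

    def start_requires_pickling():
        # get_start_method() also fixes the platform default if nothing has
        # set a method yet ("fork" on Linux, "spawn" on macOS and Windows).
        return multiprocessing.get_start_method() != "fork"
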
diff --git a/src/buildstream/_pluginfactory/pluginfactory.py b/src/buildstream/_pluginfactory/pluginfactory.py
index 22f62427e..df950abee 100644
--- a/src/buildstream/_pluginfactory/pluginfactory.py
+++ b/src/buildstream/_pluginfactory/pluginfactory.py
@@ -92,39 +92,6 @@ class PluginFactory:
searchpath=[self._site_plugins_path], identifier=self._identifier + "site",
)
- def __getstate__(self):
- state = self.__dict__.copy()
-
- # PluginSource is not a picklable type, so we must reconstruct this one
- # as best we can when unpickling.
- #
- # Since the values of `_types` depend on the PluginSource, we must also
- # get rid of those. It is only a cache - we will automatically recreate
- # them on demand.
- #
- # Note that this method of referring to members is error-prone in that
- # a later 'search and replace' renaming might miss these. Guard against
- # this by making sure we are not creating new members, only clearing
- # existing ones.
- #
- del state["_site_source"]
- assert "_types" in state
- state["_types"] = {}
- assert "_sources" in state
- state["_sources"] = {}
-
- return state
-
- def __setstate__(self, state):
- self.__dict__.update(state)
-
- # Note that in order to enable plugins to be unpickled along with this
- # PluginSource, we would also have to set and restore the 'identifier'
- # of the PluginSource. We would also have to recreate `_types` as it
- # was before unpickling them. We are not using this method in
- # BuildStream, so the identifier is not restored here.
- self._init_site_source()
-
######################################################
# Public Methods #
######################################################
diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
index 23aa51e58..78a375fec 100644
--- a/src/buildstream/_scheduler/jobs/job.py
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -36,8 +36,6 @@ from ...types import FastEnum
from ... import _signals, utils
from .. import _multiprocessing
-from .jobpickler import pickle_child_job, do_pickled_child_job
-
# Return code values for the shutdown of job handling child processes
#
@@ -174,11 +172,7 @@ class Job:
self._message_element_key,
)
- if self._scheduler.context.platform.does_multiprocessing_start_require_pickling():
- pickled = pickle_child_job(child_job, self._scheduler.context.get_projects(),)
- self._process = _multiprocessing.AsyncioSafeProcess(target=do_pickled_child_job, args=[pickled, pipe_w],)
- else:
- self._process = _multiprocessing.AsyncioSafeProcess(target=child_job.child_action, args=[pipe_w],)
+ self._process = _multiprocessing.AsyncioSafeProcess(target=child_job.child_action, args=[pipe_w],)
# Block signals which are handled in the main process such that
# the child process does not inherit the parent's state, but the main
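
With the branch removed, child jobs always go straight to the process constructor. Note that under a non-fork start method, multiprocessing pickles target and args itself with the stock pickler; the explicit jobpickler step existed to swap in custom reducers the stock pickler lacks. A minimal illustration of the direct path, with standard multiprocessing standing in for AsyncioSafeProcess:

    import multiprocessing

    def child_action(conn):
        conn.send("done")
        conn.close()

    if __name__ == "__main__":
        parent_end, child_end = multiprocessing.Pipe()
        # Under "spawn", target and args are pickled implicitly right here.
        process = multiprocessing.Process(target=child_action, args=(child_end,))
        process.start()
        print(parent_end.recv())  # -> "done"
        process.join()
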
diff --git a/src/buildstream/_scheduler/jobs/jobpickler.py b/src/buildstream/_scheduler/jobs/jobpickler.py
deleted file mode 100644
index 1ebad7d49..000000000
--- a/src/buildstream/_scheduler/jobs/jobpickler.py
+++ /dev/null
@@ -1,202 +0,0 @@
-#
-# Copyright (C) 2019 Bloomberg Finance LP
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library. If not, see <http://www.gnu.org/licenses/>.
-#
-# Authors:
-# Angelos Evripiotis <jevripiotis@bloomberg.net>
-
-
-import copyreg
-import io
-import pickle
-
-from ..._protos.buildstream.v2.artifact_pb2 import Artifact as ArtifactProto
-from ..._protos.build.bazel.remote.execution.v2.remote_execution_pb2 import Digest as DigestProto
-
-# BuildStream toplevel imports
-from ..._loader import Loader
-from ..._messenger import Messenger
-from ... import utils, node
-
-# Note that `str(type(proto_class))` results in `GeneratedProtocolMessageType`
-# instead of the concrete type, so we come up with our own names here.
-_NAME_TO_PROTO_CLASS = {
- "artifact": ArtifactProto,
- "digest": DigestProto,
-}
-
-_PROTO_CLASS_TO_NAME = {cls: name for name, cls in _NAME_TO_PROTO_CLASS.items()}
-
-
-# pickle_child_job()
-#
-# Perform the special case pickling required to pickle a child job for
-# unpickling in a child process.
-#
-# Args:
-# child_job (ChildJob): The job to pickle.
-# projects (List[Project]): The list of loaded projects, so we can get the
-# relevant factories.
-#
-def pickle_child_job(child_job, projects):
- # Note that we need to consider all the state of the program that's
- # necessary for the job, this includes e.g. the global state of the node
- # module.
- node_module_state = node._get_state_for_pickling()
- return _pickle_child_job_data((child_job, node_module_state), projects,)
-
-
-# do_pickled_child_job()
-#
-# Unpickle the supplied 'pickled' job and call 'child_action' on it.
-#
-# This is expected to be run in a subprocess started from the main process, as
-# such it will fixup any globals to be in the expected state.
-#
-# Args:
-# pickled (BytesIO): The pickled data, and job to execute.
-# *child_args (any) : Any parameters to be passed to `child_action`.
-#
-def do_pickled_child_job(pickled, *child_args):
- utils._is_main_process = _not_main_process
-
- child_job, node_module_state = pickle.load(pickled)
- node._set_state_from_pickling(node_module_state)
- return child_job.child_action(*child_args)
-
-
-# _not_main_process()
-#
-# A function to replace `utils._is_main_process` when we're running in a
-# subprocess that was not forked - the inheritance of the main process id will
-# not work in this case.
-#
-# Note that by definition we will never be the main process.
-#
-def _not_main_process():
- return False
-
-
-# _pickle_child_job_data()
-#
-# Perform the special case pickling required to pickle a child job for
-# unpickling in a child process.
-#
-# Note that this just enables the pickling of things that contain ChildJob-s,
-# the thing to be pickled doesn't have to be a ChildJob.
-#
-# Note that we don't need an `unpickle_child_job_data`, as regular
-# `pickle.load()` will do everything required.
-#
-# Args:
-# child_job_data (ChildJob): The job to be pickled.
-# projects (List[Project]): The list of loaded projects, so we can get the
-# relevant factories.
-#
-# Returns:
-# An `io.BytesIO`, with the pickled contents of the ChildJob and everything it
-# transitively refers to.
-#
-# Some types require special handling when pickling to send to another process.
-# We register overrides for those special cases:
-#
-# o Very stateful objects: Some things carry much more state than they need for
-# pickling over to the child job process. This extra state brings
-# complication of supporting pickling of more types, and the performance
-# penalty of the actual pickling. Use private knowledge of these objects to
-# safely reduce the pickled state.
-#
-# o gRPC objects: These don't pickle, but they do have their own serialization
-# mechanism, which we use instead. To avoid modifying generated code, we
-# instead register overrides here.
-#
-# o Plugins: These cannot be unpickled unless the factory which created them
-# has been unpickled first, with the same identifier as before. See note
-# below. Some state in plugins is not necessary for child jobs, and comes
-# with a heavy cost; we also need to remove this before pickling.
-#
-def _pickle_child_job_data(child_job_data, projects):
-
- factory_list = [
- factory
- for p in projects
- for factory in [
- p.config.element_factory,
- p.first_pass_config.element_factory,
- p.config.source_factory,
- p.first_pass_config.source_factory,
- ]
- ]
-
- plugin_class_to_factory = {
- cls: factory for factory in factory_list if factory is not None for _, cls, _ in factory.list_plugins()
- }
-
- pickled_data = io.BytesIO()
- pickler = pickle.Pickler(pickled_data)
- pickler.dispatch_table = copyreg.dispatch_table.copy()
-
- def reduce_plugin(plugin):
- return _reduce_plugin_with_factory_dict(plugin, plugin_class_to_factory)
-
- for cls in plugin_class_to_factory:
- pickler.dispatch_table[cls] = reduce_plugin
- pickler.dispatch_table[ArtifactProto] = _reduce_proto
- pickler.dispatch_table[DigestProto] = _reduce_proto
- pickler.dispatch_table[Loader] = _reduce_object
- pickler.dispatch_table[Messenger] = _reduce_object
-
- pickler.dump(child_job_data)
- pickled_data.seek(0)
-
- return pickled_data
-
-
-def _reduce_object(instance):
- cls = type(instance)
- state = instance.get_state_for_child_job_pickling()
- return (cls.__new__, (cls,), state)
-
-
-def _reduce_proto(instance):
- name = _PROTO_CLASS_TO_NAME[type(instance)]
- data = instance.SerializeToString()
- return (_new_proto_from_reduction_args, (name, data))
-
-
-def _new_proto_from_reduction_args(name, data):
- cls = _NAME_TO_PROTO_CLASS[name]
- instance = cls()
- instance.ParseFromString(data)
- return instance
-
-
-def _reduce_plugin_with_factory_dict(plugin, plugin_class_to_factory):
- meta_kind, state = plugin._get_args_for_child_job_pickling()
- assert meta_kind
- factory = plugin_class_to_factory[type(plugin)]
- args = (factory, meta_kind)
- return (_new_plugin_from_reduction_args, args, state)
-
-
-def _new_plugin_from_reduction_args(factory, meta_kind):
- cls, _ = factory.lookup(None, meta_kind, None)
- plugin = cls.__new__(cls)
-
- # Note that we rely on the `__project` member of the Plugin to keep
- # `factory` alive after the scope of this function. If `factory` were to be
- # GC'd then we would see undefined behaviour.
-
- return plugin
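
For reference, the core mechanism this file implemented, a pickle.Pickler with a per-pickler dispatch_table of custom reducers, round-trips in a few dozen lines. The two classes below are illustrative stand-ins, not BuildStream types:

    import copyreg
    import io
    import pickle

    # Stand-in for a gRPC message: no pickle support, but it has its own
    # serialization, mirroring _reduce_proto above.
    class FakeProto:
        def __init__(self, payload=b""):
            self.payload = payload

        def SerializeToString(self):
            return self.payload

        def ParseFromString(self, data):
            self.payload = data

    def _new_proto(data):
        msg = FakeProto()
        msg.ParseFromString(data)
        return msg

    def _reduce_proto(msg):
        # (callable, args): pickle.load() calls _new_proto(data).
        return (_new_proto, (msg.SerializeToString(),))

    # Stand-in for Loader/Messenger: reduced through a job-specific state
    # hook, mirroring _reduce_object above.
    class Stateful:
        def __init__(self):
            self.keep = 1
            self.drop = lambda: None  # lambdas don't pickle

        def get_state_for_child_job_pickling(self):
            state = self.__dict__.copy()
            del state["drop"]
            return state

    def _reduce_object(instance):
        cls = type(instance)
        return (cls.__new__, (cls,), instance.get_state_for_child_job_pickling())

    buf = io.BytesIO()
    pickler = pickle.Pickler(buf)
    pickler.dispatch_table = copyreg.dispatch_table.copy()
    pickler.dispatch_table[FakeProto] = _reduce_proto
    pickler.dispatch_table[Stateful] = _reduce_object

    pickler.dump((FakeProto(b"digest"), Stateful()))
    buf.seek(0)
    msg, obj = pickle.load(buf)
    assert msg.payload == b"digest" and obj.keep == 1 and not hasattr(obj, "drop")
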
diff --git a/src/buildstream/_scheduler/scheduler.py b/src/buildstream/_scheduler/scheduler.py
index 43c6c9680..66174ad19 100644
--- a/src/buildstream/_scheduler/scheduler.py
+++ b/src/buildstream/_scheduler/scheduler.py
@@ -634,18 +634,3 @@ class Scheduler:
# as a 'BUG', format it appropriately & exit. mypy needs to ignore parameter
# types here as we're overriding sys globally in App._global_exception_handler()
sys.excepthook(type(e), e, e.__traceback__, exc) # type: ignore
-
- def __getstate__(self):
- # The only use-cases for pickling in BuildStream at the time of writing
- # are enabling the 'spawn' method of starting child processes, and
- # saving jobs to disk for replays.
- #
- # In both of these use-cases, a common mistake is that something being
- # pickled indirectly holds a reference to the Scheduler, which in turn
- # holds lots of things that are not pickleable.
- #
- # Make this situation easier to debug by failing early, in the
- # Scheduler itself. Pickling this is almost certainly a mistake, unless
- # a new use-case arises.
- #
- raise TypeError("Scheduler objects should not be pickled.")
diff --git a/src/buildstream/_stream.py b/src/buildstream/_stream.py
index 44faf2bc7..b38927e18 100644
--- a/src/buildstream/_stream.py
+++ b/src/buildstream/_stream.py
@@ -1643,21 +1643,6 @@ class Stream:
self._notification_queue.appendleft(notification)
self._notifier()
- def __getstate__(self):
- # The only use-cases for pickling in BuildStream at the time of writing
- # are enabling the 'spawn' method of starting child processes, and
- # saving jobs to disk for replays.
- #
- # In both of these use-cases, a common mistake is that something being
- # pickled indirectly holds a reference to the Stream, which in turn
- # holds lots of things that are not pickleable.
- #
- # Make this situation easier to debug by failing early, in the
- # Stream itself. Pickling this is almost certainly a mistake, unless
- # a new use-case arises.
- #
- raise TypeError("Stream objects should not be pickled.")
-
# _handle_compression()
#
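
The two __getstate__ methods deleted here and in Scheduler above are a defensive pattern rather than serialization support: raising from __getstate__ makes an accidental pickle of a heavyweight object fail immediately, with a clear message, rather than obscurely in whatever unpicklable member it happens to reach first. Sketch:

    import pickle

    class Scheduler:
        def __getstate__(self):
            raise TypeError("Scheduler objects should not be pickled.")

    try:
        pickle.dumps(Scheduler())
    except TypeError as exc:
        print(exc)  # fails early, at pickling time
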
diff --git a/src/buildstream/element.py b/src/buildstream/element.py
index 917ad9ba9..6a0fa5fab 100644
--- a/src/buildstream/element.py
+++ b/src/buildstream/element.py
@@ -2251,40 +2251,6 @@ class Element(Plugin):
rdep.__buildable_callback(rdep)
rdep.__buildable_callback = None
- # _get_args_for_child_job_pickling(self)
- #
- # Return data necessary to reconstruct this object in a child job process.
- #
- # Returns:
- # (str, dict): A tuple of (meta_kind, state), where a factory can use
- # `meta_kind` to create an instance of the same type as `self`. `state`
- # is what we want `self.__dict__` to be restored to after instantiation
- # in the child process.
- #
- def _get_args_for_child_job_pickling(self):
- state = self.__dict__.copy()
-
- # These are called in the main process to notify the scheduler about
- # certain things. They carry a reference to the scheduler, which we
- # don't want in the child process, so clear them.
- #
- # Note that this method of referring to members is error-prone in that
- # a later 'search and replace' renaming might miss these. Guard against
- # this by making sure we are not creating new members, only clearing
- # existing ones.
- #
- assert "_Element__can_query_cache_callback" in state
- state["_Element__can_query_cache_callback"] = None
- assert "_Element__buildable_callback" in state
- state["_Element__buildable_callback"] = None
-
- # This callback is not even read in the child process, so delete it.
- # If this assumption is invalidated, we will get an attribute error to
- # let us know, and we will need to update accordingly.
- del state["_Element__required_callback"]
-
- return self.__meta_kind, state
-
def _walk_artifact_files(self):
yield from self.__artifact.get_files().walk()
diff --git a/src/buildstream/node.pyx b/src/buildstream/node.pyx
index 32b8c130c..394138311 100644
--- a/src/buildstream/node.pyx
+++ b/src/buildstream/node.pyx
@@ -1608,33 +1608,6 @@ def _reset_global_state():
__counter = 0
-# _get_state_for_pickling()
-#
-# This gets the global variables necessary to preserve in a child process when
-# e.g. running a ChildJob. Things that are pickled from the parent process to
-# the child process will expect this module to be in the same state as in the
-# parent.
-#
-# Returns:
-# (object): The state to supply to a call of _set_state_from_pickling().
-#
-def _get_state_for_pickling():
- return __FILE_LIST, __counter
-
-
-# _set_state_from_pickling()
-#
-# This restores the global variables saved from _get_state_for_pickling(). See
-# that function for more details.
-#
-# Args:
-# state (object): The result of calling _get_state_for_pickling().
-#
-def _set_state_from_pickling(state):
- global __FILE_LIST, __counter
- __FILE_LIST, __counter = state
-
-
#############################################################
# Module local helper Methods #
#############################################################
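
The function pair removed above addressed a spawn-specific gap: module-level globals are not inherited by a spawned child, so any global state that pickled objects depend on has to be captured in the parent and restored in the child before the job runs. A stripped-down sketch of the same idea (names are illustrative):

    # some_module.py: module state a spawned child must see
    _file_list = []
    _counter = 0

    def get_state_for_pickling():
        return _file_list, _counter

    def set_state_from_pickling(state):
        global _file_list, _counter
        _file_list, _counter = state

    # In the child, before touching anything that relies on this module:
    #   set_state_from_pickling(state_captured_in_parent)
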
diff --git a/src/buildstream/plugin.py b/src/buildstream/plugin.py
index f8652e5cb..14e22e56d 100644
--- a/src/buildstream/plugin.py
+++ b/src/buildstream/plugin.py
@@ -693,27 +693,6 @@ class Plugin:
def _get_full_name(self):
return self.__full_name
- # _get_args_for_child_job_pickling(self)
- #
- # Return data necessary to reconstruct this object in a child job process.
- #
- # Returns:
- # (str, dict): A tuple of (meta_kind, state), where a factory can use
- # `meta_kind` to create an instance of the same type as `self`. `state`
- # is what we want `self.__dict__` to be restored to after instantiation
- # in the child process.
- #
- def _get_args_for_child_job_pickling(self):
- # Note that this is only to be implemented as a BuildStream internal,
- # so it's not an ImplError - those apply to custom plugins. Direct
- # descendants of Plugin must implement this, e.g. Element and Source.
- # Raise NotImplementedError as this would be an internal bug.
- raise NotImplementedError(
- "{tag} plugin '{kind}' does not implement _get_args_for_child_job_pickling()".format(
- tag=self.__type_tag, kind=self.get_kind()
- )
- )
-
#############################################################
# Local Private Methods #
#############################################################
diff --git a/src/buildstream/sandbox/_sandboxbuildboxrun.py b/src/buildstream/sandbox/_sandboxbuildboxrun.py
index 3907b6f0d..3d71b7440 100644
--- a/src/buildstream/sandbox/_sandboxbuildboxrun.py
+++ b/src/buildstream/sandbox/_sandboxbuildboxrun.py
@@ -72,10 +72,6 @@ class SandboxBuildBoxRun(SandboxREAPI):
@classmethod
def check_sandbox_config(cls, platform, config):
- if platform.does_multiprocessing_start_require_pickling():
- # Reinitialize class as class data is not pickled.
- cls.check_available()
-
if config.build_os not in cls._osfamilies:
raise SandboxError("OS '{}' is not supported by buildbox-run.".format(config.build_os))
if config.build_arch not in cls._isas:
diff --git a/src/buildstream/source.py b/src/buildstream/source.py
index 049db7062..f15d5a628 100644
--- a/src/buildstream/source.py
+++ b/src/buildstream/source.py
@@ -1183,28 +1183,6 @@ class Source(Plugin):
def _element_name(self):
return self.__element_name
- # _get_args_for_child_job_pickling(self)
- #
- # Return data necessary to reconstruct this object in a child job process.
- #
- # Returns:
- # (str, dict): A tuple of (meta_kind, state), where a factory can use
- # `meta_kind` to create an instance of the same type as `self`. `state`
- # is what we want `self.__dict__` to be restored to after instantiation
- # in the child process.
- #
- def _get_args_for_child_job_pickling(self):
- # In case you're wondering, note that it doesn't seem to be necessary
- # to make a copy of `self.__dict__` here, because:
- #
- # o It seems that the default implementation of `_PyObject_GetState`
- # in `typeobject.c` currently works this way, in CPython.
- #
- # o The code sketch of how pickling works also returns `self.__dict__`:
- # https://docs.python.org/3/library/pickle.html#pickling-class-instances
- #
- return self.__meta_kind, self.__dict__
-
#############################################################
# Local Private Methods #
#############################################################
diff --git a/tests/conftest.py b/tests/conftest.py
index 8d33fa024..8b526f982 100755
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -20,7 +20,6 @@
# Tristan Maat <tristan.maat@codethink.co.uk>
#
import os
-import multiprocessing
import pkg_resources
import pytest
@@ -144,16 +143,3 @@ def set_xdg_paths(pytestconfig):
value = os.path.join(pytestconfig.getoption("basetemp"), default)
os.environ[env_var] = value
-
-
-def pytest_configure(config):
- # If we need to set_start_method() then we need to do it as early as
- # possible. Note that some tests implicitly set the start method by using
- # multiprocessing. If we wait for bst to do it, it will already be too
- # late.
- if "BST_FORCE_START_METHOD" in os.environ:
- start_method = os.environ["BST_FORCE_START_METHOD"]
- multiprocessing.set_start_method(start_method)
- print(
- "Multiprocessing method set to:", start_method,
- )
diff --git a/tox.ini b/tox.ini
index bc64928a8..f7146294c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -50,7 +50,6 @@ deps =
passenv =
ARTIFACT_CACHE_SERVICE
BST_CAS_STAGING_ROOT
- BST_FORCE_START_METHOD
GI_TYPELIB_PATH
INTEGRATION_CACHE
http_proxy