author     Chandan Singh <csingh43@bloomberg.net>  2019-11-05 13:40:03 +0000
committer  Chandan Singh <csingh43@bloomberg.net>  2019-11-05 13:40:03 +0000
commit     ab707e87f53249d7f2aac17683254b54196f90ce (patch)
tree       d1d2898c6561a8ca362419dce92a6f808d45b4e6 /tests
parent     e06c2295b063245dbdb2397e5bd8c4d0a79ba10d (diff)
download   buildstream-ab707e87f53249d7f2aac17683254b54196f90ce.tar.gz

Use 119 line length (branch: chandan/black)
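This commit mechanically reformats the tests/ tree with Black, the Python code formatter, after raising the maximum line length from Black's default of 88 to 119 characters (hence the branch name chandan/black): calls that previously had to be wrapped across several lines now fit on one line and are collapsed, which accounts for the large deletion count in the diffstat below. The commit does not record how Black was invoked; as a minimal sketch, either of the conventional forms below would produce a reformat like this one (the exact invocation, path argument, and config location are assumptions, not part of this commit):

    # hypothetical command-line invocation; --line-length is a standard Black option
    black --line-length 119 tests/

    # or the equivalent persistent setting in pyproject.toml
    [tool.black]
    line-length = 119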
Diffstat (limited to 'tests')
-rw-r--r--  tests/artifactcache/artifactservice.py | 13
-rw-r--r--  tests/artifactcache/config.py | 64
-rw-r--r--  tests/artifactcache/expiry.py | 17
-rw-r--r--  tests/artifactcache/junctions.py | 48
-rw-r--r--  tests/artifactcache/pull.py | 24
-rw-r--r--  tests/artifactcache/push.py | 22
-rw-r--r--  tests/cachekey/cachekey.py | 33
-rwxr-xr-x  tests/cachekey/update.py | 8
-rwxr-xr-x  tests/conftest.py | 14
-rw-r--r--  tests/elements/filter.py | 132
-rw-r--r--  tests/elements/filter/basic/element_plugins/dynamic.py | 5
-rw-r--r--  tests/examples/autotools.py | 28
-rw-r--r--  tests/examples/developing.py | 60
-rw-r--r--  tests/examples/first-project.py | 14
-rw-r--r--  tests/examples/flatpak-autotools.py | 36
-rw-r--r--  tests/examples/integration-commands.py | 25
-rw-r--r--  tests/examples/junctions.py | 42
-rw-r--r--  tests/examples/running-commands.py | 21
-rw-r--r--  tests/external_plugins.py | 16
-rw-r--r--  tests/format/include.py | 106
-rw-r--r--  tests/format/include_composition.py | 24
-rw-r--r--  tests/format/invalid_keys.py | 7
-rw-r--r--  tests/format/junctions.py | 71
-rw-r--r--  tests/format/listdirectiveerrors.py | 30
-rw-r--r--  tests/format/optionarch.py | 20
-rw-r--r--  tests/format/optionbool.py | 41
-rw-r--r--  tests/format/optioneltmask.py | 28
-rw-r--r--  tests/format/optionenum.py | 42
-rw-r--r--  tests/format/optionexports.py | 12
-rw-r--r--  tests/format/optionflags.py | 47
-rw-r--r--  tests/format/optionos.py | 4
-rw-r--r--  tests/format/options.py | 120
-rw-r--r--  tests/format/project.py | 44
-rw-r--r--  tests/format/project/plugin-preflight-error/errorplugin/preflighterror.py | 3
-rw-r--r--  tests/format/projectoverrides.py | 12
-rw-r--r--  tests/format/variables.py | 89
-rw-r--r--  tests/frontend/artifact_delete.py | 22
-rw-r--r--  tests/frontend/artifact_list_contents.py | 21
-rw-r--r--  tests/frontend/artifact_log.py | 15
-rw-r--r--  tests/frontend/artifact_show.py | 4
-rw-r--r--  tests/frontend/buildcheckout.py | 144
-rw-r--r--  tests/frontend/completions.py | 160
-rw-r--r--  tests/frontend/compose_splits.py | 8
-rw-r--r--  tests/frontend/configurable_warnings.py | 10
-rw-r--r--  tests/frontend/configuredwarning/plugins/corewarn.py | 3
-rw-r--r--  tests/frontend/configuredwarning/plugins/warninga.py | 4
-rw-r--r--  tests/frontend/configuredwarning/plugins/warningb.py | 4
-rw-r--r--  tests/frontend/consistencyerror/plugins/consistencyerror.py | 4
-rw-r--r--  tests/frontend/cross_junction_workspace.py | 6
-rw-r--r--  tests/frontend/fetch.py | 4
-rw-r--r--  tests/frontend/help.py | 14
-rw-r--r--  tests/frontend/init.py | 45
-rw-r--r--  tests/frontend/large_directory.py | 8
-rw-r--r--  tests/frontend/logging.py | 26
-rw-r--r--  tests/frontend/mirror.py | 58
-rw-r--r--  tests/frontend/order.py | 34
-rw-r--r--  tests/frontend/overlaps.py | 26
-rw-r--r--  tests/frontend/progress.py | 16
-rw-r--r--  tests/frontend/project/sources/fetch_source.py | 17
-rw-r--r--  tests/frontend/pull.py | 92
-rw-r--r--  tests/frontend/push.py | 121
-rw-r--r--  tests/frontend/rebuild.py | 8
-rw-r--r--  tests/frontend/show.py | 156
-rw-r--r--  tests/frontend/source_checkout.py | 110
-rw-r--r--  tests/frontend/track.py | 60
-rw-r--r--  tests/frontend/workspace.py | 379
-rw-r--r--  tests/integration/artifact.py | 35
-rw-r--r--  tests/integration/autotools.py | 22
-rw-r--r--  tests/integration/build-uid.py | 18
-rw-r--r--  tests/integration/cachedfail.py | 40
-rw-r--r--  tests/integration/cmake.py | 22
-rw-r--r--  tests/integration/compose.py | 9
-rw-r--r--  tests/integration/filter.py | 14
-rw-r--r--  tests/integration/import.py | 10
-rw-r--r--  tests/integration/make.py | 13
-rw-r--r--  tests/integration/manual.py | 44
-rw-r--r--  tests/integration/messages.py | 33
-rw-r--r--  tests/integration/pip_element.py | 43
-rw-r--r--  tests/integration/pip_source.py | 32
-rw-r--r--  tests/integration/pullbuildtrees.py | 75
-rw-r--r--  tests/integration/sandbox-bwrap.py | 16
-rw-r--r--  tests/integration/script.py | 101
-rw-r--r--  tests/integration/shell.py | 142
-rw-r--r--  tests/integration/shellbuildtrees.py | 272
-rw-r--r--  tests/integration/sockets.py | 8
-rw-r--r--  tests/integration/source-determinism.py | 9
-rw-r--r--  tests/integration/stack.py | 7
-rw-r--r--  tests/integration/symlinks.py | 35
-rw-r--r--  tests/integration/workspace.py | 94
-rw-r--r--  tests/internals/cascache.py | 4
-rw-r--r--  tests/internals/context.py | 4
-rw-r--r--  tests/internals/pluginfactory.py | 84
-rw-r--r--  tests/internals/pluginloading.py | 8
-rw-r--r--  tests/internals/storage.py | 4
-rw-r--r--  tests/internals/storage_vdir_import.py | 16
-rw-r--r--  tests/internals/yaml.py | 438
-rw-r--r--  tests/plugins/deprecationwarnings/deprecationwarnings.py | 10
-rw-r--r--  tests/remoteexecution/buildfail.py | 9
-rw-r--r--  tests/remoteexecution/buildtree.py | 46
-rw-r--r--  tests/remoteexecution/junction.py | 19
-rw-r--r--  tests/remoteexecution/partial.py | 22
-rw-r--r--  tests/remoteexecution/simple.py | 9
-rw-r--r--  tests/sandboxes/missing_dependencies.py | 8
-rw-r--r--  tests/sandboxes/remote-exec-config.py | 24
-rw-r--r--  tests/sandboxes/selection.py | 6
-rw-r--r--  tests/sourcecache/cache.py | 4
-rw-r--r--  tests/sourcecache/config.py | 9
-rw-r--r--  tests/sourcecache/fetch.py | 28
-rw-r--r--  tests/sourcecache/source-checkout.py | 15
-rw-r--r--  tests/sourcecache/workspace.py | 16
-rw-r--r--  tests/sources/bzr.py | 5
-rw-r--r--  tests/sources/deb.py | 19
-rw-r--r--  tests/sources/git.py | 106
-rw-r--r--  tests/sources/local.py | 32
-rw-r--r--  tests/sources/patch.py | 24
-rw-r--r--  tests/sources/previous_source_access.py | 9
-rw-r--r--  tests/sources/previous_source_access/plugins/sources/foo_transform.py | 3
-rw-r--r--  tests/sources/remote.py | 44
-rw-r--r--  tests/sources/tar.py | 77
-rw-r--r--  tests/sources/zip.py | 28
-rw-r--r--  tests/testutils/artifactshare.py | 23
-rw-r--r--  tests/testutils/context.py | 4
-rw-r--r--  tests/testutils/http_server.py | 4
-rw-r--r--  tests/testutils/patch.py | 8
-rw-r--r--  tests/testutils/python_repo.py | 10
-rw-r--r--  tests/testutils/repo/bzr.py | 12
-rw-r--r--  tests/testutils/repo/git.py | 12
-rw-r--r--  tests/testutils/setuptools.py | 4
128 files changed, 982 insertions(+), 4220 deletions(-)
diff --git a/tests/artifactcache/artifactservice.py b/tests/artifactcache/artifactservice.py
index dafbc8fc9..67dd80942 100644
--- a/tests/artifactcache/artifactservice.py
+++ b/tests/artifactcache/artifactservice.py
@@ -28,9 +28,7 @@ from buildstream._protos.buildstream.v2.artifact_pb2 import (
UpdateArtifactRequest,
)
from buildstream._protos.buildstream.v2.artifact_pb2_grpc import ArtifactServiceStub
-from buildstream._protos.build.bazel.remote.execution.v2 import (
- remote_execution_pb2 as re_pb2,
-)
+from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as re_pb2
from buildstream import utils
from tests.testutils.artifactshare import create_artifact_share
@@ -100,14 +98,9 @@ def test_update_artifact(tmpdir, files):
except grpc.RpcError as e:
assert e.code() == grpc.StatusCode.FAILED_PRECONDITION
if files == "absent":
- assert (
- e.details() == "Artifact files specified but no files found"
- )
+ assert e.details() == "Artifact files specified but no files found"
elif files == "invalid":
- assert (
- e.details()
- == "Artifact files specified but directory not found"
- )
+ assert e.details() == "Artifact files specified but directory not found"
return
# If we uploaded the artifact check GetArtifact
diff --git a/tests/artifactcache/config.py b/tests/artifactcache/config.py
index d2df0fd79..204bc7398 100644
--- a/tests/artifactcache/config.py
+++ b/tests/artifactcache/config.py
@@ -25,12 +25,8 @@ cache3 = RemoteSpec(url="https://example.com/cache3", push=False)
cache4 = RemoteSpec(url="https://example.com/cache4", push=False)
cache5 = RemoteSpec(url="https://example.com/cache5", push=False)
cache6 = RemoteSpec(url="https://example.com/cache6", push=True, type=RemoteType.ALL)
-cache7 = RemoteSpec(
- url="https://index.example.com/cache1", push=True, type=RemoteType.INDEX
-)
-cache8 = RemoteSpec(
- url="https://storage.example.com/cache1", push=True, type=RemoteType.STORAGE
-)
+cache7 = RemoteSpec(url="https://index.example.com/cache1", push=True, type=RemoteType.INDEX)
+cache8 = RemoteSpec(url="https://storage.example.com/cache1", push=True, type=RemoteType.STORAGE)
# Generate cache configuration fragments for the user config and project config files.
@@ -57,8 +53,7 @@ def configure_remote_caches(override_caches, project_caches=None, user_caches=No
}
elif len(user_caches) > 1:
user_config["artifacts"] = [
- {"url": cache.url, "push": cache.push, "type": type_strings[cache.type]}
- for cache in user_caches
+ {"url": cache.url, "push": cache.push, "type": type_strings[cache.type]} for cache in user_caches
]
if len(override_caches) == 1:
@@ -75,11 +70,7 @@ def configure_remote_caches(override_caches, project_caches=None, user_caches=No
user_config["projects"] = {
"test": {
"artifacts": [
- {
- "url": cache.url,
- "push": cache.push,
- "type": type_strings[cache.type],
- }
+ {"url": cache.url, "push": cache.push, "type": type_strings[cache.type],}
for cache in override_caches
]
}
@@ -101,11 +92,7 @@ def configure_remote_caches(override_caches, project_caches=None, user_caches=No
project_config.update(
{
"artifacts": [
- {
- "url": cache.url,
- "push": cache.push,
- "type": type_strings[cache.type],
- }
+ {"url": cache.url, "push": cache.push, "type": type_strings[cache.type],}
for cache in project_caches
]
}
@@ -123,25 +110,15 @@ def configure_remote_caches(override_caches, project_caches=None, user_caches=No
pytest.param([], [], [], id="empty-config"),
pytest.param([], [], [cache1, cache2], id="user-config"),
pytest.param([], [cache1, cache2], [cache3], id="project-config"),
- pytest.param(
- [cache1], [cache2], [cache3], id="project-override-in-user-config"
- ),
- pytest.param(
- [cache1, cache2], [cache3, cache4], [cache5, cache6], id="list-order"
- ),
- pytest.param(
- [cache1, cache2, cache1], [cache2], [cache2, cache1], id="duplicates"
- ),
+ pytest.param([cache1], [cache2], [cache3], id="project-override-in-user-config"),
+ pytest.param([cache1, cache2], [cache3, cache4], [cache5, cache6], id="list-order"),
+ pytest.param([cache1, cache2, cache1], [cache2], [cache2, cache1], id="duplicates"),
pytest.param([cache7, cache8], [], [cache1], id="split-caches"),
],
)
-def test_artifact_cache_precedence(
- tmpdir, override_caches, project_caches, user_caches
-):
+def test_artifact_cache_precedence(tmpdir, override_caches, project_caches, user_caches):
# Produce a fake user and project config with the cache configuration.
- user_config, project_config = configure_remote_caches(
- override_caches, project_caches, user_caches
- )
+ user_config, project_config = configure_remote_caches(override_caches, project_caches, user_caches)
project_config["name"] = "test"
user_config_file = str(tmpdir.join("buildstream.conf"))
@@ -156,14 +133,10 @@ def test_artifact_cache_precedence(
project.ensure_fully_loaded()
# Use the helper from the artifactcache module to parse our configuration.
- parsed_cache_specs = ArtifactCache._configured_remote_cache_specs(
- context, project
- )
+ parsed_cache_specs = ArtifactCache._configured_remote_cache_specs(context, project)
# Verify that it was correctly read.
- expected_cache_specs = list(
- _deduplicate(itertools.chain(override_caches, project_caches, user_caches))
- )
+ expected_cache_specs = list(_deduplicate(itertools.chain(override_caches, project_caches, user_caches)))
assert parsed_cache_specs == expected_cache_specs
@@ -172,19 +145,14 @@ def test_artifact_cache_precedence(
# instead of an unhandled exception.
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
- "config_key, config_value",
- [("client-cert", "client.crt"), ("client-key", "client.key")],
+ "config_key, config_value", [("client-cert", "client.crt"), ("client-key", "client.key")],
)
def test_missing_certs(cli, datafiles, config_key, config_value):
project = os.path.join(datafiles.dirname, datafiles.basename, "missing-certs")
project_conf = {
"name": "test",
- "artifacts": {
- "url": "https://cache.example.com:12345",
- "push": "true",
- config_key: config_value,
- },
+ "artifacts": {"url": "https://cache.example.com:12345", "push": "true", config_key: config_value,},
}
project_conf_file = os.path.join(project, "project.conf")
_yaml.roundtrip_dump(project_conf, project_conf_file)
@@ -215,9 +183,7 @@ def test_only_one(cli, datafiles, override_caches, project_caches, user_caches):
project = os.path.join(datafiles.dirname, datafiles.basename, "only-one")
# Produce a fake user and project config with the cache configuration.
- user_config, project_config = configure_remote_caches(
- override_caches, project_caches, user_caches
- )
+ user_config, project_config = configure_remote_caches(override_caches, project_caches, user_caches)
project_config["name"] = "test"
cli.configure(user_config)
diff --git a/tests/artifactcache/expiry.py b/tests/artifactcache/expiry.py
index 1bc7b9781..030f4a023 100644
--- a/tests/artifactcache/expiry.py
+++ b/tests/artifactcache/expiry.py
@@ -138,10 +138,7 @@ def test_expiry_order(cli, datafiles):
wait_for_cache_granularity()
# Now extract dep.bst
- res = cli.run(
- project=project,
- args=["artifact", "checkout", "dep.bst", "--directory", checkout],
- )
+ res = cli.run(project=project, args=["artifact", "checkout", "dep.bst", "--directory", checkout],)
res.assert_success()
# Finally, build something that will cause the cache to overflow
@@ -203,9 +200,7 @@ def test_keep_dependencies(cli, datafiles):
# duplicating artifacts (bad!) we need to make this equal in size
# or smaller than half the size of its dependencies.
#
- create_element_size(
- "target.bst", project, element_path, ["dependency.bst"], 2000000
- )
+ create_element_size("target.bst", project, element_path, ["dependency.bst"], 2000000)
res = cli.run(project=project, args=["build", "target.bst"])
res.assert_success()
@@ -221,9 +216,7 @@ def test_never_delete_required(cli, datafiles):
project = str(datafiles)
element_path = "elements"
- cli.configure(
- {"cache": {"quota": 10000000}, "scheduler": {"fetchers": 1, "builders": 1}}
- )
+ cli.configure({"cache": {"quota": 10000000}, "scheduler": {"fetchers": 1, "builders": 1}})
# Create a linear build tree
create_element_size("dep1.bst", project, element_path, [], 8000000)
@@ -314,9 +307,7 @@ def test_cleanup_first(cli, datafiles):
#
# Fix the fetchers and builders just to ensure a predictable
# sequence of events (although it does not effect this test)
- cli.configure(
- {"cache": {"quota": 5000000,}, "scheduler": {"fetchers": 1, "builders": 1}}
- )
+ cli.configure({"cache": {"quota": 5000000,}, "scheduler": {"fetchers": 1, "builders": 1}})
# Our cache is now more than full, BuildStream
create_element_size("target2.bst", project, element_path, [], 4000000)
diff --git a/tests/artifactcache/junctions.py b/tests/artifactcache/junctions.py
index 91cc01fff..76ba85fb5 100644
--- a/tests/artifactcache/junctions.py
+++ b/tests/artifactcache/junctions.py
@@ -26,9 +26,7 @@ def test_push_pull(cli, tmpdir, datafiles):
project = os.path.join(str(datafiles), "parent")
base_project = os.path.join(str(project), "base")
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare-parent")
- ) as share, create_artifact_share(
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare-parent")) as share, create_artifact_share(
os.path.join(str(tmpdir), "artifactshare-base")
) as base_share:
@@ -46,9 +44,7 @@ def test_push_pull(cli, tmpdir, datafiles):
project_set_artifacts(base_project, base_share.repo)
# Now try bst artifact push
- result = cli.run(
- project=project, args=["artifact", "push", "--deps", "all", "target.bst"]
- )
+ result = cli.run(project=project, args=["artifact", "push", "--deps", "all", "target.bst"])
assert result.exit_code == 0
# And finally assert that the artifacts are in the right shares
@@ -56,16 +52,12 @@ def test_push_pull(cli, tmpdir, datafiles):
# In the parent project's cache
assert_shared(cli, share, project, "target.bst", project_name="parent")
assert_shared(cli, share, project, "app.bst", project_name="parent")
- assert_not_shared(
- cli, share, base_project, "base-element.bst", project_name="base"
- )
+ assert_not_shared(cli, share, base_project, "base-element.bst", project_name="base")
# In the junction project's cache
assert_not_shared(cli, base_share, project, "target.bst", project_name="parent")
assert_not_shared(cli, base_share, project, "app.bst", project_name="parent")
- assert_shared(
- cli, base_share, base_project, "base-element.bst", project_name="base"
- )
+ assert_shared(cli, base_share, base_project, "base-element.bst", project_name="base")
# Now we've pushed, delete the user's local artifact cache
# directory and try to redownload it from the share
@@ -82,9 +74,7 @@ def test_push_pull(cli, tmpdir, datafiles):
assert state != "cached"
# Now try bst artifact pull
- result = cli.run(
- project=project, args=["artifact", "pull", "--deps", "all", "target.bst"]
- )
+ result = cli.run(project=project, args=["artifact", "pull", "--deps", "all", "target.bst"])
assert result.exit_code == 0
# And assert that they are again in the local cache, without having built
@@ -107,9 +97,7 @@ def test_caching_junction_elements(cli, tmpdir, datafiles):
junction_data["config"] = {"cache-junction-elements": True}
_yaml.roundtrip_dump(junction_data, junction_element)
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare-parent")
- ) as share, create_artifact_share(
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare-parent")) as share, create_artifact_share(
os.path.join(str(tmpdir), "artifactshare-base")
) as base_share:
@@ -127,9 +115,7 @@ def test_caching_junction_elements(cli, tmpdir, datafiles):
project_set_artifacts(base_project, base_share.repo)
# Now try bst artifact push
- result = cli.run(
- project=project, args=["artifact", "push", "--deps", "all", "target.bst"]
- )
+ result = cli.run(project=project, args=["artifact", "push", "--deps", "all", "target.bst"])
assert result.exit_code == 0
# And finally assert that the artifacts are in the right shares
@@ -142,9 +128,7 @@ def test_caching_junction_elements(cli, tmpdir, datafiles):
# The junction project's cache should only contain elements in the junction project
assert_not_shared(cli, base_share, project, "target.bst", project_name="parent")
assert_not_shared(cli, base_share, project, "app.bst", project_name="parent")
- assert_shared(
- cli, base_share, base_project, "base-element.bst", project_name="base"
- )
+ assert_shared(cli, base_share, base_project, "base-element.bst", project_name="base")
@pytest.mark.datafiles(DATA_DIR)
@@ -156,9 +140,7 @@ def test_ignore_junction_remotes(cli, tmpdir, datafiles):
junction_element = os.path.join(project, "base.bst")
junction_data = _yaml.roundtrip_load(junction_element)
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare-parent")
- ) as share, create_artifact_share(
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare-parent")) as share, create_artifact_share(
os.path.join(str(tmpdir), "artifactshare-base")
) as base_share:
@@ -175,16 +157,12 @@ def test_ignore_junction_remotes(cli, tmpdir, datafiles):
# The parent project's cache should only contain project elements
assert_shared(cli, share, project, "target.bst", project_name="parent")
assert_shared(cli, share, project, "app.bst", project_name="parent")
- assert_not_shared(
- cli, share, base_project, "base-element.bst", project_name="base"
- )
+ assert_not_shared(cli, share, base_project, "base-element.bst", project_name="base")
# The junction project's cache should only contain elements in the junction project
assert_not_shared(cli, base_share, project, "target.bst", project_name="parent")
assert_not_shared(cli, base_share, project, "app.bst", project_name="parent")
- assert_shared(
- cli, base_share, base_project, "base-element.bst", project_name="base"
- )
+ assert_shared(cli, base_share, base_project, "base-element.bst", project_name="base")
# Ensure that, from now on, we ignore junction element remotes
junction_data["config"] = {"ignore-junction-remotes": True}
@@ -205,9 +183,7 @@ def test_ignore_junction_remotes(cli, tmpdir, datafiles):
assert state != "cached"
# Now try bst artifact pull
- result = cli.run(
- project=project, args=["artifact", "pull", "--deps", "all", "target.bst"]
- )
+ result = cli.run(project=project, args=["artifact", "pull", "--deps", "all", "target.bst"])
assert result.exit_code == 0
# And assert that they are again in the local cache, without having built
diff --git a/tests/artifactcache/pull.py b/tests/artifactcache/pull.py
index 6c4134b0b..188f4cf97 100644
--- a/tests/artifactcache/pull.py
+++ b/tests/artifactcache/pull.py
@@ -57,9 +57,7 @@ def test_pull(cli, tmpdir, datafiles):
# Assert that we are now cached locally
assert cli.get_element_state(project_dir, "target.bst") == "cached"
# Assert that we shared/pushed the cached artifact
- assert share.get_artifact(
- cli.get_artifact_name(project_dir, "test", "target.bst")
- )
+ assert share.get_artifact(cli.get_artifact_name(project_dir, "test", "target.bst"))
# Delete the artifact locally
cli.remove_artifact_from_cache(project_dir, "target.bst")
@@ -91,9 +89,7 @@ def test_pull(cli, tmpdir, datafiles):
# Manually setup the CAS remote
artifactcache.setup_remotes(use_config=True)
- assert artifactcache.has_push_remotes(
- plugin=element
- ), "No remote configured for element target.bst"
+ assert artifactcache.has_push_remotes(plugin=element), "No remote configured for element target.bst"
assert artifactcache.pull(element, element_key), "Pull operation failed"
assert cli.artifact.is_cached(cache_dir, element, element_key)
@@ -126,9 +122,7 @@ def test_pull_tree(cli, tmpdir, datafiles):
# Assert that we are now cached locally
assert cli.get_element_state(project_dir, "target.bst") == "cached"
# Assert that we shared/pushed the cached artifact
- assert share.get_artifact(
- cli.get_artifact_name(project_dir, "test", "target.bst")
- )
+ assert share.get_artifact(cli.get_artifact_name(project_dir, "test", "target.bst"))
with dummy_context(config=user_config_file) as context:
# Load the project and CAS cache
@@ -142,9 +136,7 @@ def test_pull_tree(cli, tmpdir, datafiles):
assert cli.artifact.is_cached(rootcache_dir, element, element_key)
# Retrieve the Directory object from the cached artifact
- artifact_digest = cli.artifact.get_digest(
- rootcache_dir, element, element_key
- )
+ artifact_digest = cli.artifact.get_digest(rootcache_dir, element, element_key)
artifactcache = context.artifactcache
# Manually setup the CAS remote
@@ -173,9 +165,7 @@ def test_pull_tree(cli, tmpdir, datafiles):
cas.close_grpc_channels()
assert cli.get_element_state(project_dir, "target.bst") != "cached"
- tree_digest = remote_execution_pb2.Digest(
- hash=tree_hash, size_bytes=tree_size
- )
+ tree_digest = remote_execution_pb2.Digest(hash=tree_hash, size_bytes=tree_size)
# Pull the artifact using the Tree object
directory_digest = artifactcache.pull_tree(project, artifact_digest)
@@ -187,9 +177,7 @@ def test_pull_tree(cli, tmpdir, datafiles):
# Directory size now zero with AaaP and stack element commit #1cbc5e63dc
assert directory_hash and not directory_size
- directory_digest = remote_execution_pb2.Digest(
- hash=directory_hash, size_bytes=directory_size
- )
+ directory_digest = remote_execution_pb2.Digest(hash=directory_hash, size_bytes=directory_size)
# Ensure the entire Tree stucture has been pulled
assert os.path.exists(cas.objpath(directory_digest))
diff --git a/tests/artifactcache/push.py b/tests/artifactcache/push.py
index dded57563..238d5f7ef 100644
--- a/tests/artifactcache/push.py
+++ b/tests/artifactcache/push.py
@@ -43,9 +43,7 @@ def _push(cli, cache_dir, project_dir, config_file, target):
artifactcache.setup_remotes(use_config=True)
artifactcache.initialize_remotes()
- assert artifactcache.has_push_remotes(
- plugin=element
- ), "No remote configured for element target.bst"
+ assert artifactcache.has_push_remotes(plugin=element), "No remote configured for element target.bst"
assert element._push(), "Push operation failed"
return element_key
@@ -75,14 +73,8 @@ def test_push(cli, tmpdir, datafiles):
# Write down the user configuration file
_yaml.roundtrip_dump(user_config, file=user_config_file)
- element_key = _push(
- cli, rootcache_dir, project_dir, user_config_file, "target.bst"
- )
- assert share.get_artifact(
- cli.get_artifact_name(
- project_dir, "test", "target.bst", cache_key=element_key
- )
- )
+ element_key = _push(cli, rootcache_dir, project_dir, user_config_file, "target.bst")
+ assert share.get_artifact(cli.get_artifact_name(project_dir, "test", "target.bst", cache_key=element_key))
@pytest.mark.datafiles(DATA_DIR)
@@ -115,9 +107,7 @@ def test_push_split(cli, tmpdir, datafiles):
element_key = _push(cli, rootcache_dir, project_dir, config_path, "target.bst")
proto = index.get_artifact_proto(
- cli.get_artifact_name(
- project_dir, "test", "target.bst", cache_key=element_key
- )
+ cli.get_artifact_name(project_dir, "test", "target.bst", cache_key=element_key)
)
assert storage.get_cas_files(proto) is not None
@@ -165,7 +155,5 @@ def test_push_message(tmpdir, datafiles):
message_hash, message_size = command_digest.hash, command_digest.size_bytes
assert message_hash and message_size
- message_digest = remote_execution_pb2.Digest(
- hash=message_hash, size_bytes=message_size
- )
+ message_digest = remote_execution_pb2.Digest(hash=message_hash, size_bytes=message_size)
assert share.has_object(message_digest)
diff --git a/tests/cachekey/cachekey.py b/tests/cachekey/cachekey.py
index 882d07240..eb248b9ed 100644
--- a/tests/cachekey/cachekey.py
+++ b/tests/cachekey/cachekey.py
@@ -153,9 +153,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project",)
# The cache key test uses a project which exercises all plugins,
# so we cant run it at all if we dont have them installed.
#
-@pytest.mark.skipif(
- MACHINE_ARCH != "x86-64", reason="Cache keys depend on architecture"
-)
+@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Cache keys depend on architecture")
@pytest.mark.skipif(not IS_LINUX, reason="Only available on linux")
@pytest.mark.skipif(HAVE_BZR is False, reason="bzr is not available")
@pytest.mark.skipif(HAVE_GIT is False, reason="git is not available")
@@ -174,11 +172,7 @@ def test_cache_key(datafiles, cli):
# https://github.com/omarkohl/pytest-datafiles/issues/11
os.chmod(goodbye_link, 0o755)
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--format", "%{name}::%{full-key}", "target.bst"],
- )
+ result = cli.run(project=project, silent=True, args=["show", "--format", "%{name}::%{full-key}", "target.bst"],)
result.assert_success()
assert_cache_keys(project, result.output)
@@ -198,9 +192,7 @@ def test_cache_key(datafiles, cli):
],
],
)
-def test_cache_key_fatal_warnings(
- cli, tmpdir, first_warnings, second_warnings, identical_keys
-):
+def test_cache_key_fatal_warnings(cli, tmpdir, first_warnings, second_warnings, identical_keys):
# Builds project, Runs bst show, gathers cache keys
def run_get_cache_key(project_name, warnings):
@@ -218,10 +210,7 @@ def test_cache_key_fatal_warnings(
element_file = str(elem_dir.join("stack.bst"))
_yaml.roundtrip_dump({"kind": "stack"}, file=element_file)
- result = cli.run(
- project=str(project_dir),
- args=["show", "--format", "%{name}::%{full-key}", "stack.bst"],
- )
+ result = cli.run(project=str(project_dir), args=["show", "--format", "%{name}::%{full-key}", "stack.bst"],)
return result.output
# Returns true if all keys are identical
@@ -241,23 +230,15 @@ def test_keys_stable_over_targets(cli, datafiles):
target2 = "elements/key-stability/t2.bst"
project = str(datafiles)
- full_graph_result = cli.run(
- project=project, args=["show", "--format", "%{name}::%{full-key}", root_element]
- )
+ full_graph_result = cli.run(project=project, args=["show", "--format", "%{name}::%{full-key}", root_element])
full_graph_result.assert_success()
all_cache_keys = parse_output_keys(full_graph_result.output)
- ordering1_result = cli.run(
- project=project,
- args=["show", "--format", "%{name}::%{full-key}", target1, target2],
- )
+ ordering1_result = cli.run(project=project, args=["show", "--format", "%{name}::%{full-key}", target1, target2],)
ordering1_result.assert_success()
ordering1_cache_keys = parse_output_keys(ordering1_result.output)
- ordering2_result = cli.run(
- project=project,
- args=["show", "--format", "%{name}::%{full-key}", target2, target1],
- )
+ ordering2_result = cli.run(project=project, args=["show", "--format", "%{name}::%{full-key}", target2, target1],)
ordering2_result.assert_success()
ordering2_cache_keys = parse_output_keys(ordering2_result.output)
diff --git a/tests/cachekey/update.py b/tests/cachekey/update.py
index ae8b368c5..2dd4085c2 100755
--- a/tests/cachekey/update.py
+++ b/tests/cachekey/update.py
@@ -45,13 +45,7 @@ def update_keys():
result = cli.run(
project=PROJECT_DIR,
silent=True,
- args=[
- "--no-colors",
- "show",
- "--format",
- "%{name}::%{full-key}",
- "target.bst",
- ],
+ args=["--no-colors", "show", "--format", "%{name}::%{full-key}", "target.bst",],
)
# Load the actual keys, and the expected ones if they exist
diff --git a/tests/conftest.py b/tests/conftest.py
index 05a4853f6..610423443 100755
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -28,9 +28,7 @@ from buildstream.testing._fixtures import (
reset_global_node_state,
thread_check,
) # pylint: disable=unused-import
-from buildstream.testing.integration import (
- integration_cache,
-) # pylint: disable=unused-import
+from buildstream.testing.integration import integration_cache # pylint: disable=unused-import
from tests.testutils.repo.git import Git
@@ -51,17 +49,11 @@ from tests.testutils.repo.zip import Zip
#################################################
def pytest_addoption(parser):
parser.addoption(
- "--integration",
- action="store_true",
- default=False,
- help="Run integration tests",
+ "--integration", action="store_true", default=False, help="Run integration tests",
)
parser.addoption(
- "--remote-execution",
- action="store_true",
- default=False,
- help="Run remote-execution tests only",
+ "--remote-execution", action="store_true", default=False, help="Run remote-execution tests only",
)
diff --git a/tests/elements/filter.py b/tests/elements/filter.py
index 54ddf216a..3b0be378a 100644
--- a/tests/elements/filter.py
+++ b/tests/elements/filter.py
@@ -22,10 +22,7 @@ def test_filter_include(datafiles, cli, tmpdir):
result.assert_success()
checkout = os.path.join(tmpdir.dirname, tmpdir.basename, "checkout")
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "output-include.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "output-include.bst", "--directory", checkout],)
result.assert_success()
assert os.path.exists(os.path.join(checkout, "foo"))
assert not os.path.exists(os.path.join(checkout, "bar"))
@@ -40,14 +37,7 @@ def test_filter_include_dynamic(datafiles, cli, tmpdir):
checkout = os.path.join(tmpdir.dirname, tmpdir.basename, "checkout")
result = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- "output-dynamic-include.bst",
- "--directory",
- checkout,
- ],
+ project=project, args=["artifact", "checkout", "output-dynamic-include.bst", "--directory", checkout,],
)
result.assert_success()
assert os.path.exists(os.path.join(checkout, "foo"))
@@ -61,10 +51,7 @@ def test_filter_exclude(datafiles, cli, tmpdir):
result.assert_success()
checkout = os.path.join(tmpdir.dirname, tmpdir.basename, "checkout")
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "output-exclude.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "output-exclude.bst", "--directory", checkout],)
result.assert_success()
assert not os.path.exists(os.path.join(checkout, "foo"))
assert os.path.exists(os.path.join(checkout, "bar"))
@@ -77,10 +64,7 @@ def test_filter_orphans(datafiles, cli, tmpdir):
result.assert_success()
checkout = os.path.join(tmpdir.dirname, tmpdir.basename, "checkout")
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "output-orphans.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "output-orphans.bst", "--directory", checkout],)
result.assert_success()
assert os.path.exists(os.path.join(checkout, "baz"))
@@ -91,10 +75,7 @@ def test_filter_deps_ok(datafiles, cli):
result = cli.run(project=project, args=["build", "deps-permitted.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["show", "--deps=run", "--format='%{name}'", "deps-permitted.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps=run", "--format='%{name}'", "deps-permitted.bst"],)
result.assert_success()
assert "output-exclude.bst" in result.output
@@ -133,10 +114,7 @@ def test_filter_forbid_also_rdep(datafiles, cli):
def test_filter_workspace_open(datafiles, cli, tmpdir):
project = str(datafiles)
workspace_dir = os.path.join(tmpdir.dirname, tmpdir.basename, "workspace")
- result = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace_dir, "deps-permitted.bst"],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--directory", workspace_dir, "deps-permitted.bst"],)
result.assert_success()
assert os.path.exists(os.path.join(workspace_dir, "foo"))
assert os.path.exists(os.path.join(workspace_dir, "bar"))
@@ -147,9 +125,7 @@ def test_filter_workspace_open(datafiles, cli, tmpdir):
def test_filter_workspace_open_multi(datafiles, cli):
project = str(datafiles)
result = cli.run(
- cwd=project,
- project=project,
- args=["workspace", "open", "deps-permitted.bst", "output-orphans.bst"],
+ cwd=project, project=project, args=["workspace", "open", "deps-permitted.bst", "output-orphans.bst"],
)
result.assert_success()
assert os.path.exists(os.path.join(project, "input"))
@@ -160,10 +136,7 @@ def test_filter_workspace_build(datafiles, cli, tmpdir):
project = str(datafiles)
tempdir = os.path.join(tmpdir.dirname, tmpdir.basename)
workspace_dir = os.path.join(tempdir, "workspace")
- result = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace_dir, "output-orphans.bst"],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--directory", workspace_dir, "output-orphans.bst"],)
result.assert_success()
src = os.path.join(workspace_dir, "foo")
dst = os.path.join(workspace_dir, "quux")
@@ -172,14 +145,7 @@ def test_filter_workspace_build(datafiles, cli, tmpdir):
result.assert_success()
checkout_dir = os.path.join(tempdir, "checkout")
result = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- "output-orphans.bst",
- "--directory",
- checkout_dir,
- ],
+ project=project, args=["artifact", "checkout", "output-orphans.bst", "--directory", checkout_dir,],
)
result.assert_success()
assert os.path.exists(os.path.join(checkout_dir, "quux"))
@@ -190,10 +156,7 @@ def test_filter_workspace_close(datafiles, cli, tmpdir):
project = str(datafiles)
tempdir = os.path.join(tmpdir.dirname, tmpdir.basename)
workspace_dir = os.path.join(tempdir, "workspace")
- result = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace_dir, "output-orphans.bst"],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--directory", workspace_dir, "output-orphans.bst"],)
result.assert_success()
src = os.path.join(workspace_dir, "foo")
dst = os.path.join(workspace_dir, "quux")
@@ -204,14 +167,7 @@ def test_filter_workspace_close(datafiles, cli, tmpdir):
result.assert_success()
checkout_dir = os.path.join(tempdir, "checkout")
result = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- "output-orphans.bst",
- "--directory",
- checkout_dir,
- ],
+ project=project, args=["artifact", "checkout", "output-orphans.bst", "--directory", checkout_dir,],
)
result.assert_success()
assert not os.path.exists(os.path.join(checkout_dir, "quux"))
@@ -222,10 +178,7 @@ def test_filter_workspace_reset(datafiles, cli, tmpdir):
project = str(datafiles)
tempdir = os.path.join(tmpdir.dirname, tmpdir.basename)
workspace_dir = os.path.join(tempdir, "workspace")
- result = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace_dir, "output-orphans.bst"],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--directory", workspace_dir, "output-orphans.bst"],)
result.assert_success()
src = os.path.join(workspace_dir, "foo")
dst = os.path.join(workspace_dir, "quux")
@@ -236,14 +189,7 @@ def test_filter_workspace_reset(datafiles, cli, tmpdir):
result.assert_success()
checkout_dir = os.path.join(tempdir, "checkout")
result = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- "output-orphans.bst",
- "--directory",
- checkout_dir,
- ],
+ project=project, args=["artifact", "checkout", "output-orphans.bst", "--directory", checkout_dir,],
)
result.assert_success()
assert not os.path.exists(os.path.join(checkout_dir, "quux"))
@@ -341,10 +287,7 @@ def test_filter_track_excepted(datafiles, cli, tmpdir):
assert cli.get_element_state(project, input_name) == "no reference"
# Now try to track it
- result = cli.run(
- project=project,
- args=["source", "track", "filter2.bst", "--except", "input.bst"],
- )
+ result = cli.run(project=project, args=["source", "track", "filter2.bst", "--except", "input.bst"],)
result.assert_success()
# Now check that a ref field exists
@@ -394,9 +337,7 @@ def test_filter_track_multi_to_one(datafiles, cli, tmpdir):
assert cli.get_element_state(project, input_name) == "no reference"
# Now try to track it
- result = cli.run(
- project=project, args=["source", "track", "filter1.bst", "filter2.bst"]
- )
+ result = cli.run(project=project, args=["source", "track", "filter1.bst", "filter2.bst"])
result.assert_success()
# Now check that a ref field exists
@@ -457,9 +398,7 @@ def test_filter_track_multi(datafiles, cli, tmpdir):
}
# Now try to track it
- result = cli.run(
- project=project, args=["source", "track", "filter1.bst", "filter2.bst"]
- )
+ result = cli.run(project=project, args=["source", "track", "filter1.bst", "filter2.bst"])
result.assert_success()
# Now check that a ref field exists
@@ -524,10 +463,7 @@ def test_filter_track_multi_exclude(datafiles, cli, tmpdir):
}
# Now try to track it
- result = cli.run(
- project=project,
- args=["source", "track", "filter1.bst", "filter2.bst", "--except", input_name],
- )
+ result = cli.run(project=project, args=["source", "track", "filter1.bst", "filter2.bst", "--except", input_name],)
result.assert_success()
# Now check that a ref field exists
@@ -544,21 +480,13 @@ def test_filter_track_multi_exclude(datafiles, cli, tmpdir):
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_filter_include_with_indirect_deps(datafiles, cli, tmpdir):
project = str(datafiles)
- result = cli.run(
- project=project, args=["build", "output-include-with-indirect-deps.bst"]
- )
+ result = cli.run(project=project, args=["build", "output-include-with-indirect-deps.bst"])
result.assert_success()
checkout = os.path.join(tmpdir.dirname, tmpdir.basename, "checkout")
result = cli.run(
project=project,
- args=[
- "artifact",
- "checkout",
- "output-include-with-indirect-deps.bst",
- "--directory",
- checkout,
- ],
+ args=["artifact", "checkout", "output-include-with-indirect-deps.bst", "--directory", checkout,],
)
result.assert_success()
@@ -573,9 +501,7 @@ def test_filter_include_with_indirect_deps(datafiles, cli, tmpdir):
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_filter_fails_for_nonexisting_domain(datafiles, cli):
project = str(datafiles)
- result = cli.run(
- project=project, args=["build", "output-include-nonexistent-domain.bst"]
- )
+ result = cli.run(project=project, args=["build", "output-include-nonexistent-domain.bst"])
result.assert_main_error(ErrorDomain.STREAM, None)
error = "Unknown domains were used in output-include-nonexistent-domain.bst [line 7 column 2]"
@@ -599,14 +525,7 @@ def test_filter_pass_integration(datafiles, cli):
checkout_dir = os.path.join(project, "no-pass")
result = cli.run(
project=project,
- args=[
- "artifact",
- "checkout",
- "--integrate",
- "--directory",
- checkout_dir,
- "no-pass-integration.bst",
- ],
+ args=["artifact", "checkout", "--integrate", "--directory", checkout_dir, "no-pass-integration.bst",],
)
result.assert_success()
@@ -615,14 +534,7 @@ def test_filter_pass_integration(datafiles, cli):
checkout_dir = os.path.join(project, "pass")
result = cli.run(
project=project,
- args=[
- "artifact",
- "checkout",
- "--integrate",
- "--directory",
- checkout_dir,
- "pass-integration.bst",
- ],
+ args=["artifact", "checkout", "--integrate", "--directory", checkout_dir, "pass-integration.bst",],
)
result.assert_main_error(ErrorDomain.STREAM, "missing-command")
diff --git a/tests/elements/filter/basic/element_plugins/dynamic.py b/tests/elements/filter/basic/element_plugins/dynamic.py
index 6cd6b1093..bf079111f 100644
--- a/tests/elements/filter/basic/element_plugins/dynamic.py
+++ b/tests/elements/filter/basic/element_plugins/dynamic.py
@@ -5,10 +5,7 @@ from buildstream import Element, Scope
class DynamicElement(Element):
def configure(self, node):
node.validate_keys(["split-rules"])
- self.split_rules = {
- key: value.as_str_list()
- for key, value in node.get_mapping("split-rules").items()
- }
+ self.split_rules = {key: value.as_str_list() for key, value in node.get_mapping("split-rules").items()}
def preflight(self):
pass
diff --git a/tests/examples/autotools.py b/tests/examples/autotools.py
index e684fd43c..bd5d530ef 100644
--- a/tests/examples/autotools.py
+++ b/tests/examples/autotools.py
@@ -10,24 +10,14 @@ from buildstream.testing._utils.site import IS_LINUX, MACHINE_ARCH, HAVE_SANDBOX
pytestmark = pytest.mark.integration
-DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- "..",
- "..",
- "doc",
- "examples",
- "autotools",
-)
+DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "doc", "examples", "autotools",)
# Tests a build of the autotools amhello project on a alpine-linux base runtime
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox")
@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox"
-)
-@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot",
- reason="This test is not meant to work with chroot sandbox",
+ HAVE_SANDBOX == "chroot", reason="This test is not meant to work with chroot sandbox",
)
@pytest.mark.datafiles(DATA_DIR)
def test_autotools_build(cli, datafiles):
@@ -38,10 +28,7 @@ def test_autotools_build(cli, datafiles):
result = cli.run(project=project, args=["build", "hello.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "hello.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "hello.bst", "--directory", checkout],)
result.assert_success()
assert_contains(
@@ -61,13 +48,10 @@ def test_autotools_build(cli, datafiles):
# Test running an executable built with autotools.
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
-@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox"
-)
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot",
- reason="This test is not meant to work with chroot sandbox",
+ HAVE_SANDBOX == "chroot", reason="This test is not meant to work with chroot sandbox",
)
@pytest.mark.datafiles(DATA_DIR)
def test_autotools_run(cli, datafiles):
diff --git a/tests/examples/developing.py b/tests/examples/developing.py
index df6e82623..4d7d8ab69 100644
--- a/tests/examples/developing.py
+++ b/tests/examples/developing.py
@@ -11,24 +11,13 @@ import tests.testutils.patch as patch
pytestmark = pytest.mark.integration
-DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- "..",
- "..",
- "doc",
- "examples",
- "developing",
-)
+DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "doc", "examples", "developing",)
# Test that the project builds successfully
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
-@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with SANDBOX"
-)
-@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot", reason="This is not meant to work with chroot"
-)
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with SANDBOX")
+@pytest.mark.skipif(HAVE_SANDBOX == "chroot", reason="This is not meant to work with chroot")
@pytest.mark.datafiles(DATA_DIR)
def test_autotools_build(cli, datafiles):
project = str(datafiles)
@@ -38,25 +27,16 @@ def test_autotools_build(cli, datafiles):
result = cli.run(project=project, args=["build", "hello.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "hello.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "hello.bst", "--directory", checkout],)
result.assert_success()
- assert_contains(
- checkout, ["/usr", "/usr/lib", "/usr/bin", "/usr/share", "/usr/bin/hello"]
- )
+ assert_contains(checkout, ["/usr", "/usr/lib", "/usr/bin", "/usr/share", "/usr/bin/hello"])
# Test the unmodified hello command works as expected.
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
-@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with SANDBOX"
-)
-@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot", reason="This is not meant to work with chroot"
-)
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with SANDBOX")
+@pytest.mark.skipif(HAVE_SANDBOX == "chroot", reason="This is not meant to work with chroot")
@pytest.mark.datafiles(DATA_DIR)
def test_run_unmodified_hello(cli, datafiles):
project = str(datafiles)
@@ -76,38 +56,26 @@ def test_open_workspace(cli, tmpdir, datafiles):
project = str(datafiles)
workspace_dir = os.path.join(str(tmpdir), "workspace_hello")
- result = cli.run(
- project=project,
- args=["workspace", "open", "-f", "--directory", workspace_dir, "hello.bst",],
- )
+ result = cli.run(project=project, args=["workspace", "open", "-f", "--directory", workspace_dir, "hello.bst",],)
result.assert_success()
result = cli.run(project=project, args=["workspace", "list"])
result.assert_success()
- result = cli.run(
- project=project, args=["workspace", "close", "--remove-dir", "hello.bst"]
- )
+ result = cli.run(project=project, args=["workspace", "close", "--remove-dir", "hello.bst"])
result.assert_success()
# Test making a change using the workspace
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
-@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with SANDBOX"
-)
-@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot", reason="This is not meant to work with chroot"
-)
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with SANDBOX")
+@pytest.mark.skipif(HAVE_SANDBOX == "chroot", reason="This is not meant to work with chroot")
@pytest.mark.datafiles(DATA_DIR)
def test_make_change_in_workspace(cli, tmpdir, datafiles):
project = str(datafiles)
workspace_dir = os.path.join(str(tmpdir), "workspace_hello")
- result = cli.run(
- project=project,
- args=["workspace", "open", "-f", "--directory", workspace_dir, "hello.bst"],
- )
+ result = cli.run(project=project, args=["workspace", "open", "-f", "--directory", workspace_dir, "hello.bst"],)
result.assert_success()
result = cli.run(project=project, args=["workspace", "list"])
@@ -124,7 +92,5 @@ def test_make_change_in_workspace(cli, tmpdir, datafiles):
result.assert_success()
assert result.output == "Hello World\nWe can use workspaces!\n"
- result = cli.run(
- project=project, args=["workspace", "close", "--remove-dir", "hello.bst"]
- )
+ result = cli.run(project=project, args=["workspace", "close", "--remove-dir", "hello.bst"])
result.assert_success()
diff --git a/tests/examples/first-project.py b/tests/examples/first-project.py
index 4a378df62..906bb326d 100644
--- a/tests/examples/first-project.py
+++ b/tests/examples/first-project.py
@@ -12,14 +12,7 @@ from buildstream.testing._utils.site import IS_LINUX
pytestmark = pytest.mark.integration
-DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- "..",
- "..",
- "doc",
- "examples",
- "first-project",
-)
+DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "doc", "examples", "first-project",)
@pytest.mark.skipif(not IS_LINUX, reason="Only available on linux")
@@ -31,10 +24,7 @@ def test_first_project_build_checkout(cli, datafiles):
result = cli.run(project=project, args=["build", "hello.bst"])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "hello.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "hello.bst", "--directory", checkout],)
assert result.exit_code == 0
assert_contains(checkout, ["/hello.world"])
diff --git a/tests/examples/flatpak-autotools.py b/tests/examples/flatpak-autotools.py
index 4e7a9e36f..9e9ee8827 100644
--- a/tests/examples/flatpak-autotools.py
+++ b/tests/examples/flatpak-autotools.py
@@ -13,19 +13,12 @@ pytestmark = pytest.mark.integration
DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- "..",
- "..",
- "doc",
- "examples",
- "flatpak-autotools",
+ os.path.dirname(os.path.realpath(__file__)), "..", "..", "doc", "examples", "flatpak-autotools",
)
try:
- from bst_plugins_experimental.sources import (
- _ostree,
- ) # pylint: disable=unused-import
+ from bst_plugins_experimental.sources import _ostree # pylint: disable=unused-import
# Even when we have the plugin, it might be missing dependencies. This requires
# bst_plugins_experimantal to be fully installed, with host ostree dependencies
@@ -41,15 +34,9 @@ except (ImportError, ValueError):
def workaround_setuptools_bug(project):
os.makedirs(os.path.join(project, "files", "links"), exist_ok=True)
try:
- os.symlink(
- os.path.join("usr", "lib"), os.path.join(project, "files", "links", "lib")
- )
- os.symlink(
- os.path.join("usr", "bin"), os.path.join(project, "files", "links", "bin")
- )
- os.symlink(
- os.path.join("usr", "etc"), os.path.join(project, "files", "links", "etc")
- )
+ os.symlink(os.path.join("usr", "lib"), os.path.join(project, "files", "links", "lib"))
+ os.symlink(os.path.join("usr", "bin"), os.path.join(project, "files", "links", "bin"))
+ os.symlink(os.path.join("usr", "etc"), os.path.join(project, "files", "links", "etc"))
except FileExistsError:
# If the files exist, we're running from a git checkout and
# not a source distribution, no need to complain
@@ -59,9 +46,7 @@ def workaround_setuptools_bug(project):
# Test that a build upon flatpak runtime 'works' - we use the autotools sample
# amhello project for this.
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
-@pytest.mark.skipif(
- not IS_LINUX or not HAVE_OSTREE_PLUGIN, reason="Only available on linux with ostree"
-)
+@pytest.mark.skipif(not IS_LINUX or not HAVE_OSTREE_PLUGIN, reason="Only available on linux with ostree")
@pytest.mark.datafiles(DATA_DIR)
def test_autotools_build(cli, datafiles):
project = str(datafiles)
@@ -71,10 +56,7 @@ def test_autotools_build(cli, datafiles):
result = cli.run(project=project, args=["build", "hello.bst"])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "hello.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "hello.bst", "--directory", checkout],)
assert result.exit_code == 0
assert_contains(
@@ -94,9 +76,7 @@ def test_autotools_build(cli, datafiles):
# Test running an executable built with autotools
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
-@pytest.mark.skipif(
- not IS_LINUX or not HAVE_OSTREE_PLUGIN, reason="Only available on linux with ostree"
-)
+@pytest.mark.skipif(not IS_LINUX or not HAVE_OSTREE_PLUGIN, reason="Only available on linux with ostree")
@pytest.mark.datafiles(DATA_DIR)
def test_autotools_run(cli, datafiles):
project = str(datafiles)
diff --git a/tests/examples/integration-commands.py b/tests/examples/integration-commands.py
index fac45fd22..257ecc44f 100644
--- a/tests/examples/integration-commands.py
+++ b/tests/examples/integration-commands.py
@@ -10,22 +10,14 @@ from buildstream.testing._utils.site import IS_LINUX, MACHINE_ARCH, HAVE_SANDBOX
pytestmark = pytest.mark.integration
DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- "..",
- "..",
- "doc",
- "examples",
- "integration-commands",
+ os.path.dirname(os.path.realpath(__file__)), "..", "..", "doc", "examples", "integration-commands",
)
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox")
@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox"
-)
-@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot",
- reason="This test is not meant to work with chroot sandbox",
+ HAVE_SANDBOX == "chroot", reason="This test is not meant to work with chroot sandbox",
)
@pytest.mark.datafiles(DATA_DIR)
def test_integration_commands_build(cli, datafiles):
@@ -37,12 +29,9 @@ def test_integration_commands_build(cli, datafiles):
# Test running the executable
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox")
@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox"
-)
-@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot",
- reason="This test is not meant to work with chroot sandbox",
+ HAVE_SANDBOX == "chroot", reason="This test is not meant to work with chroot sandbox",
)
@pytest.mark.datafiles(DATA_DIR)
def test_integration_commands_run(cli, datafiles):
@@ -51,8 +40,6 @@ def test_integration_commands_run(cli, datafiles):
result = cli.run(project=project, args=["build", "hello.bst"])
assert result.exit_code == 0
- result = cli.run(
- project=project, args=["shell", "hello.bst", "--", "hello", "pony"]
- )
+ result = cli.run(project=project, args=["shell", "hello.bst", "--", "hello", "pony"])
assert result.exit_code == 0
assert result.output == "Hello pony\n"
diff --git a/tests/examples/junctions.py b/tests/examples/junctions.py
index e93db8a68..c0a83a254 100644
--- a/tests/examples/junctions.py
+++ b/tests/examples/junctions.py
@@ -9,24 +9,14 @@ from buildstream.testing._utils.site import IS_LINUX, MACHINE_ARCH, HAVE_SANDBOX
pytestmark = pytest.mark.integration
-DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- "..",
- "..",
- "doc",
- "examples",
- "junctions",
-)
+DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "doc", "examples", "junctions",)
# Test that the project builds successfully
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with bubblewrap")
@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with bubblewrap"
-)
-@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot",
- reason="This test is not meant to work with chroot sandbox",
+ HAVE_SANDBOX == "chroot", reason="This test is not meant to work with chroot sandbox",
)
@pytest.mark.datafiles(DATA_DIR)
def test_build(cli, datafiles):
@@ -38,12 +28,9 @@ def test_build(cli, datafiles):
# Test the callHello script works as expected.
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with bubblewrap")
@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with bubblewrap"
-)
-@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot",
- reason="This test is not meant to work with chroot sandbox",
+ HAVE_SANDBOX == "chroot", reason="This test is not meant to work with chroot sandbox",
)
@pytest.mark.datafiles(DATA_DIR)
def test_shell_call_hello(cli, datafiles):
@@ -52,10 +39,7 @@ def test_shell_call_hello(cli, datafiles):
result = cli.run(project=project, args=["build", "callHello.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["shell", "callHello.bst", "--", "/bin/sh", "callHello.sh"],
- )
+ result = cli.run(project=project, args=["shell", "callHello.bst", "--", "/bin/sh", "callHello.sh"],)
result.assert_success()
assert result.output == "Calling hello:\nHello World!\nThis is amhello 1.0.\n"
@@ -68,19 +52,9 @@ def test_open_cross_junction_workspace(cli, tmpdir, datafiles):
workspace_dir = os.path.join(str(tmpdir), "workspace_hello_junction")
result = cli.run(
- project=project,
- args=[
- "workspace",
- "open",
- "--directory",
- workspace_dir,
- "hello-junction.bst:hello.bst",
- ],
+ project=project, args=["workspace", "open", "--directory", workspace_dir, "hello-junction.bst:hello.bst",],
)
result.assert_success()
- result = cli.run(
- project=project,
- args=["workspace", "close", "--remove-dir", "hello-junction.bst:hello.bst"],
- )
+ result = cli.run(project=project, args=["workspace", "close", "--remove-dir", "hello-junction.bst:hello.bst"],)
result.assert_success()
diff --git a/tests/examples/running-commands.py b/tests/examples/running-commands.py
index 177f4e3cc..3d6fd0d26 100644
--- a/tests/examples/running-commands.py
+++ b/tests/examples/running-commands.py
@@ -10,23 +10,15 @@ from buildstream.testing._utils.site import IS_LINUX, MACHINE_ARCH, HAVE_SANDBOX
pytestmark = pytest.mark.integration
DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- "..",
- "..",
- "doc",
- "examples",
- "running-commands",
+ os.path.dirname(os.path.realpath(__file__)), "..", "..", "doc", "examples", "running-commands",
)
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox")
@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox"
-)
-@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot",
- reason="This test is not meant to work with chroot sandbox",
+ HAVE_SANDBOX == "chroot", reason="This test is not meant to work with chroot sandbox",
)
def test_running_commands_build(cli, datafiles):
project = str(datafiles)
@@ -37,12 +29,9 @@ def test_running_commands_build(cli, datafiles):
# Test running the executable
@pytest.mark.skipif(MACHINE_ARCH != "x86-64", reason="Examples are written for x86-64")
+@pytest.mark.skipif(not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox")
@pytest.mark.skipif(
- not IS_LINUX or not HAVE_SANDBOX, reason="Only available on linux with sandbox"
-)
-@pytest.mark.skipif(
- HAVE_SANDBOX == "chroot",
- reason="This test is not meant to work with chroot sandbox",
+ HAVE_SANDBOX == "chroot", reason="This test is not meant to work with chroot sandbox",
)
@pytest.mark.datafiles(DATA_DIR)
def test_running_commands_run(cli, datafiles):
diff --git a/tests/external_plugins.py b/tests/external_plugins.py
index 3e5684ea5..2123b846b 100644
--- a/tests/external_plugins.py
+++ b/tests/external_plugins.py
@@ -31,17 +31,7 @@ class ExternalPluginRepo:
def clone(self, location):
self._clone_location = os.path.join(location, self.name)
subprocess.run(
- [
- "git",
- "clone",
- "--single-branch",
- "--branch",
- self.ref,
- "--depth",
- "1",
- self.url,
- self._clone_location,
- ]
+ ["git", "clone", "--single-branch", "--branch", self.ref, "--depth", "1", self.url, self._clone_location,]
)
return self._clone_location
@@ -61,9 +51,7 @@ class ExternalPluginRepo:
match_list.extend(matches)
if not match_list:
- raise ValueError(
- "No matches found for patterns {}".format(self._test_match_patterns)
- )
+ raise ValueError("No matches found for patterns {}".format(self._test_match_patterns))
return match_list
diff --git a/tests/format/include.py b/tests/format/include.py
index 9aec83ff5..d61754d82 100644
--- a/tests/format/include.py
+++ b/tests/format/include.py
@@ -18,10 +18,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "include")
@pytest.mark.datafiles(DATA_DIR)
def test_include_project_file(cli, datafiles):
project = os.path.join(str(datafiles), "file")
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],)
result.assert_success()
loaded = _yaml.load_data(result.output)
assert loaded.get_bool("included")
@@ -79,16 +76,10 @@ def test_include_junction_file(cli, tmpdir, datafiles):
project = os.path.join(str(datafiles), "junction")
generate_junction(
- tmpdir,
- os.path.join(project, "subproject"),
- os.path.join(project, "junction.bst"),
- store_ref=True,
+ tmpdir, os.path.join(project, "subproject"), os.path.join(project, "junction.bst"), store_ref=True,
)
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],)
result.assert_success()
loaded = _yaml.load_data(result.output)
assert loaded.get_bool("included")
@@ -100,17 +91,7 @@ def test_include_junction_options(cli, datafiles):
result = cli.run(
project=project,
- args=[
- "-o",
- "build_arch",
- "x86_64",
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- "element.bst",
- ],
+ args=["-o", "build_arch", "x86_64", "show", "--deps", "none", "--format", "%{vars}", "element.bst",],
)
result.assert_success()
loaded = _yaml.load_data(result.output)
@@ -135,10 +116,7 @@ def test_junction_element_partial_project_project(cli, tmpdir, datafiles):
element = {"kind": "junction", "sources": [repo.source_config(ref=ref)]}
_yaml.roundtrip_dump(element, junction_path)
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{vars}", "junction.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "junction.bst"],)
result.assert_success()
loaded = _yaml.load_data(result.output)
assert loaded.get_str("included", default=None) is None
@@ -162,10 +140,7 @@ def test_junction_element_not_partial_project_file(cli, tmpdir, datafiles):
element = {"kind": "junction", "sources": [repo.source_config(ref=ref)]}
_yaml.roundtrip_dump(element, junction_path)
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{vars}", "junction.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "junction.bst"],)
result.assert_success()
loaded = _yaml.load_data(result.output)
assert loaded.get_str("included", default=None) is not None
@@ -175,10 +150,7 @@ def test_junction_element_not_partial_project_file(cli, tmpdir, datafiles):
def test_include_element_overrides(cli, datafiles):
project = os.path.join(str(datafiles), "overrides")
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],)
result.assert_success()
loaded = _yaml.load_data(result.output)
assert loaded.get_str("manual_main_override", default=None) is not None
@@ -189,10 +161,7 @@ def test_include_element_overrides(cli, datafiles):
def test_include_element_overrides_composition(cli, datafiles):
project = os.path.join(str(datafiles), "overrides")
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{config}", "element.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{config}", "element.bst"],)
result.assert_success()
loaded = _yaml.load_data(result.output)
assert loaded.get_str_list("build-commands") == ["first", "second"]
@@ -202,10 +171,7 @@ def test_include_element_overrides_composition(cli, datafiles):
def test_list_overide_does_not_fail_upon_first_composition(cli, datafiles):
project = os.path.join(str(datafiles), "eventual_overrides")
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{public}", "element.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{public}", "element.bst"],)
result.assert_success()
loaded = _yaml.load_data(result.output)
@@ -219,10 +185,7 @@ def test_list_overide_does_not_fail_upon_first_composition(cli, datafiles):
def test_include_element_overrides_sub_include(cli, datafiles):
project = os.path.join(str(datafiles), "sub-include")
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],)
result.assert_success()
loaded = _yaml.load_data(result.output)
assert loaded.get_str("included", default=None) is not None
@@ -233,16 +196,10 @@ def test_junction_do_not_use_included_overrides(cli, tmpdir, datafiles):
project = os.path.join(str(datafiles), "overrides-junction")
generate_junction(
- tmpdir,
- os.path.join(project, "subproject"),
- os.path.join(project, "junction.bst"),
- store_ref=True,
+ tmpdir, os.path.join(project, "subproject"), os.path.join(project, "junction.bst"), store_ref=True,
)
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{vars}", "junction.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "junction.bst"],)
result.assert_success()
loaded = _yaml.load_data(result.output)
assert loaded.get_str("main_override", default=None) is not None
@@ -255,17 +212,7 @@ def test_conditional_in_fragment(cli, datafiles):
result = cli.run(
project=project,
- args=[
- "-o",
- "build_arch",
- "x86_64",
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- "element.bst",
- ],
+ args=["-o", "build_arch", "x86_64", "show", "--deps", "none", "--format", "%{vars}", "element.bst",],
)
result.assert_success()
loaded = _yaml.load_data(result.output)
@@ -277,17 +224,7 @@ def test_inner(cli, datafiles):
project = os.path.join(str(datafiles), "inner")
result = cli.run(
project=project,
- args=[
- "-o",
- "build_arch",
- "x86_64",
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- "element.bst",
- ],
+ args=["-o", "build_arch", "x86_64", "show", "--deps", "none", "--format", "%{vars}", "element.bst",],
)
result.assert_success()
loaded = _yaml.load_data(result.output)
@@ -298,10 +235,7 @@ def test_inner(cli, datafiles):
def test_recursive_include(cli, datafiles):
project = os.path.join(str(datafiles), "recursive")
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.RECURSIVE_INCLUDE)
assert "line 2 column 2" in result.stderr
@@ -311,16 +245,10 @@ def test_local_to_junction(cli, tmpdir, datafiles):
project = os.path.join(str(datafiles), "local_to_junction")
generate_junction(
- tmpdir,
- os.path.join(project, "subproject"),
- os.path.join(project, "junction.bst"),
- store_ref=True,
+ tmpdir, os.path.join(project, "subproject"), os.path.join(project, "junction.bst"), store_ref=True,
)
- result = cli.run(
- project=project,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
- )
+ result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],)
result.assert_success()
loaded = _yaml.load_data(result.output)
assert loaded.get_bool("included")
diff --git a/tests/format/include_composition.py b/tests/format/include_composition.py
index a840b6bad..4369ecbe8 100644
--- a/tests/format/include_composition.py
+++ b/tests/format/include_composition.py
@@ -21,9 +21,7 @@ def make_includes(basedir):
def test_main_has_priority(tmpdir):
with make_includes(str(tmpdir)) as includes:
- _yaml.roundtrip_dump(
- {"(@)": ["a.yml"], "test": ["main"]}, str(tmpdir.join("main.yml"))
- )
+ _yaml.roundtrip_dump({"(@)": ["a.yml"], "test": ["main"]}, str(tmpdir.join("main.yml")))
main = _yaml.load(str(tmpdir.join("main.yml")))
@@ -37,9 +35,7 @@ def test_main_has_priority(tmpdir):
def test_include_cannot_append(tmpdir):
with make_includes(str(tmpdir)) as includes:
- _yaml.roundtrip_dump(
- {"(@)": ["a.yml"], "test": ["main"]}, str(tmpdir.join("main.yml"))
- )
+ _yaml.roundtrip_dump({"(@)": ["a.yml"], "test": ["main"]}, str(tmpdir.join("main.yml")))
main = _yaml.load(str(tmpdir.join("main.yml")))
_yaml.roundtrip_dump({"test": {"(>)": ["a"]}}, str(tmpdir.join("a.yml")))
@@ -52,9 +48,7 @@ def test_include_cannot_append(tmpdir):
def test_main_can_append(tmpdir):
with make_includes(str(tmpdir)) as includes:
- _yaml.roundtrip_dump(
- {"(@)": ["a.yml"], "test": {"(>)": ["main"]}}, str(tmpdir.join("main.yml"))
- )
+ _yaml.roundtrip_dump({"(@)": ["a.yml"], "test": {"(>)": ["main"]}}, str(tmpdir.join("main.yml")))
main = _yaml.load(str(tmpdir.join("main.yml")))
_yaml.roundtrip_dump({"test": ["a"]}, str(tmpdir.join("a.yml")))
@@ -109,9 +103,7 @@ def test_lastest_sibling_has_priority(tmpdir):
def test_main_keeps_keys(tmpdir):
with make_includes(str(tmpdir)) as includes:
- _yaml.roundtrip_dump(
- {"(@)": ["a.yml"], "something": "else"}, str(tmpdir.join("main.yml"))
- )
+ _yaml.roundtrip_dump({"(@)": ["a.yml"], "something": "else"}, str(tmpdir.join("main.yml")))
main = _yaml.load(str(tmpdir.join("main.yml")))
_yaml.roundtrip_dump({"test": ["a"]}, str(tmpdir.join("a.yml")))
@@ -126,18 +118,14 @@ def test_overwrite_directive_on_later_composite(tmpdir):
with make_includes(str(tmpdir)) as includes:
_yaml.roundtrip_dump(
- {"(@)": ["a.yml", "b.yml"], "test": {"(=)": ["Overwritten"]}},
- str(tmpdir.join("main.yml")),
+ {"(@)": ["a.yml", "b.yml"], "test": {"(=)": ["Overwritten"]}}, str(tmpdir.join("main.yml")),
)
main = _yaml.load(str(tmpdir.join("main.yml")))
# a.yml
_yaml.roundtrip_dump(
- {
- "test": ["some useless", "list", "to be overwritten"],
- "foo": "should not be present",
- },
+ {"test": ["some useless", "list", "to be overwritten"], "foo": "should not be present",},
str(tmpdir.join("a.yml")),
)
diff --git a/tests/format/invalid_keys.py b/tests/format/invalid_keys.py
index 40a7b7c34..ce1e2e487 100644
--- a/tests/format/invalid_keys.py
+++ b/tests/format/invalid_keys.py
@@ -25,9 +25,4 @@ def test_compositied_node_fails_usefully(cli, datafiles, element, location):
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
assert "synthetic node" not in result.stderr
- assert (
- "{} [{}]: Dictionary did not contain expected key 'path'".format(
- element, location
- )
- in result.stderr
- )
+ assert "{} [{}]: Dictionary did not contain expected key 'path'".format(element, location) in result.stderr
diff --git a/tests/format/junctions.py b/tests/format/junctions.py
index eedf4d69b..269f2a525 100644
--- a/tests/format/junctions.py
+++ b/tests/format/junctions.py
@@ -19,8 +19,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "junctions"
def copy_subprojects(project, datafiles, subprojects):
for subproject in subprojects:
shutil.copytree(
- os.path.join(str(datafiles), subproject),
- os.path.join(str(project), subproject),
+ os.path.join(str(datafiles), subproject), os.path.join(str(project), subproject),
)
@@ -44,10 +43,7 @@ def test_simple_build(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the checkout contains the expected files from both projects
@@ -96,10 +92,7 @@ def test_workspaced_junction_missing_project_conf(cli, datafiles):
workspace_dir = project / "base_workspace"
copy_subprojects(project, datafiles, ["base"])
- result = cli.run(
- project=project,
- args=["workspace", "open", "base.bst", "--directory", workspace_dir],
- )
+ result = cli.run(project=project, args=["workspace", "open", "base.bst", "--directory", workspace_dir],)
print(result)
result.assert_success()
@@ -166,10 +159,7 @@ def test_nested_simple(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the checkout contains the expected files from all subprojects
@@ -193,10 +183,7 @@ def test_nested_double(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the checkout contains the expected files from all subprojects
@@ -284,10 +271,7 @@ def test_options_default(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
assert os.path.exists(os.path.join(checkoutdir, "pony.txt"))
@@ -304,10 +288,7 @@ def test_options(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
assert not os.path.exists(os.path.join(checkoutdir, "pony.txt"))
@@ -324,10 +305,7 @@ def test_options_inherit(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
assert not os.path.exists(os.path.join(checkoutdir, "pony.txt"))
@@ -370,10 +348,7 @@ def test_git_build(cli, tmpdir, datafiles):
# Build (with implicit fetch of subproject), checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the checkout contains the expected files from both projects
@@ -434,14 +409,7 @@ def test_build_git_cross_junction_names(cli, tmpdir, datafiles):
result = cli.run(project=project, args=["build", "base.bst:target.bst"])
result.assert_success()
result = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- "base.bst:target.bst",
- "--directory",
- checkoutdir,
- ],
+ project=project, args=["artifact", "checkout", "base.bst:target.bst", "--directory", checkoutdir,],
)
result.assert_success()
@@ -457,10 +425,7 @@ def test_config_target(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the checkout contains the expected files from sub-sub-project
@@ -474,10 +439,7 @@ def test_invalid_sources_and_target(cli, tmpdir, datafiles):
result = cli.run(project=project, args=["show", "invalid-source-target.bst"])
result.assert_main_error(ErrorDomain.ELEMENT, None)
- assert (
- "junction elements cannot define both 'sources' and 'target' config option"
- in result.stderr
- )
+ assert "junction elements cannot define both 'sources' and 'target' config option" in result.stderr
@pytest.mark.datafiles(DATA_DIR)
@@ -493,9 +455,7 @@ def test_invalid_target_name(cli, tmpdir, datafiles):
result = cli.run(project=project, args=["show", "subsubproject-junction.bst"])
result.assert_main_error(ErrorDomain.ELEMENT, None)
- assert (
- "junction elements cannot target an element with the same name" in result.stderr
- )
+ assert "junction elements cannot target an element with the same name" in result.stderr
# We cannot exhaustively test all possible ways in which this can go wrong, so
@@ -508,7 +468,4 @@ def test_invalid_target_format(cli, tmpdir, datafiles, target):
result = cli.run(project=project, args=["show", target])
result.assert_main_error(ErrorDomain.ELEMENT, None)
- assert (
- "'target' option must be in format '{junction-name}:{element-name}'"
- in result.stderr
- )
+ assert "'target' option must be in format '{junction-name}:{element-name}'" in result.stderr
diff --git a/tests/format/listdirectiveerrors.py b/tests/format/listdirectiveerrors.py
index e17dd7e8c..66b6c738b 100644
--- a/tests/format/listdirectiveerrors.py
+++ b/tests/format/listdirectiveerrors.py
@@ -12,44 +12,28 @@ DATA_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.mark.datafiles(DATA_DIR)
def test_project_error(cli, datafiles):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "list-directive-error-project"
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "list-directive-error-project")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.TRAILING_LIST_DIRECTIVE)
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.parametrize(
- "target", [("variables.bst"), ("environment.bst"), ("config.bst"), ("public.bst")]
-)
+@pytest.mark.parametrize("target", [("variables.bst"), ("environment.bst"), ("config.bst"), ("public.bst")])
def test_element_error(cli, datafiles, target):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "list-directive-error-element"
- )
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", target],
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "list-directive-error-element")
+ result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", target],)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.TRAILING_LIST_DIRECTIVE)
@pytest.mark.datafiles(DATA_DIR)
def test_project_composite_error(cli, datafiles):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "list-directive-type-error"
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "list-directive-type-error")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.ILLEGAL_COMPOSITE)
diff --git a/tests/format/optionarch.py b/tests/format/optionarch.py
index 69faee347..1d2cdc627 100644
--- a/tests/format/optionarch.py
+++ b/tests/format/optionarch.py
@@ -53,9 +53,7 @@ def test_unsupported_arch(cli, datafiles):
with override_platform_uname(machine="x86_64"):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-arch")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@@ -65,13 +63,9 @@ def test_unsupported_arch(cli, datafiles):
def test_alias(cli, datafiles):
with override_platform_uname(machine="arm"):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "option-arch-alias"
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "option-arch-alias")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_success()
@@ -83,9 +77,7 @@ def test_unknown_host_arch(cli, datafiles):
with override_platform_uname(machine="x86_128"):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-arch")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.PLATFORM, None)
@@ -96,9 +88,7 @@ def test_unknown_project_arch(cli, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-arch-unknown")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
diff --git a/tests/format/optionbool.py b/tests/format/optionbool.py
index 275be61cf..58d353a2d 100644
--- a/tests/format/optionbool.py
+++ b/tests/format/optionbool.py
@@ -36,17 +36,7 @@ def test_conditional_cli(cli, datafiles, target, option, expected):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "pony",
- option,
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- target,
- ],
+ args=["--option", "pony", option, "show", "--deps", "none", "--format", "%{vars}", target,],
)
result.assert_success()
@@ -58,17 +48,12 @@ def test_conditional_cli(cli, datafiles, target, option, expected):
#
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
- "target,option,expected",
- [("element.bst", True, "a pony"), ("element.bst", False, "not pony"),],
+ "target,option,expected", [("element.bst", True, "a pony"), ("element.bst", False, "not pony"),],
)
def test_conditional_config(cli, datafiles, target, option, expected):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-bool")
cli.configure({"projects": {"test": {"options": {"pony": option}}}})
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", target],
- )
+ result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", target],)
result.assert_success()
loaded = _yaml.load_data(result.output)
@@ -82,31 +67,17 @@ def test_invalid_value_cli(cli, datafiles, cli_option):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "pony",
- cli_option,
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- "element.bst",
- ],
+ args=["--option", "pony", cli_option, "show", "--deps", "none", "--format", "%{vars}", "element.bst",],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.parametrize(
- "config_option", [("pony"), (["its", "a", "list"]), ({"dic": "tionary"})]
-)
+@pytest.mark.parametrize("config_option", [("pony"), (["its", "a", "list"]), ({"dic": "tionary"})])
def test_invalid_value_config(cli, datafiles, config_option):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-bool")
cli.configure({"projects": {"test": {"options": {"pony": config_option}}}})
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
diff --git a/tests/format/optioneltmask.py b/tests/format/optioneltmask.py
index 2530999bf..77eaf2c9b 100644
--- a/tests/format/optioneltmask.py
+++ b/tests/format/optioneltmask.py
@@ -25,17 +25,7 @@ def test_conditional_cli(cli, datafiles, target, value, expected):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "debug_elements",
- value,
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- target,
- ],
+ args=["--option", "debug_elements", value, "show", "--deps", "none", "--format", "%{vars}", target,],
)
result.assert_success()
@@ -55,11 +45,7 @@ def test_conditional_cli(cli, datafiles, target, value, expected):
def test_conditional_config(cli, datafiles, target, value, expected):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-element-mask")
cli.configure({"projects": {"test": {"options": {"debug_elements": value}}}})
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", target],
- )
+ result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", target],)
result.assert_success()
loaded = _yaml.load_data(result.output)
@@ -68,14 +54,8 @@ def test_conditional_config(cli, datafiles, target, value, expected):
@pytest.mark.datafiles(DATA_DIR)
def test_invalid_declaration(cli, datafiles):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "option-element-mask-invalid"
- )
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "pony.bst"],
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "option-element-mask-invalid")
+ result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "pony.bst"],)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
diff --git a/tests/format/optionenum.py b/tests/format/optionenum.py
index ee6a4fa0e..89d2d0cd9 100644
--- a/tests/format/optionenum.py
+++ b/tests/format/optionenum.py
@@ -30,17 +30,7 @@ def test_conditional_cli(cli, datafiles, target, option, value, expected):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- option,
- value,
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- target,
- ],
+ args=["--option", option, value, "show", "--deps", "none", "--format", "%{vars}", target,],
)
result.assert_success()
@@ -65,11 +55,7 @@ def test_conditional_cli(cli, datafiles, target, option, value, expected):
def test_conditional_config(cli, datafiles, target, option, value, expected):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-enum")
cli.configure({"projects": {"test": {"options": {option: value}}}})
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", target],
- )
+ result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", target],)
result.assert_success()
loaded = _yaml.load_data(result.output)
@@ -82,32 +68,18 @@ def test_invalid_value_cli(cli, datafiles):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "brother",
- "giraffy",
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- "element.bst",
- ],
+ args=["--option", "brother", "giraffy", "show", "--deps", "none", "--format", "%{vars}", "element.bst",],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.parametrize(
- "config_option", [("giraffy"), (["its", "a", "list"]), ({"dic": "tionary"})]
-)
+@pytest.mark.parametrize("config_option", [("giraffy"), (["its", "a", "list"]), ({"dic": "tionary"})])
def test_invalid_value_config(cli, datafiles, config_option):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-enum")
cli.configure({"projects": {"test": {"options": {"brother": config_option}}}})
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@@ -116,8 +88,6 @@ def test_invalid_value_config(cli, datafiles, config_option):
def test_missing_values(cli, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-enum-missing")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
diff --git a/tests/format/optionexports.py b/tests/format/optionexports.py
index 90bbace97..486562f01 100644
--- a/tests/format/optionexports.py
+++ b/tests/format/optionexports.py
@@ -30,17 +30,7 @@ def test_export(cli, datafiles, option_name, option_value, var_name, var_value):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- option_name,
- option_value,
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- "element.bst",
- ],
+ args=["--option", option_name, option_value, "show", "--deps", "none", "--format", "%{vars}", "element.bst",],
)
result.assert_success()
diff --git a/tests/format/optionflags.py b/tests/format/optionflags.py
index 72d175bf8..f2ea129a7 100644
--- a/tests/format/optionflags.py
+++ b/tests/format/optionflags.py
@@ -33,17 +33,7 @@ def test_conditional_cli(cli, datafiles, target, option, value, expected):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- option,
- value,
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- target,
- ],
+ args=["--option", option, value, "show", "--deps", "none", "--format", "%{vars}", target,],
)
result.assert_success()
@@ -65,11 +55,7 @@ def test_conditional_cli(cli, datafiles, target, option, value, expected):
def test_conditional_config(cli, datafiles, target, option, value, expected):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-flags")
cli.configure({"projects": {"test": {"options": {option: value}}}})
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", target],
- )
+ result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", target],)
result.assert_success()
loaded = _yaml.load_data(result.output)
@@ -79,27 +65,14 @@ def test_conditional_config(cli, datafiles, target, option, value, expected):
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
"cli_option",
- [
- ("giraffy"), # Not a valid animal for the farm option
- ("horsy pony"), # Does not include comma separators
- ],
+ [("giraffy"), ("horsy pony"),], # Not a valid animal for the farm option # Does not include comma separators
)
def test_invalid_value_cli(cli, datafiles, cli_option):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-flags")
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "farm",
- cli_option,
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- "element.bst",
- ],
+ args=["--option", "farm", cli_option, "show", "--deps", "none", "--format", "%{vars}", "element.bst",],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@@ -117,21 +90,15 @@ def test_invalid_value_config(cli, datafiles, config_option):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-flags")
cli.configure({"projects": {"test": {"options": {"farm": config_option}}}})
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@pytest.mark.datafiles(DATA_DIR)
def test_missing_values(cli, datafiles):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "option-flags-missing"
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "option-flags-missing")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
diff --git a/tests/format/optionos.py b/tests/format/optionos.py
index cb75db71f..b0f9cbd09 100644
--- a/tests/format/optionos.py
+++ b/tests/format/optionos.py
@@ -52,9 +52,7 @@ def test_unsupported_arch(cli, datafiles):
with override_platform_uname(system="AIX"):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-os")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
diff --git a/tests/format/options.py b/tests/format/options.py
index c2f4584d4..9376cd0d2 100644
--- a/tests/format/options.py
+++ b/tests/format/options.py
@@ -14,12 +14,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "options")
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
"project_dir",
- [
- ("invalid-name-spaces"),
- ("invalid-name-dashes"),
- ("invalid-name-plus"),
- ("invalid-name-leading-number"),
- ],
+ [("invalid-name-spaces"), ("invalid-name-dashes"), ("invalid-name-plus"), ("invalid-name-leading-number"),],
)
def test_invalid_option_name(cli, datafiles, project_dir):
project = os.path.join(datafiles.dirname, datafiles.basename, project_dir)
@@ -28,9 +23,7 @@ def test_invalid_option_name(cli, datafiles, project_dir):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.parametrize(
- "project_dir", [("invalid-variable-name-spaces"), ("invalid-variable-name-plus"),]
-)
+@pytest.mark.parametrize("project_dir", [("invalid-variable-name-spaces"), ("invalid-variable-name-plus"),])
def test_invalid_variable_name(cli, datafiles, project_dir):
project = os.path.join(datafiles.dirname, datafiles.basename, project_dir)
result = cli.run(project=project, silent=True, args=["show", "element.bst"])
@@ -45,17 +38,7 @@ def test_invalid_option_type(cli, datafiles):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "opt",
- "funny",
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- "element.bst",
- ],
+ args=["--option", "opt", "funny", "show", "--deps", "none", "--format", "%{vars}", "element.bst",],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@@ -68,17 +51,7 @@ def test_invalid_option_cli(cli, datafiles):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "fart",
- "funny",
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- "element.bst",
- ],
+ args=["--option", "fart", "funny", "show", "--deps", "none", "--format", "%{vars}", "element.bst",],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@@ -88,9 +61,7 @@ def test_invalid_option_config(cli, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename, "simple-condition")
cli.configure({"projects": {"test": {"options": {"fart": "Hello"}}}})
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@@ -99,9 +70,7 @@ def test_invalid_option_config(cli, datafiles):
def test_invalid_expression(cli, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename, "invalid-expression")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.EXPRESSION_FAILED)
@@ -110,9 +79,7 @@ def test_invalid_expression(cli, datafiles):
def test_undefined(cli, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename, "undefined-variable")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.EXPRESSION_FAILED)
@@ -121,17 +88,13 @@ def test_undefined(cli, datafiles):
def test_invalid_condition(cli, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename, "invalid-condition")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"],
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.parametrize(
- "opt_option,expected_prefix", [("False", "/usr"), ("True", "/opt"),]
-)
+@pytest.mark.parametrize("opt_option,expected_prefix", [("False", "/usr"), ("True", "/opt"),])
def test_simple_conditional(cli, datafiles, opt_option, expected_prefix):
project = os.path.join(datafiles.dirname, datafiles.basename, "simple-condition")
@@ -139,17 +102,7 @@ def test_simple_conditional(cli, datafiles, opt_option, expected_prefix):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "opt",
- opt_option,
- "show",
- "--deps",
- "none",
- "--format",
- "%{vars}",
- "element.bst",
- ],
+ args=["--option", "opt", opt_option, "show", "--deps", "none", "--format", "%{vars}", "element.bst",],
)
result.assert_success()
loaded = _yaml.load_data(result.output)
@@ -159,12 +112,7 @@ def test_simple_conditional(cli, datafiles, opt_option, expected_prefix):
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
"debug,logging,expected",
- [
- ("False", "False", "False"),
- ("True", "False", "False"),
- ("False", "True", "False"),
- ("True", "True", "True"),
- ],
+ [("False", "False", "False"), ("True", "False", "False"), ("False", "True", "False"), ("True", "True", "True"),],
)
def test_nested_conditional(cli, datafiles, debug, logging, expected):
project = os.path.join(datafiles.dirname, datafiles.basename, "nested-condition")
@@ -196,17 +144,10 @@ def test_nested_conditional(cli, datafiles, debug, logging, expected):
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
"debug,logging,expected",
- [
- ("False", "False", "False"),
- ("True", "False", "False"),
- ("False", "True", "False"),
- ("True", "True", "True"),
- ],
+ [("False", "False", "False"), ("True", "False", "False"), ("False", "True", "False"), ("True", "True", "True"),],
)
def test_compound_and_conditional(cli, datafiles, debug, logging, expected):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "compound-and-condition"
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "compound-and-condition")
# Test with the opt option set
result = cli.run(
@@ -235,17 +176,10 @@ def test_compound_and_conditional(cli, datafiles, debug, logging, expected):
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
"debug,logging,expected",
- [
- ("False", "False", "False"),
- ("True", "False", "True"),
- ("False", "True", "True"),
- ("True", "True", "True"),
- ],
+ [("False", "False", "False"), ("True", "False", "True"), ("False", "True", "True"), ("True", "True", "True"),],
)
def test_compound_or_conditional(cli, datafiles, debug, logging, expected):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "compound-or-condition"
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "compound-or-condition")
# Test with the opt option set
result = cli.run(
@@ -278,17 +212,7 @@ def test_deep_nesting_level1(cli, datafiles, option, expected):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "pony",
- option,
- "show",
- "--deps",
- "none",
- "--format",
- "%{public}",
- "element.bst",
- ],
+ args=["--option", "pony", option, "show", "--deps", "none", "--format", "%{public}", "element.bst",],
)
result.assert_success()
loaded = _yaml.load_data(result.output)
@@ -305,17 +229,7 @@ def test_deep_nesting_level2(cli, datafiles, option, expected):
result = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "pony",
- option,
- "show",
- "--deps",
- "none",
- "--format",
- "%{public}",
- "element-deeper.bst",
- ],
+ args=["--option", "pony", option, "show", "--deps", "none", "--format", "%{public}", "element-deeper.bst",],
)
result.assert_success()
loaded = _yaml.load_data(result.output)
diff --git a/tests/format/project.py b/tests/format/project.py
index b9171865e..8934ff5d0 100644
--- a/tests/format/project.py
+++ b/tests/format/project.py
@@ -85,9 +85,7 @@ def test_load_default_project(cli, datafiles):
def test_load_project_from_subdir(cli, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename, "project-from-subdir")
result = cli.run(
- project=project,
- cwd=os.path.join(project, "subdirectory"),
- args=["show", "--format", "%{env}", "manual.bst"],
+ project=project, cwd=os.path.join(project, "subdirectory"), args=["show", "--format", "%{env}", "manual.bst"],
)
result.assert_success()
@@ -130,9 +128,7 @@ def test_element_path_not_a_directory(cli, datafiles):
for _file_type in filetypegenerator.generate_file_types(path):
result = cli.run(project=project, args=["workspace", "list"])
if not os.path.isdir(path):
- result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND
- )
+ result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND)
else:
result.assert_success()
@@ -151,9 +147,7 @@ def test_local_plugin_not_directory(cli, datafiles):
for _file_type in filetypegenerator.generate_file_types(path):
result = cli.run(project=project, args=["workspace", "list"])
if not os.path.isdir(path):
- result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND
- )
+ result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND)
else:
result.assert_success()
@@ -182,9 +176,7 @@ def test_plugin_no_load_ref(cli, datafiles, ref_storage):
config = {
"name": "test",
"ref-storage": ref_storage,
- "plugins": [
- {"origin": "local", "path": "plugins", "sources": {"noloadref": 0}}
- ],
+ "plugins": [{"origin": "local", "path": "plugins", "sources": {"noloadref": 0}}],
}
_yaml.roundtrip_dump(config, os.path.join(project, "project.conf"))
@@ -200,9 +192,7 @@ def test_plugin_no_load_ref(cli, datafiles, ref_storage):
@pytest.mark.datafiles(DATA_DIR)
def test_plugin_preflight_error(cli, datafiles):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "plugin-preflight-error"
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "plugin-preflight-error")
result = cli.run(project=project, args=["source", "fetch", "error.bst"])
result.assert_main_error(ErrorDomain.SOURCE, "the-preflight-error")
@@ -224,34 +214,14 @@ def test_project_refs_options(cli, datafiles):
result1 = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "test",
- "True",
- "show",
- "--deps",
- "none",
- "--format",
- "%{key}",
- "target.bst",
- ],
+ args=["--option", "test", "True", "show", "--deps", "none", "--format", "%{key}", "target.bst",],
)
result1.assert_success()
result2 = cli.run(
project=project,
silent=True,
- args=[
- "--option",
- "test",
- "False",
- "show",
- "--deps",
- "none",
- "--format",
- "%{key}",
- "target.bst",
- ],
+ args=["--option", "test", "False", "show", "--deps", "none", "--format", "%{key}", "target.bst",],
)
result2.assert_success()
diff --git a/tests/format/project/plugin-preflight-error/errorplugin/preflighterror.py b/tests/format/project/plugin-preflight-error/errorplugin/preflighterror.py
index f0d66e3c7..762be8f36 100644
--- a/tests/format/project/plugin-preflight-error/errorplugin/preflighterror.py
+++ b/tests/format/project/plugin-preflight-error/errorplugin/preflighterror.py
@@ -9,8 +9,7 @@ class PreflightErrorSource(Source):
# Raise a preflight error unconditionally
raise SourceError(
- "Unsatisfied requirements in preflight, raising this error",
- reason="the-preflight-error",
+ "Unsatisfied requirements in preflight, raising this error", reason="the-preflight-error",
)
def get_unique_key(self):
diff --git a/tests/format/projectoverrides.py b/tests/format/projectoverrides.py
index bba630c54..e2aaa4173 100644
--- a/tests/format/projectoverrides.py
+++ b/tests/format/projectoverrides.py
@@ -7,20 +7,14 @@ from buildstream import _yaml
from buildstream.testing.runcli import cli # pylint: disable=unused-import
# Project directory
-DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "project-overrides"
-)
+DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project-overrides")
@pytest.mark.datafiles(DATA_DIR)
def test_prepend_configure_commands(cli, datafiles):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "prepend-configure-commands"
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "prepend-configure-commands")
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{config}", "element.bst"],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{config}", "element.bst"],
)
result.assert_success()
diff --git a/tests/format/variables.py b/tests/format/variables.py
index 31f969f4b..343fe7237 100644
--- a/tests/format/variables.py
+++ b/tests/format/variables.py
@@ -29,11 +29,7 @@ def print_warning(msg):
@pytest.mark.parametrize(
"target,varname,expected",
[
- (
- "autotools.bst",
- "make-install",
- 'make -j1 DESTDIR="/buildstream-install" install',
- ),
+ ("autotools.bst", "make-install", 'make -j1 DESTDIR="/buildstream-install" install',),
(
"cmake.bst",
"cmake",
@@ -44,34 +40,17 @@ def print_warning(msg):
(
"distutils.bst",
"python-install",
- 'python3 ./setup.py install --prefix "/usr" \\\n'
- + '--root "/buildstream-install"',
- ),
- (
- "makemaker.bst",
- "configure",
- "perl Makefile.PL PREFIX=/buildstream-install/usr",
- ),
- (
- "modulebuild.bst",
- "configure",
- 'perl Build.PL --prefix "/buildstream-install/usr"',
- ),
- (
- "qmake.bst",
- "make-install",
- 'make -j1 INSTALL_ROOT="/buildstream-install" install',
+ 'python3 ./setup.py install --prefix "/usr" \\\n' + '--root "/buildstream-install"',
),
+ ("makemaker.bst", "configure", "perl Makefile.PL PREFIX=/buildstream-install/usr",),
+ ("modulebuild.bst", "configure", 'perl Build.PL --prefix "/buildstream-install/usr"',),
+ ("qmake.bst", "make-install", 'make -j1 INSTALL_ROOT="/buildstream-install" install',),
],
)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "defaults"))
def test_defaults(cli, datafiles, target, varname, expected):
project = str(datafiles)
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", target],
- )
+ result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", target],)
result.assert_success()
result_vars = _yaml.load_data(result.output)
assert result_vars.get_str(varname) == expected
@@ -83,11 +62,7 @@ def test_defaults(cli, datafiles, target, varname, expected):
@pytest.mark.parametrize(
"target,varname,expected",
[
- (
- "autotools.bst",
- "make-install",
- 'make -j1 DESTDIR="/custom/install/root" install',
- ),
+ ("autotools.bst", "make-install", 'make -j1 DESTDIR="/custom/install/root" install',),
(
"cmake.bst",
"cmake",
@@ -98,34 +73,17 @@ def test_defaults(cli, datafiles, target, varname, expected):
(
"distutils.bst",
"python-install",
- 'python3 ./setup.py install --prefix "/opt" \\\n'
- + '--root "/custom/install/root"',
- ),
- (
- "makemaker.bst",
- "configure",
- "perl Makefile.PL PREFIX=/custom/install/root/opt",
- ),
- (
- "modulebuild.bst",
- "configure",
- 'perl Build.PL --prefix "/custom/install/root/opt"',
- ),
- (
- "qmake.bst",
- "make-install",
- 'make -j1 INSTALL_ROOT="/custom/install/root" install',
+ 'python3 ./setup.py install --prefix "/opt" \\\n' + '--root "/custom/install/root"',
),
+ ("makemaker.bst", "configure", "perl Makefile.PL PREFIX=/custom/install/root/opt",),
+ ("modulebuild.bst", "configure", 'perl Build.PL --prefix "/custom/install/root/opt"',),
+ ("qmake.bst", "make-install", 'make -j1 INSTALL_ROOT="/custom/install/root" install',),
],
)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "overrides"))
def test_overrides(cli, datafiles, target, varname, expected):
project = str(datafiles)
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{vars}", target],
- )
+ result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", target],)
result.assert_success()
result_vars = _yaml.load_data(result.output)
assert result_vars.get_str(varname) == expected
@@ -135,21 +93,14 @@ def test_overrides(cli, datafiles, target, varname, expected):
@pytest.mark.datafiles(os.path.join(DATA_DIR, "missing_variables"))
def test_missing_variable(cli, datafiles, element):
project = str(datafiles)
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{config}", element],
- )
+ result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{config}", element],)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.UNRESOLVED_VARIABLE)
@pytest.mark.timeout(3, method="signal")
@pytest.mark.datafiles(os.path.join(DATA_DIR, "cyclic_variables"))
def test_cyclic_variables(cli, datafiles):
- print_warning(
- "Performing cyclic test, if this test times out it will "
- + "exit the test sequence"
- )
+    print_warning("Performing cyclic test, if this test times out it will exit the test sequence")
project = str(datafiles)
result = cli.run(project=project, silent=True, args=["build", "cyclic.bst"])
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.RECURSIVE_VARIABLE)
@@ -169,9 +120,7 @@ def test_use_of_protected_var_project_conf(cli, datafiles, protected_var):
_yaml.roundtrip_dump(element, os.path.join(project, "target.bst"))
result = cli.run(project=project, args=["build", "target.bst"])
- result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.PROTECTED_VARIABLE_REDEFINED
- )
+ result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROTECTED_VARIABLE_REDEFINED)
@pytest.mark.parametrize("protected_var", PROTECTED_VARIABLES)
@@ -191,9 +140,7 @@ def test_use_of_protected_var_element_overrides(cli, datafiles, protected_var):
_yaml.roundtrip_dump(element, os.path.join(project, "target.bst"))
result = cli.run(project=project, args=["build", "target.bst"])
- result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.PROTECTED_VARIABLE_REDEFINED
- )
+ result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROTECTED_VARIABLE_REDEFINED)
@pytest.mark.parametrize("protected_var", PROTECTED_VARIABLES)
@@ -208,6 +155,4 @@ def test_use_of_protected_var_in_element(cli, datafiles, protected_var):
_yaml.roundtrip_dump(element, os.path.join(project, "target.bst"))
result = cli.run(project=project, args=["build", "target.bst"])
- result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.PROTECTED_VARIABLE_REDEFINED
- )
+ result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROTECTED_VARIABLE_REDEFINED)
diff --git a/tests/frontend/artifact_delete.py b/tests/frontend/artifact_delete.py
index 9389788b3..a93d99ef6 100644
--- a/tests/frontend/artifact_delete.py
+++ b/tests/frontend/artifact_delete.py
@@ -74,9 +74,7 @@ def test_artifact_delete_artifact(cli, tmpdir, datafiles):
result.assert_success()
# Check that the ARTIFACT is no longer in the cache
- assert not os.path.exists(
- os.path.join(local_cache, "cas", "refs", "heads", artifact)
- )
+ assert not os.path.exists(os.path.join(local_cache, "cas", "refs", "heads", artifact))
# Test the `bst artifact delete` command with multiple, different arguments.
@@ -190,9 +188,7 @@ def test_artifact_delete_elements_build_deps(cli, tmpdir, datafiles):
for state in bdep_states.values():
assert state == "cached"
- result = cli.run(
- project=project, args=["artifact", "delete", "--deps", "build", element]
- )
+ result = cli.run(project=project, args=["artifact", "delete", "--deps", "build", element])
result.assert_success()
# Assert that the build deps have been deleted and that the artifact remains cached
@@ -227,20 +223,14 @@ def test_artifact_delete_artifacts_build_deps(cli, tmpdir, datafiles):
bdep_refs = []
bdep_states = cli.get_element_states(project, [element], deps="build")
for bdep in bdep_states.keys():
- bdep_refs.append(
- os.path.join(
- "test", _get_normal_name(bdep), cli.get_element_key(project, bdep)
- )
- )
+ bdep_refs.append(os.path.join("test", _get_normal_name(bdep), cli.get_element_key(project, bdep)))
# Assert build dependencies are cached
for ref in bdep_refs:
assert os.path.exists(os.path.join(local_cache, "artifacts", "refs", ref))
# Delete the artifact
- result = cli.run(
- project=project, args=["artifact", "delete", "--deps", "build", artifact]
- )
+ result = cli.run(project=project, args=["artifact", "delete", "--deps", "build", artifact])
result.assert_success()
# Check that the artifact's build deps are no longer in the cache
@@ -265,9 +255,7 @@ def test_artifact_delete_artifact_with_deps_all_fails(cli, tmpdir, datafiles):
artifact = os.path.join("test", os.path.splitext(element)[0], cache_key)
# Try to delete the artifact with all of its dependencies
- result = cli.run(
- project=project, args=["artifact", "delete", "--deps", "all", artifact]
- )
+ result = cli.run(project=project, args=["artifact", "delete", "--deps", "all", artifact])
result.assert_main_error(ErrorDomain.STREAM, None)
assert "Error: '--deps all' is not supported for artifact refs" in result.stderr
diff --git a/tests/frontend/artifact_list_contents.py b/tests/frontend/artifact_list_contents.py
index ddd2d50a6..7e8bb6508 100644
--- a/tests/frontend/artifact_list_contents.py
+++ b/tests/frontend/artifact_list_contents.py
@@ -37,9 +37,7 @@ def test_artifact_list_exact_contents_element(cli, datafiles):
assert result.exit_code == 0
# List the contents via the element name
- result = cli.run(
- project=project, args=["artifact", "list-contents", "import-bin.bst"]
- )
+ result = cli.run(project=project, args=["artifact", "list-contents", "import-bin.bst"])
assert result.exit_code == 0
expected_output = "import-bin.bst:\n" "\tusr\n" "\tusr/bin\n" "\tusr/bin/hello\n\n"
assert expected_output in result.output
@@ -57,14 +55,10 @@ def test_artifact_list_exact_contents_ref(cli, datafiles):
assert result.exit_code == 0
# List the contents via the key
- result = cli.run(
- project=project, args=["artifact", "list-contents", "test/import-bin/" + key]
- )
+ result = cli.run(project=project, args=["artifact", "list-contents", "test/import-bin/" + key])
assert result.exit_code == 0
- expected_output = (
- "test/import-bin/" + key + ":\n" "\tusr\n" "\tusr/bin\n" "\tusr/bin/hello\n\n"
- )
+ expected_output = "test/import-bin/" + key + ":\n" "\tusr\n" "\tusr/bin\n" "\tusr/bin/hello\n\n"
assert expected_output in result.output
@@ -106,9 +100,7 @@ def test_artifact_list_exact_contents_element_long(cli, datafiles):
assert result.exit_code == 0
# List the contents via the element name
- result = cli.run(
- project=project, args=["artifact", "list-contents", "--long", "import-bin.bst"]
- )
+ result = cli.run(project=project, args=["artifact", "list-contents", "--long", "import-bin.bst"])
assert result.exit_code == 0
expected_output = (
"import-bin.bst:\n"
@@ -132,10 +124,7 @@ def test_artifact_list_exact_contents_ref_long(cli, datafiles):
assert result.exit_code == 0
# List the contents via the key
- result = cli.run(
- project=project,
- args=["artifact", "list-contents", "-l", "test/import-bin/" + key],
- )
+ result = cli.run(project=project, args=["artifact", "list-contents", "-l", "test/import-bin/" + key],)
assert result.exit_code == 0
expected_output = (
diff --git a/tests/frontend/artifact_log.py b/tests/frontend/artifact_log.py
index 44c35aa3d..806a3b437 100644
--- a/tests/frontend/artifact_log.py
+++ b/tests/frontend/artifact_log.py
@@ -36,15 +36,7 @@ def test_artifact_log(cli, datafiles):
result = cli.run(
project=project,
silent=True,
- args=[
- "--no-colors",
- "show",
- "--deps",
- "none",
- "--format",
- "%{full-key}",
- "target.bst",
- ],
+ args=["--no-colors", "show", "--deps", "none", "--format", "%{full-key}", "target.bst",],
)
key = result.output.strip()
@@ -89,10 +81,7 @@ def test_artifact_log_files(cli, datafiles):
assert not os.path.exists(import_bin)
# Run the command and ensure the file now exists
- result = cli.run(
- project=project,
- args=["artifact", "log", "--out", logfiles, "target.bst", "import-bin.bst"],
- )
+ result = cli.run(project=project, args=["artifact", "log", "--out", logfiles, "target.bst", "import-bin.bst"],)
assert result.exit_code == 0
assert os.path.exists(logfiles)
assert os.path.exists(target)
diff --git a/tests/frontend/artifact_show.py b/tests/frontend/artifact_show.py
index c47222e18..6f824c0e4 100644
--- a/tests/frontend/artifact_show.py
+++ b/tests/frontend/artifact_show.py
@@ -79,9 +79,7 @@ def test_artifact_show_element_missing_deps(cli, tmpdir, datafiles):
result = cli.run(project=project, args=["artifact", "delete", dependency])
result.assert_success()
- result = cli.run(
- project=project, args=["artifact", "show", "--deps", "all", element]
- )
+ result = cli.run(project=project, args=["artifact", "show", "--deps", "all", element])
result.assert_success()
assert "not cached {}".format(dependency) in result.output
assert "cached {}".format(element) in result.output
diff --git a/tests/frontend/buildcheckout.py b/tests/frontend/buildcheckout.py
index f3080269d..7772c48ef 100644
--- a/tests/frontend/buildcheckout.py
+++ b/tests/frontend/buildcheckout.py
@@ -33,12 +33,7 @@ def strict_args(args, strict):
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
"strict,hardlinks",
- [
- ("strict", "copies"),
- ("strict", "hardlinks"),
- ("non-strict", "copies"),
- ("non-strict", "hardlinks"),
- ],
+ [("strict", "copies"), ("strict", "hardlinks"), ("non-strict", "copies"), ("non-strict", "hardlinks"),],
)
def test_build_checkout(datafiles, cli, strict, hardlinks):
project = str(datafiles)
@@ -115,9 +110,7 @@ def test_build_invalid_suffix_dep(datafiles, cli, strict, hardlinks):
project = str(datafiles)
# target2.bst depends on an element called target.foo
- result = cli.run(
- project=project, args=strict_args(["build", "target2.bst"], strict)
- )
+ result = cli.run(project=project, args=strict_args(["build", "target2.bst"], strict))
result.assert_main_error(ErrorDomain.LOAD, "bad-element-suffix")
@@ -134,9 +127,7 @@ def test_build_invalid_filename_chars(datafiles, cli):
}
_yaml.roundtrip_dump(element, os.path.join(project, "elements", element_name))
- result = cli.run(
- project=project, args=strict_args(["build", element_name], "non-strict")
- )
+ result = cli.run(project=project, args=strict_args(["build", element_name], "non-strict"))
result.assert_main_error(ErrorDomain.LOAD, "bad-characters-in-name")
@@ -154,10 +145,7 @@ def test_build_invalid_filename_chars_dep(datafiles, cli):
}
_yaml.roundtrip_dump(element, os.path.join(project, "elements", element_name))
- result = cli.run(
- project=project,
- args=strict_args(["build", "invalid-chars-in-dep.bst"], "non-strict"),
- )
+ result = cli.run(project=project, args=strict_args(["build", "invalid-chars-in-dep.bst"], "non-strict"),)
result.assert_main_error(ErrorDomain.LOAD, "bad-characters-in-name")
@@ -179,16 +167,7 @@ def test_build_checkout_deps(datafiles, cli, deps):
# Now check it out
result = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- element_name,
- "--deps",
- deps,
- "--directory",
- checkout,
- ],
+ project=project, args=["artifact", "checkout", element_name, "--deps", deps, "--directory", checkout,],
)
result.assert_success()
@@ -220,10 +199,7 @@ def test_build_checkout_unbuilt(datafiles, cli):
checkout = os.path.join(cli.directory, "checkout")
# Check that checking out an unbuilt element fails nicely
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkout],)
result.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt")
@@ -246,10 +222,7 @@ def test_build_checkout_compression_no_tar(datafiles, cli):
]
result = cli.run(project=project, args=checkout_args)
- assert (
- "ERROR: --compression can only be provided if --tar is provided"
- in result.stderr
- )
+ assert "ERROR: --compression can only be provided if --tar is provided" in result.stderr
assert result.exit_code != 0
@@ -466,10 +439,7 @@ def test_build_checkout_invalid_ref(datafiles, cli):
]
result = cli.run(project=project, args=checkout_args)
- assert (
- "Error while staging dependencies into a sandbox: 'No artifacts to stage'"
- in result.stderr
- )
+ assert "Error while staging dependencies into a sandbox: 'No artifacts to stage'" in result.stderr
@pytest.mark.datafiles(DATA_DIR)
@@ -613,9 +583,7 @@ def test_build_checkout_tarball_links(datafiles, cli):
# of the symlink and the test therefore doesn't have the correct content
os.symlink(
os.path.join("..", "basicfile"),
- os.path.join(
- project, "files", "files-and-links", "basicfolder", "basicsymlink"
- ),
+ os.path.join(project, "files", "files-and-links", "basicfolder", "basicsymlink"),
)
result = cli.run(project=project, args=["build", "import-links.bst"])
@@ -632,10 +600,7 @@ def test_build_checkout_tarball_links(datafiles, cli):
tar = tarfile.open(name=checkout, mode="r:")
tar.extractall(extract)
- assert (
- open(os.path.join(extract, "basicfolder", "basicsymlink")).read()
- == "file contents\n"
- )
+ assert open(os.path.join(extract, "basicfolder", "basicsymlink")).read() == "file contents\n"
@pytest.mark.datafiles(DATA_DIR)
@@ -648,9 +613,7 @@ def test_build_checkout_links(datafiles, cli):
# of the symlink and the test therefore doesn't have the correct content
os.symlink(
os.path.join("..", "basicfile"),
- os.path.join(
- project, "files", "files-and-links", "basicfolder", "basicsymlink"
- ),
+ os.path.join(project, "files", "files-and-links", "basicfolder", "basicsymlink"),
)
result = cli.run(project=project, args=["build", "import-links.bst"])
@@ -671,10 +634,7 @@ def test_build_checkout_links(datafiles, cli):
result = cli.run(project=project, args=checkout_args)
result.assert_success()
- assert (
- open(os.path.join(checkout, "basicfolder", "basicsymlink")).read()
- == "file contents\n"
- )
+ assert open(os.path.join(checkout, "basicfolder", "basicsymlink")).read() == "file contents\n"
@pytest.mark.datafiles(DATA_DIR)
@@ -836,9 +796,7 @@ def test_unfetched_junction(cli, tmpdir, datafiles, ref_storage):
configure_project(project, {"ref-storage": ref_storage})
# Create a repo to hold the subproject and generate a junction element for it
- ref = generate_junction(
- tmpdir, subproject_path, junction_path, store_ref=(ref_storage == "inline")
- )
+ ref = generate_junction(tmpdir, subproject_path, junction_path, store_ref=(ref_storage == "inline"))
# Create a stack element to depend on a cross junction element
#
@@ -891,10 +849,7 @@ def test_build_checkout_junction(cli, tmpdir, datafiles):
assert cli.get_element_state(project, "junction-dep.bst") == "cached"
# Now check it out
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout],)
result.assert_success()
# Assert the content of /etc/animal.conf
@@ -934,10 +889,7 @@ def test_build_checkout_junction_default_targets(cli, tmpdir, datafiles):
assert cli.get_element_state(project, "junction-dep.bst") == "cached"
# Now check it out
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout],)
result.assert_success()
# Assert the content of /etc/animal.conf
@@ -970,10 +922,7 @@ def test_build_checkout_workspaced_junction(cli, tmpdir, datafiles):
# Now open a workspace on the junction
#
- result = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, "junction.bst"],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--directory", workspace, "junction.bst"],)
result.assert_success()
filename = os.path.join(workspace, "files", "etc-files", "etc", "animal.conf")
@@ -996,10 +945,7 @@ def test_build_checkout_workspaced_junction(cli, tmpdir, datafiles):
assert cli.get_element_state(project, "junction-dep.bst") == "cached"
# Now check it out
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout],)
result.assert_success()
# Assert the workspace modified content of /etc/animal.conf
@@ -1023,14 +969,7 @@ def test_build_checkout_cross_junction(datafiles, cli, tmpdir):
result.assert_success()
result = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- "junction.bst:import-etc.bst",
- "--directory",
- checkout,
- ],
+ project=project, args=["artifact", "checkout", "junction.bst:import-etc.bst", "--directory", checkout,],
)
result.assert_success()
@@ -1063,10 +1002,7 @@ def test_build_junction_short_notation(cli, tmpdir, datafiles):
assert cli.get_element_state(project, "junction-dep.bst") == "cached"
# Now check it out
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout],)
result.assert_success()
# Assert the content of /etc/animal.conf
@@ -1105,10 +1041,7 @@ def test_build_junction_short_notation_filename(cli, tmpdir, datafiles):
assert cli.get_element_state(project, "junction-dep.bst") == "cached"
# Now check it out
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout],)
result.assert_success()
# Assert the content of /etc/animal.conf
@@ -1133,9 +1066,7 @@ def test_build_junction_short_notation_with_junction(cli, tmpdir, datafiles):
# colon (:) as the separator
element = {
"kind": "stack",
- "depends": [
- {"filename": "junction.bst:import-etc.bst", "junction": "junction.bst",}
- ],
+ "depends": [{"filename": "junction.bst:import-etc.bst", "junction": "junction.bst",}],
}
_yaml.roundtrip_dump(element, element_path)
@@ -1202,30 +1133,17 @@ def test_partial_artifact_checkout_fetch(cli, datafiles, tmpdir):
# A push artifact cache means we have to pull to push to them, so
# delete some blobs from that CAS such that we have to fetch
- digest = utils.sha256sum(
- os.path.join(project, "files", "bin-files", "usr", "bin", "hello")
- )
+ digest = utils.sha256sum(os.path.join(project, "files", "bin-files", "usr", "bin", "hello"))
objpath = os.path.join(cli.directory, "cas", "objects", digest[:2], digest[2:])
os.unlink(objpath)
# Verify that the build-only dependency is not (complete) in the local cache
- result = cli.run(
- project=project,
- args=["artifact", "checkout", input_name, "--directory", checkout_dir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", input_name, "--directory", checkout_dir],)
result.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt")
# Verify that the pull method fetches relevant artifacts in order to stage
result = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- "--pull",
- input_name,
- "--directory",
- checkout_dir,
- ],
+ project=project, args=["artifact", "checkout", "--pull", input_name, "--directory", checkout_dir,],
)
result.assert_success()
@@ -1244,17 +1162,7 @@ def test_partial_checkout_fail(tmpdir, datafiles, cli):
cli.configure({"artifacts": {"url": share.repo, "push": True}})
res = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- "--pull",
- build_elt,
- "--directory",
- checkout_dir,
- ],
+ project=project, args=["artifact", "checkout", "--pull", build_elt, "--directory", checkout_dir,],
)
res.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt")
- assert re.findall(
- r"Remote \((\S+)\) does not have artifact (\S+) cached", res.stderr
- )
+ assert re.findall(r"Remote \((\S+)\) does not have artifact (\S+) cached", res.stderr)
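
The partial-checkout tests above locate a blob to delete by hashing a source file and unlinking the corresponding CAS object, which is addressed by its SHA-256 digest split after the first two hex digits. A sketch of that path scheme, inferred from the test code only (helper and root path are illustrative):

    import hashlib
    import os

    def cas_object_path(cas_root, data):
        # Objects live under objects/<first two hex digits>/<remaining digits>,
        # matching the "cas/objects" paths the tests unlink.
        digest = hashlib.sha256(data).hexdigest()
        return os.path.join(cas_root, "objects", digest[:2], digest[2:])

    print(cas_object_path("/cache/cas", b"hello\n"))
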
diff --git a/tests/frontend/completions.py b/tests/frontend/completions.py
index 075fd70f1..3603543c7 100644
--- a/tests/frontend/completions.py
+++ b/tests/frontend/completions.py
@@ -84,13 +84,7 @@ MIXED_ELEMENTS = PROJECT_ELEMENTS + INVALID_ELEMENTS
def assert_completion(cli, cmd, word_idx, expected, cwd=None):
result = cli.run(
- project=".",
- cwd=cwd,
- env={
- "_BST_COMPLETION": "complete",
- "COMP_WORDS": cmd,
- "COMP_CWORD": str(word_idx),
- },
+ project=".", cwd=cwd, env={"_BST_COMPLETION": "complete", "COMP_WORDS": cmd, "COMP_CWORD": str(word_idx),},
)
words = []
if result.output:
@@ -105,14 +99,7 @@ def assert_completion(cli, cmd, word_idx, expected, cwd=None):
def assert_completion_failed(cli, cmd, word_idx, expected, cwd=None):
- result = cli.run(
- cwd=cwd,
- env={
- "_BST_COMPLETION": "complete",
- "COMP_WORDS": cmd,
- "COMP_CWORD": str(word_idx),
- },
- )
+ result = cli.run(cwd=cwd, env={"_BST_COMPLETION": "complete", "COMP_WORDS": cmd, "COMP_CWORD": str(word_idx),},)
words = []
if result.output:
words = result.output.splitlines()
@@ -182,29 +169,14 @@ def test_option_choice(cli, cmd, word_idx, expected):
# Note that elements/ and files/ are partial completions and
# as such do not come with trailing whitespace
("bst --config ", 2, ["cache/", "elements/", "files/", "project.conf "], None),
- (
- "bst --log-file ",
- 2,
- ["cache/", "elements/", "files/", "project.conf "],
- None,
- ),
+ ("bst --log-file ", 2, ["cache/", "elements/", "files/", "project.conf "], None,),
("bst --config f", 2, ["files/"], None),
("bst --log-file f", 2, ["files/"], None),
("bst --config files", 2, ["files/bin-files/", "files/dev-files/"], None),
("bst --log-file files", 2, ["files/bin-files/", "files/dev-files/"], None),
("bst --config files/", 2, ["files/bin-files/", "files/dev-files/"], None),
- (
- "bst --log-file elements/",
- 2,
- [os.path.join("elements", e) + " " for e in PROJECT_ELEMENTS],
- None,
- ),
- (
- "bst --config ../",
- 2,
- ["../cache/", "../elements/", "../files/", "../project.conf "],
- "files",
- ),
+ ("bst --log-file elements/", 2, [os.path.join("elements", e) + " " for e in PROJECT_ELEMENTS], None,),
+ ("bst --config ../", 2, ["../cache/", "../elements/", "../files/", "../project.conf "], "files",),
(
"bst --config ../elements/",
2,
@@ -251,11 +223,7 @@ def test_option_directory(datafiles, cli, cmd, word_idx, expected, subdir):
"project",
"bst build com",
2,
- [
- "compose-all.bst ",
- "compose-include-bin.bst ",
- "compose-exclude-dev.bst ",
- ],
+ ["compose-all.bst ", "compose-include-bin.bst ", "compose-exclude-dev.bst ",],
None,
),
# When running from the files subdir
@@ -264,83 +232,37 @@ def test_option_directory(datafiles, cli, cmd, word_idx, expected, subdir):
"project",
"bst build com",
2,
- [
- "compose-all.bst ",
- "compose-include-bin.bst ",
- "compose-exclude-dev.bst ",
- ],
+ ["compose-all.bst ", "compose-include-bin.bst ", "compose-exclude-dev.bst ",],
"files",
),
# When passing the project directory
- (
- "project",
- "bst --directory ../ show ",
- 4,
- [e + " " for e in PROJECT_ELEMENTS],
- "files",
- ),
+ ("project", "bst --directory ../ show ", 4, [e + " " for e in PROJECT_ELEMENTS], "files",),
(
"project",
"bst --directory ../ build com",
4,
- [
- "compose-all.bst ",
- "compose-include-bin.bst ",
- "compose-exclude-dev.bst ",
- ],
+ ["compose-all.bst ", "compose-include-bin.bst ", "compose-exclude-dev.bst ",],
"files",
),
# Also try multi arguments together
- (
- "project",
- "bst --directory ../ artifact checkout t ",
- 5,
- ["target.bst "],
- "files",
- ),
- (
- "project",
- "bst --directory ../ artifact checkout --directory ",
- 6,
- ["bin-files/", "dev-files/"],
- "files",
- ),
+ ("project", "bst --directory ../ artifact checkout t ", 5, ["target.bst "], "files",),
+ ("project", "bst --directory ../ artifact checkout --directory ", 6, ["bin-files/", "dev-files/"], "files",),
# When running in the project directory
- (
- "no-element-path",
- "bst show ",
- 2,
- [e + " " for e in PROJECT_ELEMENTS] + ["files/"],
- None,
- ),
+ ("no-element-path", "bst show ", 2, [e + " " for e in PROJECT_ELEMENTS] + ["files/"], None,),
(
"no-element-path",
"bst build com",
2,
- [
- "compose-all.bst ",
- "compose-include-bin.bst ",
- "compose-exclude-dev.bst ",
- ],
+ ["compose-all.bst ", "compose-include-bin.bst ", "compose-exclude-dev.bst ",],
None,
),
# When running from the files subdir
- (
- "no-element-path",
- "bst show ",
- 2,
- [e + " " for e in PROJECT_ELEMENTS] + ["files/"],
- "files",
- ),
+ ("no-element-path", "bst show ", 2, [e + " " for e in PROJECT_ELEMENTS] + ["files/"], "files",),
(
"no-element-path",
"bst build com",
2,
- [
- "compose-all.bst ",
- "compose-include-bin.bst ",
- "compose-exclude-dev.bst ",
- ],
+ ["compose-all.bst ", "compose-include-bin.bst ", "compose-exclude-dev.bst ",],
"files",
),
# When passing the project directory
@@ -352,32 +274,16 @@ def test_option_directory(datafiles, cli, cmd, word_idx, expected, subdir):
"files",
),
("no-element-path", "bst --directory ../ show f", 4, ["files/"], "files"),
- (
- "no-element-path",
- "bst --directory ../ show files/",
- 4,
- ["files/bin-files/", "files/dev-files/"],
- "files",
- ),
+ ("no-element-path", "bst --directory ../ show files/", 4, ["files/bin-files/", "files/dev-files/"], "files",),
(
"no-element-path",
"bst --directory ../ build com",
4,
- [
- "compose-all.bst ",
- "compose-include-bin.bst ",
- "compose-exclude-dev.bst ",
- ],
+ ["compose-all.bst ", "compose-include-bin.bst ", "compose-exclude-dev.bst ",],
"files",
),
# Also try multi arguments together
- (
- "no-element-path",
- "bst --directory ../ artifact checkout t ",
- 5,
- ["target.bst "],
- "files",
- ),
+ ("no-element-path", "bst --directory ../ artifact checkout t ", 5, ["target.bst "], "files",),
(
"no-element-path",
"bst --directory ../ artifact checkout --directory ",
@@ -402,18 +308,10 @@ def test_argument_element(datafiles, cli, project, cmd, word_idx, expected, subd
"project,cmd,word_idx,expected,subdir",
[
# When element has invalid suffix
- (
- "project",
- "bst --directory ../ show ",
- 4,
- [e + " " for e in MIXED_ELEMENTS],
- "files",
- )
+ ("project", "bst --directory ../ show ", 4, [e + " " for e in MIXED_ELEMENTS], "files",)
],
)
-def test_argument_element_invalid(
- datafiles, cli, project, cmd, word_idx, expected, subdir
-):
+def test_argument_element_invalid(datafiles, cli, project, cmd, word_idx, expected, subdir):
cwd = os.path.join(str(datafiles), project)
if subdir:
cwd = os.path.join(cwd, subdir)
@@ -442,9 +340,7 @@ def test_argument_artifact(cli, datafiles):
project = str(datafiles)
# Build an import element with no dependencies (as there will only be ONE cache key)
- result = cli.run(
- project=project, args=["build", "import-bin.bst"]
- ) # Has no dependencies
+ result = cli.run(project=project, args=["build", "import-bin.bst"]) # Has no dependencies
result.assert_success()
# Get the key and the artifact ref ($project/$element_name/$key)
@@ -459,23 +355,15 @@ def test_argument_artifact(cli, datafiles):
result = cli.run(
project=project,
cwd=project,
- env={
- "_BST_COMPLETION": "complete",
- "COMP_WORDS": cmd,
- "COMP_CWORD": str(word_idx),
- },
+ env={"_BST_COMPLETION": "complete", "COMP_WORDS": cmd, "COMP_CWORD": str(word_idx),},
)
if result.output:
- words = (
- result.output.splitlines()
- ) # This leaves an extra space on each e.g. ['foo.bst ']
+ words = result.output.splitlines() # This leaves an extra space on each e.g. ['foo.bst ']
words = [word.strip() for word in words]
if i == 0:
- expected = PROJECT_ELEMENTS + [
- artifact
- ] # We should now be able to see the artifact
+ expected = PROJECT_ELEMENTS + [artifact] # We should now be able to see the artifact
elif i == 1:
expected = ["target.bst", artifact]
elif i == 2:
diff --git a/tests/frontend/compose_splits.py b/tests/frontend/compose_splits.py
index 3a308a9f5..d333b031e 100644
--- a/tests/frontend/compose_splits.py
+++ b/tests/frontend/compose_splits.py
@@ -9,9 +9,7 @@ from buildstream.testing.runcli import cli # pylint: disable=unused-import
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project",)
-@pytest.mark.parametrize(
- "target", [("compose-include-bin.bst"), ("compose-exclude-dev.bst")]
-)
+@pytest.mark.parametrize("target", [("compose-include-bin.bst"), ("compose-exclude-dev.bst")])
@pytest.mark.datafiles(DATA_DIR)
def test_compose_splits(datafiles, cli, target):
project = str(datafiles)
@@ -22,9 +20,7 @@ def test_compose_splits(datafiles, cli, target):
result.assert_success()
# Now check it out
- result = cli.run(
- project=project, args=["artifact", "checkout", target, "--directory", checkout]
- )
+ result = cli.run(project=project, args=["artifact", "checkout", target, "--directory", checkout])
result.assert_success()
# Check that the executable hello file is found in the checkout
diff --git a/tests/frontend/configurable_warnings.py b/tests/frontend/configurable_warnings.py
index f756aae2b..52cb03cec 100644
--- a/tests/frontend/configurable_warnings.py
+++ b/tests/frontend/configurable_warnings.py
@@ -19,11 +19,7 @@ def get_project(fatal_warnings):
"name": "test",
"element-path": "elements",
"plugins": [
- {
- "origin": "local",
- "path": "plugins",
- "elements": {"warninga": 0, "warningb": 0, "corewarn": 0,},
- }
+ {"origin": "local", "path": "plugins", "elements": {"warninga": 0, "warningb": 0, "corewarn": 0,},}
],
"fatal-warnings": fatal_warnings,
}
@@ -53,9 +49,7 @@ def build_project(datafiles, fatal_warnings):
("warningb.bst", [CoreWarnings.OVERLAPS], False, None),
],
)
-def test_fatal_warnings(
- cli, datafiles, element_name, fatal_warnings, expect_fatal, error_domain
-):
+def test_fatal_warnings(cli, datafiles, element_name, fatal_warnings, expect_fatal, error_domain):
if HAVE_SANDBOX == "buildbox" and error_domain != ErrorDomain.STREAM:
pytest.xfail()
project_path = build_project(datafiles, fatal_warnings)
diff --git a/tests/frontend/configuredwarning/plugins/corewarn.py b/tests/frontend/configuredwarning/plugins/corewarn.py
index 5e43115f7..7ca8daed9 100644
--- a/tests/frontend/configuredwarning/plugins/corewarn.py
+++ b/tests/frontend/configuredwarning/plugins/corewarn.py
@@ -20,8 +20,7 @@ class CoreWarn(Element):
def assemble(self, sandbox):
self.warn(
- "Testing: CoreWarning produced during assemble",
- warning_token=CoreWarnings.OVERLAPS,
+ "Testing: CoreWarning produced during assemble", warning_token=CoreWarnings.OVERLAPS,
)
diff --git a/tests/frontend/configuredwarning/plugins/warninga.py b/tests/frontend/configuredwarning/plugins/warninga.py
index dde90bb42..9fd8dc61b 100644
--- a/tests/frontend/configuredwarning/plugins/warninga.py
+++ b/tests/frontend/configuredwarning/plugins/warninga.py
@@ -20,9 +20,7 @@ class WarningA(Element):
pass
def assemble(self, sandbox):
- self.warn(
- "Testing: warning-a produced during assemble", warning_token=WARNING_A
- )
+ self.warn("Testing: warning-a produced during assemble", warning_token=WARNING_A)
def setup():
diff --git a/tests/frontend/configuredwarning/plugins/warningb.py b/tests/frontend/configuredwarning/plugins/warningb.py
index d9229f0d0..64d25ef39 100644
--- a/tests/frontend/configuredwarning/plugins/warningb.py
+++ b/tests/frontend/configuredwarning/plugins/warningb.py
@@ -20,9 +20,7 @@ class WarningB(Element):
pass
def assemble(self, sandbox):
- self.warn(
- "Testing: warning-b produced during assemble", warning_token=WARNING_B
- )
+ self.warn("Testing: warning-b produced during assemble", warning_token=WARNING_B)
def setup():
diff --git a/tests/frontend/consistencyerror/plugins/consistencyerror.py b/tests/frontend/consistencyerror/plugins/consistencyerror.py
index 656bd981c..125baf39c 100644
--- a/tests/frontend/consistencyerror/plugins/consistencyerror.py
+++ b/tests/frontend/consistencyerror/plugins/consistencyerror.py
@@ -14,9 +14,7 @@ class ConsistencyErrorSource(Source):
def get_consistency(self):
# Raise an error unconditionally
- raise SourceError(
- "Something went terribly wrong", reason="the-consistency-error"
- )
+ raise SourceError("Something went terribly wrong", reason="the-consistency-error")
def get_ref(self):
return None
diff --git a/tests/frontend/cross_junction_workspace.py b/tests/frontend/cross_junction_workspace.py
index 90e68d8ac..3ac3e8814 100644
--- a/tests/frontend/cross_junction_workspace.py
+++ b/tests/frontend/cross_junction_workspace.py
@@ -27,8 +27,7 @@ def prepare_junction_project(cli, tmpdir):
import_ref = import_repo.create(str(import_dir))
_yaml.roundtrip_dump(
- {"kind": "import", "sources": [import_repo.source_config(ref=import_ref)]},
- str(sub_project.join("data.bst")),
+ {"kind": "import", "sources": [import_repo.source_config(ref=import_ref)]}, str(sub_project.join("data.bst")),
)
sub_repo_dir = tmpdir.join("sub_repo")
@@ -37,8 +36,7 @@ def prepare_junction_project(cli, tmpdir):
sub_ref = sub_repo.create(str(sub_project))
_yaml.roundtrip_dump(
- {"kind": "junction", "sources": [sub_repo.source_config(ref=sub_ref)]},
- str(main_project.join("sub.bst")),
+ {"kind": "junction", "sources": [sub_repo.source_config(ref=sub_ref)]}, str(main_project.join("sub.bst")),
)
args = ["source", "fetch", "sub.bst"]
diff --git a/tests/frontend/fetch.py b/tests/frontend/fetch.py
index d34764d13..10a420ddd 100644
--- a/tests/frontend/fetch.py
+++ b/tests/frontend/fetch.py
@@ -85,9 +85,7 @@ def test_unfetched_junction(cli, tmpdir, datafiles, strict, ref_storage):
cli.configure({"projects": {"test": {"strict": strict}}})
# Create a repo to hold the subproject and generate a junction element for it
- ref = generate_junction(
- tmpdir, subproject_path, junction_path, store_ref=(ref_storage == "inline")
- )
+ ref = generate_junction(tmpdir, subproject_path, junction_path, store_ref=(ref_storage == "inline"))
# Create a stack element to depend on a cross junction element
#
diff --git a/tests/frontend/help.py b/tests/frontend/help.py
index 3bbae44f5..de3b0e678 100644
--- a/tests/frontend/help.py
+++ b/tests/frontend/help.py
@@ -9,8 +9,7 @@ def assert_help(cli_output):
expected_start = "Usage: "
if not cli_output.startswith(expected_start):
raise AssertionError(
- "Help output expected to begin with '{}',".format(expected_start)
- + " output was: {}".format(cli_output)
+ "Help output expected to begin with '{}',".format(expected_start) + " output was: {}".format(cli_output)
)
@@ -21,16 +20,7 @@ def test_help_main(cli):
@pytest.mark.parametrize(
- "command",
- [
- ("artifact"),
- ("build"),
- ("checkout"),
- ("shell"),
- ("show"),
- ("source"),
- ("workspace"),
- ],
+ "command", [("artifact"), ("build"), ("checkout"), ("shell"), ("show"), ("source"), ("workspace"),],
)
def test_help(cli, command):
result = cli.run(args=[command, "--help"])
diff --git a/tests/frontend/init.py b/tests/frontend/init.py
index 01686b7c6..aef9d148e 100644
--- a/tests/frontend/init.py
+++ b/tests/frontend/init.py
@@ -29,16 +29,7 @@ def test_all_options(cli, tmpdir):
project_path = os.path.join(project, "project.conf")
result = cli.run(
- args=[
- "init",
- "--project-name",
- "foo",
- "--format-version",
- "2",
- "--element-path",
- "ponies",
- project,
- ]
+ args=["init", "--project-name", "foo", "--format-version", "2", "--element-path", "ponies", project,]
)
result.assert_success()
@@ -96,9 +87,7 @@ def test_relative_path_directory_as_argument(cli, tmpdir):
def test_set_directory_and_directory_as_argument(cli, tmpdir):
- result = cli.run(
- args=["-C", "/foo/bar", "init", "--project-name", "foo", "/boo/far"]
- )
+ result = cli.run(args=["-C", "/foo/bar", "init", "--project-name", "foo", "/boo/far"])
result.assert_main_error(ErrorDomain.APP, "init-with-set-directory")
@@ -110,33 +99,13 @@ def test_bad_project_name(cli, tmpdir, project_name):
@pytest.mark.parametrize("format_version", [(str(-1)), (str(BST_FORMAT_VERSION + 1))])
def test_bad_format_version(cli, tmpdir, format_version):
- result = cli.run(
- args=[
- "init",
- "--project-name",
- "foo",
- "--format-version",
- format_version,
- str(tmpdir),
- ]
- )
+ result = cli.run(args=["init", "--project-name", "foo", "--format-version", format_version, str(tmpdir),])
result.assert_main_error(ErrorDomain.APP, "invalid-format-version")
-@pytest.mark.parametrize(
- "element_path", [("/absolute/path"), ("../outside/of/project")]
-)
+@pytest.mark.parametrize("element_path", [("/absolute/path"), ("../outside/of/project")])
def test_bad_element_path(cli, tmpdir, element_path):
- result = cli.run(
- args=[
- "init",
- "--project-name",
- "foo",
- "--element-path",
- element_path,
- str(tmpdir),
- ]
- )
+ result = cli.run(args=["init", "--project-name", "foo", "--element-path", element_path, str(tmpdir),])
result.assert_main_error(ErrorDomain.APP, "invalid-element-path")
@@ -154,9 +123,7 @@ def test_element_path_interactive(cli, tmp_path, monkeypatch, element_path):
def create(cls, *args, **kwargs):
return DummyInteractiveApp(*args, **kwargs)
- def _init_project_interactive(
- self, *args, **kwargs
- ): # pylint: disable=arguments-differ
+ def _init_project_interactive(self, *args, **kwargs): # pylint: disable=arguments-differ
return ("project_name", "0", element_path)
monkeypatch.setattr(App, "create", DummyInteractiveApp.create)
diff --git a/tests/frontend/large_directory.py b/tests/frontend/large_directory.py
index e01d5f3c6..ea29fd1ca 100644
--- a/tests/frontend/large_directory.py
+++ b/tests/frontend/large_directory.py
@@ -37,9 +37,7 @@ def limit_grpc_message_length(limit):
orig_insecure_channel = grpc.insecure_channel
def new_insecure_channel(target):
- return orig_insecure_channel(
- target, options=(("grpc.max_send_message_length", limit),)
- )
+ return orig_insecure_channel(target, options=(("grpc.max_send_message_length", limit),))
grpc.insecure_channel = new_insecure_channel
try:
@@ -71,9 +69,7 @@ def test_large_directory(cli, tmpdir, datafiles):
# Enforce 1 MB gRPC message limit
with limit_grpc_message_length(MAX_MESSAGE_LENGTH):
# Build and push
- result = cli.run(
- project=project, args=["build", "import-large-directory.bst"]
- )
+ result = cli.run(project=project, args=["build", "import-large-directory.bst"])
result.assert_success()
# Assert that we are now cached locally
diff --git a/tests/frontend/logging.py b/tests/frontend/logging.py
index d4f8d0d23..27ff88352 100644
--- a/tests/frontend/logging.py
+++ b/tests/frontend/logging.py
@@ -37,9 +37,7 @@ def test_default_logging(cli, tmpdir, datafiles):
result = cli.run(project=project, args=["source", "fetch", element_name])
result.assert_success()
- m = re.search(
- r"\[\d\d:\d\d:\d\d\]\[\s*\]\[.*\] SUCCESS Checking sources", result.stderr
- )
+ m = re.search(r"\[\d\d:\d\d:\d\d\]\[\s*\]\[.*\] SUCCESS Checking sources", result.stderr)
assert m is not None
@@ -51,8 +49,7 @@ def test_custom_logging(cli, tmpdir, datafiles):
element_name = "fetch-test-git.bst"
custom_log_format = (
- "%{elapsed},%{elapsed-us},%{wallclock},%{wallclock-us},"
- "%{key},%{element},%{action},%{message}"
+ "%{elapsed},%{elapsed-us},%{wallclock},%{wallclock-us}," "%{key},%{element},%{action},%{message}"
)
user_config = {"logging": {"message-format": custom_log_format}}
cli.configure(user_config)
@@ -72,8 +69,7 @@ def test_custom_logging(cli, tmpdir, datafiles):
result.assert_success()
m = re.search(
- r"\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6},\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6}\s*,.*"
- r",SUCCESS,Checking sources",
+ r"\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6},\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6}\s*,.*" r",SUCCESS,Checking sources",
result.stderr,
)
assert m is not None
@@ -89,9 +85,7 @@ def test_failed_build_listing(cli, datafiles):
element = {"kind": "script", "config": {"commands": ["false"]}}
_yaml.roundtrip_dump(element, os.path.join(project, element_path))
element_names.append(element_name)
- result = cli.run(
- project=project, args=["--on-error=continue", "build", *element_names]
- )
+ result = cli.run(project=project, args=["--on-error=continue", "build", *element_names])
result.assert_main_error(ErrorDomain.STREAM, None)
# Check that we re-print the failure summaries only in the "Failure Summary"
@@ -102,12 +96,8 @@ def test_failed_build_listing(cli, datafiles):
# testfail-0.bst:
# [00:00:00][44f1b8c3][ build:testfail-0.bst ] FAILURE Running 'commands'
#
- failure_heading_pos = re.search(
- r"^Failure Summary$", result.stderr, re.MULTILINE
- ).start()
- pipeline_heading_pos = re.search(
- r"^Pipeline Summary$", result.stderr, re.MULTILINE
- ).start()
+ failure_heading_pos = re.search(r"^Failure Summary$", result.stderr, re.MULTILINE).start()
+ pipeline_heading_pos = re.search(r"^Pipeline Summary$", result.stderr, re.MULTILINE).start()
failure_summary_range = range(failure_heading_pos, pipeline_heading_pos)
matches = tuple(re.finditer(r"^\s+testfail-.\.bst:$", result.stderr, re.MULTILINE))
for m in matches:
@@ -119,6 +109,4 @@ def test_failed_build_listing(cli, datafiles):
# with the name of the relevant element, e.g. 'testfail-1.bst'. Check that
# they have the name as expected.
pattern = r"\[..:..:..\] FAILURE testfail-.\.bst: Staged artifacts do not provide command 'sh'"
- assert (
- len(re.findall(pattern, result.stderr, re.MULTILINE)) == 6
- ) # each element should be matched twice.
+ assert len(re.findall(pattern, result.stderr, re.MULTILINE)) == 6 # each element should be matched twice.
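
The single-line joins above (for example the custom log format in test_custom_logging) leave Python's implicit concatenation of adjacent string literals in place. Both spellings compile to the same constant, as this small check shows:

    # Adjacent string literals are concatenated at compile time, so the joined
    # one-line form and a single literal are identical.
    joined = "%{elapsed},%{elapsed-us},%{wallclock},%{wallclock-us}," "%{key},%{element},%{action},%{message}"
    single = "%{elapsed},%{elapsed-us},%{wallclock},%{wallclock-us},%{key},%{element},%{action},%{message}"
    assert joined == single
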
diff --git a/tests/frontend/mirror.py b/tests/frontend/mirror.py
index dbd21e1e9..1146893cd 100644
--- a/tests/frontend/mirror.py
+++ b/tests/frontend/mirror.py
@@ -48,9 +48,7 @@ def generate_project():
{"name": "arrakis", "aliases": {"foo": ["OFO/"], "bar": ["RBA/"],},},
{"name": "oz", "aliases": {"foo": ["ooF/"], "bar": ["raB/"],}},
],
- "plugins": [
- {"origin": "local", "path": "sources", "sources": {"fetch_source": 0}}
- ],
+ "plugins": [{"origin": "local", "path": "sources", "sources": {"fetch_source": 0}}],
}
return project
@@ -75,11 +73,7 @@ def test_mirror_fetch_ref_storage(cli, tmpdir, datafiles, ref_storage, mirror):
element = {
"kind": "import",
- "sources": [
- upstream_repo.source_config(
- ref=upstream_ref if ref_storage == "inline" else None
- )
- ],
+ "sources": [upstream_repo.source_config(ref=upstream_ref if ref_storage == "inline" else None)],
}
element_name = "test.bst"
element_path = os.path.join(element_dir, element_name)
@@ -109,11 +103,7 @@ def test_mirror_fetch_ref_storage(cli, tmpdir, datafiles, ref_storage, mirror):
mirror_data = [{"name": "middle-earth", "aliases": {alias: [mirror_map + "/"]}}]
if mirror == "unrelated-mirror":
mirror_data.insert(
- 0,
- {
- "name": "narnia",
- "aliases": {"frob": ["http://www.example.com/repo"]},
- },
+ 0, {"name": "narnia", "aliases": {"frob": ["http://www.example.com/repo"]},},
)
project["mirrors"] = mirror_data
@@ -164,10 +154,7 @@ def test_mirror_fetch_default_cmdline(cli, tmpdir):
project = generate_project()
_yaml.roundtrip_dump(project, project_file)
- result = cli.run(
- project=project_dir,
- args=["--default-mirror", "arrakis", "source", "fetch", element_name],
- )
+ result = cli.run(project=project_dir, args=["--default-mirror", "arrakis", "source", "fetch", element_name],)
result.assert_success()
with open(output_file) as f:
contents = f.read()
@@ -179,9 +166,7 @@ def test_mirror_fetch_default_cmdline(cli, tmpdir):
me_str = "OOF/repo1"
me_pos = contents.find(me_str)
assert me_pos != -1, "'{}' wasn't found".format(me_str)
- assert arrakis_pos < me_pos, "'{}' wasn't found before '{}'".format(
- arrakis_str, me_str
- )
+ assert arrakis_pos < me_pos, "'{}' wasn't found before '{}'".format(arrakis_str, me_str)
@pytest.mark.datafiles(DATA_DIR)
@@ -237,10 +222,7 @@ def test_mirror_fetch_default_cmdline_overrides_config(cli, tmpdir):
userconfig = {"projects": {"test": {"default-mirror": "oz"}}}
cli.configure(userconfig)
- result = cli.run(
- project=project_dir,
- args=["--default-mirror", "arrakis", "source", "fetch", element_name],
- )
+ result = cli.run(project=project_dir, args=["--default-mirror", "arrakis", "source", "fetch", element_name],)
result.assert_success()
with open(output_file) as f:
contents = f.read()
@@ -252,9 +234,7 @@ def test_mirror_fetch_default_cmdline_overrides_config(cli, tmpdir):
me_str = "OOF/repo1"
me_pos = contents.find(me_str)
assert me_pos != -1, "'{}' wasn't found".format(me_str)
- assert arrakis_pos < me_pos, "'{}' wasn't found before '{}'".format(
- arrakis_str, me_str
- )
+ assert arrakis_pos < me_pos, "'{}' wasn't found before '{}'".format(arrakis_str, me_str)
@pytest.mark.datafiles(DATA_DIR)
@@ -317,9 +297,7 @@ def test_mirror_git_submodule_fetch(cli, tmpdir, datafiles):
"name": "test",
"element-path": "elements",
"aliases": {alias: "http://www.example.com/"},
- "mirrors": [
- {"name": "middle-earth", "aliases": {alias: [mirror_map + "/"],},},
- ],
+ "mirrors": [{"name": "middle-earth", "aliases": {alias: [mirror_map + "/"],},},],
}
project_file = os.path.join(project_dir, "project.conf")
_yaml.roundtrip_dump(project, project_file)
@@ -382,9 +360,7 @@ def test_mirror_fallback_git_only_submodules(cli, tmpdir, datafiles):
element = {
"kind": "import",
- "sources": [
- main_repo.source_config_extra(ref=main_ref, checkout_submodules=True)
- ],
+ "sources": [main_repo.source_config_extra(ref=main_ref, checkout_submodules=True)],
}
element_name = "test.bst"
element_path = os.path.join(element_dir, element_name)
@@ -409,10 +385,7 @@ def test_mirror_fallback_git_only_submodules(cli, tmpdir, datafiles):
result.assert_success()
checkout = os.path.join(str(tmpdir), "checkout")
- result = cli.run(
- project=project_dir,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project_dir, args=["artifact", "checkout", element_name, "--directory", checkout],)
result.assert_success()
assert os.path.exists(os.path.join(checkout, "bin", "bin", "hello"))
@@ -471,11 +444,7 @@ def test_mirror_fallback_git_with_submodules(cli, tmpdir, datafiles):
element = {
"kind": "import",
- "sources": [
- upstream_main_repo.source_config_extra(
- ref=upstream_main_ref, checkout_submodules=True
- )
- ],
+ "sources": [upstream_main_repo.source_config_extra(ref=upstream_main_ref, checkout_submodules=True)],
}
element["sources"][0]["url"] = aliased_repo
element_name = "test.bst"
@@ -501,10 +470,7 @@ def test_mirror_fallback_git_with_submodules(cli, tmpdir, datafiles):
result.assert_success()
checkout = os.path.join(str(tmpdir), "checkout")
- result = cli.run(
- project=project_dir,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project_dir, args=["artifact", "checkout", element_name, "--directory", checkout],)
result.assert_success()
assert os.path.exists(os.path.join(checkout, "bin", "bin", "hello"))
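
The mirror tests above generate project.conf fragments where the top-level aliases map an alias to a single upstream URL, while each mirror maps the same alias to a list of substitute URLs. A sketch of that shape with placeholder values (keys follow the fragments visible in this diff):

    # Placeholder mirror configuration; values are illustrative only.
    project_conf = {
        "name": "test",
        "aliases": {"foo": "http://www.example.com/"},
        "mirrors": [
            {"name": "middle-earth", "aliases": {"foo": ["file:///local/mirror/"]}},
        ],
    }
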
diff --git a/tests/frontend/order.py b/tests/frontend/order.py
index a66064694..9032379ef 100644
--- a/tests/frontend/order.py
+++ b/tests/frontend/order.py
@@ -57,12 +57,7 @@ def create_element(project, name, dependencies):
# First simple test
(
"3.bst",
- {
- "0.bst": ["1.bst"],
- "1.bst": [],
- "2.bst": ["0.bst"],
- "3.bst": ["0.bst", "1.bst", "2.bst"],
- },
+ {"0.bst": ["1.bst"], "1.bst": [], "2.bst": ["0.bst"], "3.bst": ["0.bst", "1.bst", "2.bst"],},
["1.bst", "0.bst", "2.bst", "3.bst"],
),
# A more complicated test with build of build dependencies
@@ -74,22 +69,9 @@ def create_element(project, name, dependencies):
"timezones.bst": [],
"middleware.bst": [{"filename": "base.bst", "type": "build"}],
"app.bst": [{"filename": "middleware.bst", "type": "build"}],
- "target.bst": [
- "a.bst",
- "base.bst",
- "middleware.bst",
- "app.bst",
- "timezones.bst",
- ],
+ "target.bst": ["a.bst", "base.bst", "middleware.bst", "app.bst", "timezones.bst",],
},
- [
- "base.bst",
- "middleware.bst",
- "a.bst",
- "app.bst",
- "timezones.bst",
- "target.bst",
- ],
+ ["base.bst", "middleware.bst", "a.bst", "app.bst", "timezones.bst", "target.bst",],
),
],
)
@@ -109,18 +91,12 @@ def test_order(cli, datafiles, operation, target, template, expected):
# Run test and collect results
if operation == "show":
- result = cli.run(
- args=["show", "--deps", "plan", "--format", "%{name}", target],
- project=project,
- silent=True,
- )
+ result = cli.run(args=["show", "--deps", "plan", "--format", "%{name}", target], project=project, silent=True,)
result.assert_success()
results = result.output.splitlines()
else:
if operation == "fetch":
- result = cli.run(
- args=["source", "fetch", target], project=project, silent=True
- )
+ result = cli.run(args=["source", "fetch", target], project=project, silent=True)
else:
result = cli.run(args=[operation, target], project=project, silent=True)
result.assert_success()
diff --git a/tests/frontend/overlaps.py b/tests/frontend/overlaps.py
index 4f6f72af5..d3e0c9d60 100644
--- a/tests/frontend/overlaps.py
+++ b/tests/frontend/overlaps.py
@@ -13,9 +13,7 @@ from tests.testutils import generate_junction
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "overlaps")
-def gen_project(
- project_dir, fail_on_overlap, use_fatal_warnings=True, project_name="test"
-):
+def gen_project(project_dir, fail_on_overlap, use_fatal_warnings=True, project_name="test"):
template = {"name": project_name}
if use_fatal_warnings:
template["fatal-warnings"] = [CoreWarnings.OVERLAPS] if fail_on_overlap else []
@@ -48,9 +46,7 @@ def test_overlaps_error(cli, datafiles, use_fatal_warnings):
def test_overlaps_whitelist(cli, datafiles):
project_dir = str(datafiles)
gen_project(project_dir, True)
- result = cli.run(
- project=project_dir, silent=True, args=["build", "collect-whitelisted.bst"]
- )
+ result = cli.run(project=project_dir, silent=True, args=["build", "collect-whitelisted.bst"])
result.assert_success()
@@ -58,9 +54,7 @@ def test_overlaps_whitelist(cli, datafiles):
def test_overlaps_whitelist_ignored(cli, datafiles):
project_dir = str(datafiles)
gen_project(project_dir, False)
- result = cli.run(
- project=project_dir, silent=True, args=["build", "collect-whitelisted.bst"]
- )
+ result = cli.run(project=project_dir, silent=True, args=["build", "collect-whitelisted.bst"])
result.assert_success()
@@ -71,11 +65,7 @@ def test_overlaps_whitelist_on_overlapper(cli, datafiles):
# it'll still fail because A doesn't permit overlaps.
project_dir = str(datafiles)
gen_project(project_dir, True)
- result = cli.run(
- project=project_dir,
- silent=True,
- args=["build", "collect-partially-whitelisted.bst"],
- )
+ result = cli.run(project=project_dir, silent=True, args=["build", "collect-partially-whitelisted.bst"],)
result.assert_main_error(ErrorDomain.STREAM, None)
result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.OVERLAPS)
@@ -100,9 +90,7 @@ def test_overlap_subproject(cli, tmpdir, datafiles, project_policy, subproject_p
junction_path = os.path.join(project_dir, "sub-project.bst")
gen_project(project_dir, bool(project_policy == "fail"), project_name="test")
- gen_project(
- subproject_dir, bool(subproject_policy == "fail"), project_name="subtest"
- )
+ gen_project(subproject_dir, bool(subproject_policy == "fail"), project_name="subtest")
generate_junction(tmpdir, subproject_dir, junction_path)
# Here we have a dependency chain where the project element
@@ -111,9 +99,7 @@ def test_overlap_subproject(cli, tmpdir, datafiles, project_policy, subproject_p
# Test that overlap error vs warning policy for this overlap
# is always controlled by the project and not the subproject.
#
- result = cli.run(
- project=project_dir, silent=True, args=["build", "sub-collect.bst"]
- )
+ result = cli.run(project=project_dir, silent=True, args=["build", "sub-collect.bst"])
if project_policy == "fail":
result.assert_main_error(ErrorDomain.STREAM, None)
result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.OVERLAPS)
diff --git a/tests/frontend/progress.py b/tests/frontend/progress.py
index 3ca81f543..86abe830c 100644
--- a/tests/frontend/progress.py
+++ b/tests/frontend/progress.py
@@ -43,9 +43,7 @@ def test_junction_tally(cli, tmpdir, datafiles):
}
_yaml.roundtrip_dump(element, element_path)
- result = cli.run(
- project=project, silent=True, args=["source", "fetch", "junction.bst"]
- )
+ result = cli.run(project=project, silent=True, args=["source", "fetch", "junction.bst"])
result.assert_success()
# Assert the correct progress tallies are in the logging
@@ -62,9 +60,7 @@ def test_nested_junction_tally(cli, tmpdir, datafiles):
sub1_path = os.path.join(project, "files", "sub-project")
sub2_path = os.path.join(project, "files", "sub2-project")
# A junction element which pulls sub1 into sub2
- sub1_element = os.path.join(
- project, "files", "sub2-project", "elements", "sub-junction.bst"
- )
+ sub1_element = os.path.join(project, "files", "sub2-project", "elements", "sub-junction.bst")
# A junction element which pulls sub2 into the main project
sub2_element = os.path.join(project, "elements", "junction.bst")
element_path = os.path.join(project, "elements", "junction-dep.bst")
@@ -80,9 +76,7 @@ def test_nested_junction_tally(cli, tmpdir, datafiles):
}
_yaml.roundtrip_dump(element, element_path)
- result = cli.run(
- project=project, silent=True, args=["source", "fetch", "junction.bst"]
- )
+ result = cli.run(project=project, silent=True, args=["source", "fetch", "junction.bst"])
result.assert_success()
# Assert the correct progress tallies are in the logging
@@ -116,9 +110,7 @@ def test_junction_dep_tally(cli, tmpdir, datafiles):
}
_yaml.roundtrip_dump(element, element_path)
- result = cli.run(
- project=project, silent=True, args=["source", "fetch", "junction-dep.bst"]
- )
+ result = cli.run(project=project, silent=True, args=["source", "fetch", "junction-dep.bst"])
# Since we aren't allowed to specify any dependencies on a
# junction, we should fail
diff --git a/tests/frontend/project/sources/fetch_source.py b/tests/frontend/project/sources/fetch_source.py
index cb3ab024e..51bfe1049 100644
--- a/tests/frontend/project/sources/fetch_source.py
+++ b/tests/frontend/project/sources/fetch_source.py
@@ -22,16 +22,10 @@ class FetchFetcher(SourceFetcher):
self.mark_download_url(url)
def fetch(self, alias_override=None):
- url = self.source.translate_url(
- self.original_url, alias_override=alias_override, primary=self.primary
- )
+ url = self.source.translate_url(self.original_url, alias_override=alias_override, primary=self.primary)
with open(self.source.output_file, "a") as f:
- success = (
- url in self.source.fetch_succeeds and self.source.fetch_succeeds[url]
- )
- message = "Fetch {} {} from {}\n".format(
- self.original_url, "succeeded" if success else "failed", url
- )
+ success = url in self.source.fetch_succeeds and self.source.fetch_succeeds[url]
+ message = "Fetch {} {} from {}\n".format(self.original_url, "succeeded" if success else "failed", url)
f.write(message)
if not success:
raise SourceError("Failed to fetch {}".format(url))
@@ -42,10 +36,7 @@ class FetchSource(Source):
def configure(self, node):
self.original_urls = node.get_str_list("urls")
self.output_file = node.get_str("output-text")
- self.fetch_succeeds = {
- key: value.as_bool()
- for key, value in node.get_mapping("fetch-succeeds", {}).items()
- }
+ self.fetch_succeeds = {key: value.as_bool() for key, value in node.get_mapping("fetch-succeeds", {}).items()}
# First URL is the primary one for this test
#
diff --git a/tests/frontend/pull.py b/tests/frontend/pull.py
index 970987d36..100a9a914 100644
--- a/tests/frontend/pull.py
+++ b/tests/frontend/pull.py
@@ -60,9 +60,7 @@ def test_push_pull_all(cli, tmpdir, datafiles):
assert not any(states[e] == "cached" for e in all_elements)
# Now try bst artifact pull
- result = cli.run(
- project=project, args=["artifact", "pull", "--deps", "all", "target.bst"]
- )
+ result = cli.run(project=project, args=["artifact", "pull", "--deps", "all", "target.bst"])
result.assert_success()
# And assert that it's again in the local cache, without having built
@@ -132,21 +130,12 @@ def test_push_pull_default_targets(cli, tmpdir, datafiles):
def test_pull_secondary_cache(cli, tmpdir, datafiles):
project = str(datafiles)
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare1")
- ) as share1, create_artifact_share(
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare1")) as share1, create_artifact_share(
os.path.join(str(tmpdir), "artifactshare2")
) as share2:
# Build the target and push it to share2 only.
- cli.configure(
- {
- "artifacts": [
- {"url": share1.repo, "push": False},
- {"url": share2.repo, "push": True},
- ]
- }
- )
+ cli.configure({"artifacts": [{"url": share1.repo, "push": False}, {"url": share2.repo, "push": True},]})
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
@@ -180,9 +169,7 @@ def test_pull_secondary_cache(cli, tmpdir, datafiles):
def test_push_pull_specific_remote(cli, tmpdir, datafiles):
project = str(datafiles)
- with create_artifact_share(
- os.path.join(str(tmpdir), "goodartifactshare")
- ) as good_share, create_artifact_share(
+ with create_artifact_share(os.path.join(str(tmpdir), "goodartifactshare")) as good_share, create_artifact_share(
os.path.join(str(tmpdir), "badartifactshare")
) as bad_share:
@@ -200,10 +187,7 @@ def test_push_pull_specific_remote(cli, tmpdir, datafiles):
)
# Now try `bst artifact push` to the good_share.
- result = cli.run(
- project=project,
- args=["artifact", "push", "target.bst", "--remote", good_share.repo],
- )
+ result = cli.run(project=project, args=["artifact", "push", "target.bst", "--remote", good_share.repo],)
result.assert_success()
# Assert that all the artifacts are in the share we pushed
@@ -219,10 +203,7 @@ def test_push_pull_specific_remote(cli, tmpdir, datafiles):
artifactdir = os.path.join(cli.directory, "artifacts")
shutil.rmtree(artifactdir)
- result = cli.run(
- project=project,
- args=["artifact", "pull", "target.bst", "--remote", good_share.repo],
- )
+ result = cli.run(project=project, args=["artifact", "pull", "target.bst", "--remote", good_share.repo],)
result.assert_success()
# And assert that it's again in the local cache, without having built
@@ -240,10 +221,7 @@ def test_push_pull_non_strict(cli, tmpdir, datafiles):
with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
# First build the target element and push to the remote.
cli.configure(
- {
- "artifacts": {"url": share.repo, "push": True},
- "projects": {"test": {"strict": False}},
- }
+ {"artifacts": {"url": share.repo, "push": True}, "projects": {"test": {"strict": False}},}
)
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
@@ -272,9 +250,7 @@ def test_push_pull_non_strict(cli, tmpdir, datafiles):
assert cli.get_element_state(project, element_name) != "cached"
# Add a file to force change in strict cache key of import-bin.bst
- with open(
- os.path.join(str(project), "files", "bin-files", "usr", "bin", "world"), "w"
- ) as f:
+ with open(os.path.join(str(project), "files", "bin-files", "usr", "bin", "world"), "w") as f:
f.write("world")
# Assert that the workspaced element requires a rebuild
@@ -283,9 +259,7 @@ def test_push_pull_non_strict(cli, tmpdir, datafiles):
assert cli.get_element_state(project, "target.bst") == "waiting"
# Now try bst artifact pull
- result = cli.run(
- project=project, args=["artifact", "pull", "--deps", "all", "target.bst"]
- )
+ result = cli.run(project=project, args=["artifact", "pull", "--deps", "all", "target.bst"])
result.assert_success()
# And assert that the target is again in the local cache, without having built
@@ -313,14 +287,10 @@ def test_push_pull_cross_junction(cli, tmpdir, datafiles):
artifact_dir = os.path.join(project, "cache", "artifacts")
shutil.rmtree(artifact_dir)
- assert (
- cli.get_element_state(project, "junction.bst:import-etc.bst") == "buildable"
- )
+ assert cli.get_element_state(project, "junction.bst:import-etc.bst") == "buildable"
# Now try bst artifact pull
- result = cli.run(
- project=project, args=["artifact", "pull", "junction.bst:import-etc.bst"]
- )
+ result = cli.run(project=project, args=["artifact", "pull", "junction.bst:import-etc.bst"])
result.assert_success()
# And assert that it's again in the local cache, without having built
@@ -418,9 +388,7 @@ def test_pull_missing_local_blob(cli, tmpdir, datafiles):
# This is a placeholder to test partial CAS handling until we support
# partial artifact pulling (or blob-based CAS expiry).
#
- digest = utils.sha256sum(
- os.path.join(project, "files", "bin-files", "usr", "bin", "hello")
- )
+ digest = utils.sha256sum(os.path.join(project, "files", "bin-files", "usr", "bin", "hello"))
objpath = os.path.join(cli.directory, "cas", "objects", digest[:2], digest[2:])
os.unlink(objpath)
@@ -443,9 +411,7 @@ def test_pull_missing_notifies_user(caplog, cli, tmpdir, datafiles):
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- assert (
- not result.get_pulled_elements()
- ), "No elements should have been pulled since the cache was empty"
+ assert not result.get_pulled_elements(), "No elements should have been pulled since the cache was empty"
assert "INFO Remote ({}) does not have".format(share.repo) in result.stderr
assert "SKIPPED Pull" in result.stderr
@@ -456,19 +422,13 @@ def test_build_remote_option(caplog, cli, tmpdir, datafiles):
project = str(datafiles)
caplog.set_level(1)
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare1")
- ) as shareuser, create_artifact_share(
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare1")) as shareuser, create_artifact_share(
os.path.join(str(tmpdir), "artifactshare2")
- ) as shareproject, create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare3")
- ) as sharecli:
+ ) as shareproject, create_artifact_share(os.path.join(str(tmpdir), "artifactshare3")) as sharecli:
# Add shareproject repo url to project.conf
with open(os.path.join(project, "project.conf"), "a") as projconf:
- projconf.write(
- "artifacts:\n url: {}\n push: True".format(shareproject.repo)
- )
+ projconf.write("artifacts:\n url: {}\n push: True".format(shareproject.repo))
# Configure shareuser remote in user conf
cli.configure({"artifacts": {"url": shareuser.repo, "push": True}})
@@ -489,9 +449,7 @@ def test_build_remote_option(caplog, cli, tmpdir, datafiles):
# Now check that a build with cli set as sharecli results in nothing being pulled,
# as it doesn't have them cached and shareuser/shareproject should be ignored. This
# will however result in the artifacts being built and pushed to it
- result = cli.run(
- project=project, args=["build", "--remote", sharecli.repo, "target.bst"]
- )
+ result = cli.run(project=project, args=["build", "--remote", sharecli.repo, "target.bst"])
result.assert_success()
for element_name in all_elements:
assert element_name not in result.get_pulled_elements()
@@ -500,9 +458,7 @@ def test_build_remote_option(caplog, cli, tmpdir, datafiles):
# Now check that a clean build with cli set as sharecli results in artifacts only
# being pulled from it, as that remote was provided via the cli and is populated
- result = cli.run(
- project=project, args=["build", "--remote", sharecli.repo, "target.bst"]
- )
+ result = cli.run(project=project, args=["build", "--remote", sharecli.repo, "target.bst"])
result.assert_success()
for element_name in all_elements:
assert cli.get_element_state(project, element_name) == "cached"
@@ -616,9 +572,7 @@ def test_pull_artifact(cli, tmpdir, datafiles):
# Assert that the *artifact* is cached locally
cache_key = cli.get_element_key(project, element)
artifact_ref = os.path.join("test", os.path.splitext(element)[0], cache_key)
- assert os.path.exists(
- os.path.join(local_cache, "artifacts", "refs", artifact_ref)
- )
+ assert os.path.exists(os.path.join(local_cache, "artifacts", "refs", artifact_ref))
# Assert that the target is shared (note that assert shared will use the artifact name)
assert_shared(cli, share, project, element)
@@ -627,15 +581,11 @@ def test_pull_artifact(cli, tmpdir, datafiles):
shutil.rmtree(os.path.join(local_cache, "artifacts"))
# Assert that nothing is cached locally anymore
- assert not os.path.exists(
- os.path.join(local_cache, "artifacts", "refs", artifact_ref)
- )
+ assert not os.path.exists(os.path.join(local_cache, "artifacts", "refs", artifact_ref))
# Now try bst artifact pull
result = cli.run(project=project, args=["artifact", "pull", artifact_ref])
result.assert_success()
# And assert that it's again in the local cache, without having built
- assert os.path.exists(
- os.path.join(local_cache, "artifacts", "refs", artifact_ref)
- )
+ assert os.path.exists(os.path.join(local_cache, "artifacts", "refs", artifact_ref))
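The pull.py hunks above all make the same mechanical change: calls that were previously wrapped one argument per line are collapsed onto a single line wherever they now fit within 119 columns. A minimal before/after sketch of the pattern (illustrative placeholder names, not lines taken from this diff):

    # Wrapped form produced under a narrower line-length limit
    result = cli.run(
        project=project,
        args=["artifact", "pull", "target.bst", "--remote", share.repo],
    )

    # Collapsed form once the limit is raised to 119 columns
    result = cli.run(project=project, args=["artifact", "pull", "target.bst", "--remote", share.repo])

Where the wrapped form carried a trailing comma before its closing bracket, the comma survives the collapse, which is why several of the new single-line calls end in ",)" or ",]".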
diff --git a/tests/frontend/push.py b/tests/frontend/push.py
index 21a47838c..6e2e283cd 100644
--- a/tests/frontend/push.py
+++ b/tests/frontend/push.py
@@ -61,9 +61,7 @@ def test_push(cli, tmpdir, datafiles):
# Set up two artifact shares.
with create_artifact_share(os.path.join(str(tmpdir), "artifactshare1")) as share1:
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare2")
- ) as share2:
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare2")) as share2:
# Try pushing with no remotes configured. This should fail.
result = cli.run(project=project, args=["artifact", "push", "target.bst"])
@@ -78,14 +76,7 @@ def test_push(cli, tmpdir, datafiles):
result.assert_main_error(ErrorDomain.STREAM, None)
# Configure bst to push to one of the caches and run `bst artifact push`. This works.
- cli.configure(
- {
- "artifacts": [
- {"url": share1.repo, "push": False},
- {"url": share2.repo, "push": True},
- ]
- }
- )
+ cli.configure({"artifacts": [{"url": share1.repo, "push": False}, {"url": share2.repo, "push": True},]})
cli.run(project=project, args=["artifact", "push", "target.bst"])
assert_not_shared(cli, share1, project, "target.bst")
@@ -93,17 +84,8 @@ def test_push(cli, tmpdir, datafiles):
# Now try pushing to both
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare2")
- ) as share2:
- cli.configure(
- {
- "artifacts": [
- {"url": share1.repo, "push": True},
- {"url": share2.repo, "push": True},
- ]
- }
- )
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare2")) as share2:
+ cli.configure({"artifacts": [{"url": share1.repo, "push": True}, {"url": share2.repo, "push": True},]})
cli.run(project=project, args=["artifact", "push", "target.bst"])
assert_shared(cli, share1, project, "target.bst")
@@ -129,9 +111,7 @@ def test_push_artifact(cli, tmpdir, datafiles):
# Assert that the *artifact* is cached locally
cache_key = cli.get_element_key(project, element)
artifact_ref = os.path.join("test", os.path.splitext(element)[0], cache_key)
- assert os.path.exists(
- os.path.join(local_cache, "artifacts", "refs", artifact_ref)
- )
+ assert os.path.exists(os.path.join(local_cache, "artifacts", "refs", artifact_ref))
# Configure artifact share
cli.configure(
@@ -215,15 +195,7 @@ def test_push_fails_with_on_error_continue(cli, tmpdir, datafiles):
# Now try and push the target with its deps using --on-error continue
# and assert that push failed, but what could be pushed was pushed
result = cli.run(
- project=project,
- args=[
- "--on-error=continue",
- "artifact",
- "push",
- "--deps",
- "all",
- "target.bst",
- ],
+ project=project, args=["--on-error=continue", "artifact", "push", "--deps", "all", "target.bst",],
)
# The overall process should return as failed
@@ -279,9 +251,7 @@ def test_push_all(cli, tmpdir, datafiles):
)
# Now try bst artifact push all the deps
- result = cli.run(
- project=project, args=["artifact", "push", "target.bst", "--deps", "all"]
- )
+ result = cli.run(project=project, args=["artifact", "push", "target.bst", "--deps", "all"])
result.assert_success()
# And finally assert that all the artifacts are in the share
@@ -310,9 +280,7 @@ def test_push_artifacts_all_deps_fails(cli, tmpdir, datafiles):
# Assert that the *artifact* is cached locally
cache_key = cli.get_element_key(project, element)
artifact_ref = os.path.join("test", os.path.splitext(element)[0], cache_key)
- assert os.path.exists(
- os.path.join(local_cache, "artifacts", "refs", artifact_ref)
- )
+ assert os.path.exists(os.path.join(local_cache, "artifacts", "refs", artifact_ref))
# Configure artifact share
cli.configure(
@@ -331,9 +299,7 @@ def test_push_artifacts_all_deps_fails(cli, tmpdir, datafiles):
)
# Now try bst artifact push all the deps
- result = cli.run(
- project=project, args=["artifact", "push", "--deps", "all", artifact_ref]
- )
+ result = cli.run(project=project, args=["artifact", "push", "--deps", "all", artifact_ref])
result.assert_main_error(ErrorDomain.STREAM, None)
assert "Error: '--deps all' is not supported for artifact refs" in result.stderr
@@ -347,9 +313,7 @@ def test_push_after_pull(cli, tmpdir, datafiles):
project = str(datafiles)
# Set up two artifact shares.
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare1")
- ) as share1, create_artifact_share(
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare1")) as share1, create_artifact_share(
os.path.join(str(tmpdir), "artifactshare2")
) as share2:
@@ -381,14 +345,7 @@ def test_push_after_pull(cli, tmpdir, datafiles):
# Now we add share2 into the mix as a second push remote. This time,
# `bst build` should push to share2 after pulling from share1.
- cli.configure(
- {
- "artifacts": [
- {"url": share1.repo, "push": True},
- {"url": share2.repo, "push": True},
- ]
- }
- )
+ cli.configure({"artifacts": [{"url": share1.repo, "push": True}, {"url": share2.repo, "push": True},]})
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
assert result.get_pulled_elements() == ["target.bst"]
@@ -405,9 +362,7 @@ def test_artifact_expires(cli, datafiles, tmpdir):
# Create an artifact share (remote artifact cache) in the tmpdir/artifactshare
# Set a 22 MB quota
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare"), quota=int(22e6)
- ) as share:
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare"), quota=int(22e6)) as share:
# Configure bst to push to the cache
cli.configure(
@@ -459,9 +414,7 @@ def test_artifact_too_large(cli, datafiles, tmpdir):
# Create an artifact share (remote cache) in tmpdir/artifactshare
# Mock a file system with 5 MB total space
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare"), quota=int(5e6)
- ) as share:
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare"), quota=int(5e6)) as share:
# Configure bst to push to the remote cache
cli.configure(
@@ -488,9 +441,7 @@ def test_artifact_too_large(cli, datafiles, tmpdir):
result.assert_main_error(ErrorDomain.STREAM, None)
# Ensure that the small artifact is still in the share
- states = cli.get_element_states(
- project, ["small_element.bst", "large_element.bst"]
- )
+ states = cli.get_element_states(project, ["small_element.bst", "large_element.bst"])
assert states["small_element.bst"] == "cached"
assert_shared(cli, share, project, "small_element.bst")
@@ -507,9 +458,7 @@ def test_recently_pulled_artifact_does_not_expire(cli, datafiles, tmpdir):
# Create an artifact share (remote cache) in tmpdir/artifactshare
# Set a 22 MB quota
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare"), quota=int(22e6)
- ) as share:
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare"), quota=int(22e6)) as share:
# Configure bst to push to the cache
cli.configure(
@@ -541,10 +490,7 @@ def test_recently_pulled_artifact_does_not_expire(cli, datafiles, tmpdir):
assert cli.get_element_state(project, "element1.bst") != "cached"
# Pull the element1 from the remote cache (this should update its mtime)
- result = cli.run(
- project=project,
- args=["artifact", "pull", "element1.bst", "--remote", share.repo],
- )
+ result = cli.run(project=project, args=["artifact", "pull", "element1.bst", "--remote", share.repo],)
result.assert_success()
# Ensure element1 is cached locally
@@ -583,16 +529,10 @@ def test_push_cross_junction(cli, tmpdir, datafiles):
cli.configure(
{"artifacts": {"url": share.repo, "push": True},}
)
- cli.run(
- project=project, args=["artifact", "push", "junction.bst:import-etc.bst"]
- )
+ cli.run(project=project, args=["artifact", "push", "junction.bst:import-etc.bst"])
cache_key = cli.get_element_key(project, "junction.bst:import-etc.bst")
- assert share.get_artifact(
- cli.get_artifact_name(
- project, "subtest", "import-etc.bst", cache_key=cache_key
- )
- )
+ assert share.get_artifact(cli.get_artifact_name(project, "subtest", "import-etc.bst", cache_key=cache_key))
@pytest.mark.datafiles(DATA_DIR)
@@ -611,9 +551,7 @@ def test_push_already_cached(caplog, cli, tmpdir, datafiles):
result = cli.run(project=project, args=["artifact", "push", "target.bst"])
result.assert_success()
- assert (
- not result.get_pushed_elements()
- ), "No elements should have been pushed since the cache was populated"
+ assert not result.get_pushed_elements(), "No elements should have been pushed since the cache was populated"
assert "INFO Remote ({}) already has ".format(share.repo) in result.stderr
assert "SKIPPED Push" in result.stderr
@@ -623,26 +561,18 @@ def test_build_remote_option(caplog, cli, tmpdir, datafiles):
project = str(datafiles)
caplog.set_level(1)
- with create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare1")
- ) as shareuser, create_artifact_share(
+ with create_artifact_share(os.path.join(str(tmpdir), "artifactshare1")) as shareuser, create_artifact_share(
os.path.join(str(tmpdir), "artifactshare2")
- ) as shareproject, create_artifact_share(
- os.path.join(str(tmpdir), "artifactshare3")
- ) as sharecli:
+ ) as shareproject, create_artifact_share(os.path.join(str(tmpdir), "artifactshare3")) as sharecli:
# Add shareproject repo url to project.conf
with open(os.path.join(project, "project.conf"), "a") as projconf:
- projconf.write(
- "artifacts:\n url: {}\n push: True".format(shareproject.repo)
- )
+ projconf.write("artifacts:\n url: {}\n push: True".format(shareproject.repo))
# Configure shareuser remote in user conf
cli.configure({"artifacts": {"url": shareuser.repo, "push": True}})
- result = cli.run(
- project=project, args=["build", "--remote", sharecli.repo, "target.bst"]
- )
+ result = cli.run(project=project, args=["build", "--remote", sharecli.repo, "target.bst"])
# Artifacts should have only been pushed to sharecli, as that was provided via the cli
result.assert_success()
@@ -668,10 +598,7 @@ def test_push_no_strict(caplog, cli, tmpdir, datafiles, buildtrees):
with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
cli.configure(
- {
- "artifacts": {"url": share.repo, "push": True},
- "projects": {"test": {"strict": False}},
- }
+ {"artifacts": {"url": share.repo, "push": True}, "projects": {"test": {"strict": False}},}
)
# First get us a build
diff --git a/tests/frontend/rebuild.py b/tests/frontend/rebuild.py
index d3e36e6f4..1aef8e423 100644
--- a/tests/frontend/rebuild.py
+++ b/tests/frontend/rebuild.py
@@ -25,15 +25,11 @@ def test_rebuild(datafiles, cli, strict):
result.assert_success()
# Modify base import
- with open(
- os.path.join(project, "files", "dev-files", "usr", "include", "new.h"), "w"
- ) as f:
+ with open(os.path.join(project, "files", "dev-files", "usr", "include", "new.h"), "w") as f:
f.write("#define NEW")
# Rebuild base import and build top-level rebuild-target.bst
# In non-strict mode, this does not rebuild intermediate target.bst,
# which means that a weakly cached target.bst will be staged as dependency.
- result = cli.run(
- project=project, args=strict_args(["build", "rebuild-target.bst"], strict)
- )
+ result = cli.run(project=project, args=strict_args(["build", "rebuild-target.bst"], strict))
result.assert_success()
diff --git a/tests/frontend/show.py b/tests/frontend/show.py
index a54d625ea..a686dbd2d 100644
--- a/tests/frontend/show.py
+++ b/tests/frontend/show.py
@@ -29,24 +29,14 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),)
)
def test_show(cli, datafiles, target, fmt, expected):
project = str(datafiles)
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", fmt, target],
- )
+ result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", fmt, target],)
result.assert_success()
if result.output.strip() != expected:
- raise AssertionError(
- "Expected output:\n{}\nInstead received output:\n{}".format(
- expected, result.output
- )
- )
+ raise AssertionError("Expected output:\n{}\nInstead received output:\n{}".format(expected, result.output))
-@pytest.mark.datafiles(
- os.path.join(os.path.dirname(os.path.realpath(__file__)), "invalid_element_path",)
-)
+@pytest.mark.datafiles(os.path.join(os.path.dirname(os.path.realpath(__file__)), "invalid_element_path",))
def test_show_invalid_element_path(cli, datafiles):
project = str(datafiles)
cli.run(project=project, silent=True, args=["show", "foo.bst"])
@@ -77,16 +67,8 @@ def test_show_fail(cli, datafiles):
@pytest.mark.parametrize(
"target,except_,expected",
[
- (
- "target.bst",
- "import-bin.bst",
- ["import-dev.bst", "compose-all.bst", "target.bst"],
- ),
- (
- "target.bst",
- "import-dev.bst",
- ["import-bin.bst", "compose-all.bst", "target.bst"],
- ),
+ ("target.bst", "import-bin.bst", ["import-dev.bst", "compose-all.bst", "target.bst"],),
+ ("target.bst", "import-dev.bst", ["import-bin.bst", "compose-all.bst", "target.bst"],),
("target.bst", "compose-all.bst", ["import-bin.bst", "target.bst"]),
("compose-all.bst", "import-bin.bst", ["import-dev.bst", "compose-all.bst"]),
],
@@ -96,27 +78,14 @@ def test_show_except_simple(cli, datafiles, target, except_, expected):
result = cli.run(
project=project,
silent=True,
- args=[
- "show",
- "--deps",
- "all",
- "--format",
- "%{name}",
- "--except",
- except_,
- target,
- ],
+ args=["show", "--deps", "all", "--format", "%{name}", "--except", except_, target,],
)
result.assert_success()
results = result.output.strip().splitlines()
if results != expected:
- raise AssertionError(
- "Expected elements:\n{}\nInstead received elements:\n{}".format(
- expected, results
- )
- )
+ raise AssertionError("Expected elements:\n{}\nInstead received elements:\n{}".format(expected, results))
# This test checks various constructions of a pipeline
@@ -200,22 +169,14 @@ def test_show_except_simple(cli, datafiles, target, except_, expected):
],
),
# Test one target and excepting two elements
- (
- ["build.bst"],
- ["unrelated-1.bst", "unrelated-2.bst"],
- ["first-level-1.bst", "build.bst",],
- ),
+ (["build.bst"], ["unrelated-1.bst", "unrelated-2.bst"], ["first-level-1.bst", "build.bst",],),
],
)
def test_show_except(cli, datafiles, targets, exceptions, expected):
basedir = str(datafiles)
results = cli.get_pipeline(basedir, targets, except_=exceptions, scope="all")
if results != expected:
- raise AssertionError(
- "Expected elements:\n{}\nInstead received elements:\n{}".format(
- expected, results
- )
- )
+ raise AssertionError("Expected elements:\n{}\nInstead received elements:\n{}".format(expected, results))
###############################################################
@@ -271,13 +232,9 @@ def test_target_is_dependency(cli, datafiles):
@pytest.mark.datafiles(os.path.join(DATA_DIR, "project"))
@pytest.mark.parametrize("ref_storage", [("inline"), ("project.refs")])
-@pytest.mark.parametrize(
- "element_name", ["junction-dep.bst", "junction.bst:import-etc.bst"]
-)
+@pytest.mark.parametrize("element_name", ["junction-dep.bst", "junction.bst:import-etc.bst"])
@pytest.mark.parametrize("workspaced", [True, False], ids=["workspace", "no-workspace"])
-def test_unfetched_junction(
- cli, tmpdir, datafiles, ref_storage, element_name, workspaced
-):
+def test_unfetched_junction(cli, tmpdir, datafiles, ref_storage, element_name, workspaced):
project = str(datafiles)
subproject_path = os.path.join(project, "files", "sub-project")
junction_path = os.path.join(project, "elements", "junction.bst")
@@ -286,9 +243,7 @@ def test_unfetched_junction(
configure_project(project, {"ref-storage": ref_storage})
# Create a repo to hold the subproject and generate a junction element for it
- ref = generate_junction(
- tmpdir, subproject_path, junction_path, store_ref=(ref_storage == "inline")
- )
+ ref = generate_junction(tmpdir, subproject_path, junction_path, store_ref=(ref_storage == "inline"))
# Create a stack element to depend on a cross junction element
#
@@ -309,14 +264,7 @@ def test_unfetched_junction(
result = cli.run(
project=project,
silent=True,
- args=[
- "workspace",
- "open",
- "--no-checkout",
- "--directory",
- subproject_path,
- "junction.bst",
- ],
+ args=["workspace", "open", "--no-checkout", "--directory", subproject_path, "junction.bst",],
)
result.assert_success()
@@ -352,26 +300,15 @@ def test_inconsistent_junction(cli, tmpdir, datafiles, ref_storage, workspaced):
result = cli.run(
project=project,
silent=True,
- args=[
- "workspace",
- "open",
- "--no-checkout",
- "--directory",
- subproject_path,
- "junction.bst",
- ],
+ args=["workspace", "open", "--no-checkout", "--directory", subproject_path, "junction.bst",],
)
result.assert_success()
# Assert the correct error when trying to show the pipeline
- dep_result = cli.run(
- project=project, silent=True, args=["show", "junction-dep.bst"]
- )
+ dep_result = cli.run(project=project, silent=True, args=["show", "junction-dep.bst"])
# And assert the same error when showing the cross-junction element directly
- etc_result = cli.run(
- project=project, silent=True, args=["show", "junction.bst:import-etc.bst"]
- )
+ etc_result = cli.run(project=project, silent=True, args=["show", "junction.bst:import-etc.bst"])
# If a workspace is open, no ref is needed
if workspaced:
@@ -384,18 +321,12 @@ def test_inconsistent_junction(cli, tmpdir, datafiles, ref_storage, workspaced):
provenance = ref_node.get_provenance()
assert str(provenance) in dep_result.stderr
- dep_result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_INCONSISTENT
- )
- etc_result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_INCONSISTENT
- )
+ dep_result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_INCONSISTENT)
+ etc_result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_INCONSISTENT)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "project"))
-@pytest.mark.parametrize(
- "element_name", ["junction-dep.bst", "junction.bst:import-etc.bst"]
-)
+@pytest.mark.parametrize("element_name", ["junction-dep.bst", "junction.bst:import-etc.bst"])
@pytest.mark.parametrize("workspaced", [True, False], ids=["workspace", "no-workspace"])
def test_fetched_junction(cli, tmpdir, datafiles, element_name, workspaced):
project = str(datafiles)
@@ -415,9 +346,7 @@ def test_fetched_junction(cli, tmpdir, datafiles, element_name, workspaced):
}
_yaml.roundtrip_dump(element, element_path)
- result = cli.run(
- project=project, silent=True, args=["source", "fetch", "junction.bst"]
- )
+ result = cli.run(project=project, silent=True, args=["source", "fetch", "junction.bst"])
result.assert_success()
# Open a workspace if we're testing workspaced behavior
@@ -425,23 +354,12 @@ def test_fetched_junction(cli, tmpdir, datafiles, element_name, workspaced):
result = cli.run(
project=project,
silent=True,
- args=[
- "workspace",
- "open",
- "--no-checkout",
- "--directory",
- subproject_path,
- "junction.bst",
- ],
+ args=["workspace", "open", "--no-checkout", "--directory", subproject_path, "junction.bst",],
)
result.assert_success()
# Assert the correct error when trying to show the pipeline
- result = cli.run(
- project=project,
- silent=True,
- args=["show", "--format", "%{name}-%{state}", element_name],
- )
+ result = cli.run(project=project, silent=True, args=["show", "--format", "%{name}-%{state}", element_name],)
results = result.output.strip().splitlines()
assert "junction.bst:import-etc.bst-buildable" in results
@@ -464,9 +382,7 @@ def test_exceed_max_recursion_depth(cli, tmpdir, dependency_depth):
"""
os.mkdir(project_path)
- result = cli.run(
- silent=True, args=["init", "--project-name", project_name, project_path]
- )
+ result = cli.run(silent=True, args=["init", "--project-name", project_name, project_path])
result.assert_success()
sourcefiles_path = os.path.join(project_path, "files")
@@ -481,20 +397,14 @@ def test_exceed_max_recursion_depth(cli, tmpdir, dependency_depth):
}
if i == 0:
del element["depends"]
- _yaml.roundtrip_dump(
- element, os.path.join(element_path, "element{}.bst".format(str(i)))
- )
+ _yaml.roundtrip_dump(element, os.path.join(element_path, "element{}.bst".format(str(i))))
source = os.path.join(sourcefiles_path, "source{}".format(str(i)))
open(source, "x").close()
assert os.path.exists(source)
setup_test()
- result = cli.run(
- project=project_path,
- silent=True,
- args=["show", "element{}.bst".format(str(dependency_depth))],
- )
+ result = cli.run(project=project_path, silent=True, args=["show", "element{}.bst".format(str(dependency_depth))],)
recursion_limit = sys.getrecursionlimit()
if dependency_depth <= recursion_limit:
@@ -523,19 +433,13 @@ def test_format_deps(cli, datafiles, dep_kind, expected_deps):
project = str(datafiles)
target = "checkout-deps.bst"
result = cli.run(
- project=project,
- silent=True,
- args=["show", "--deps", "none", "--format", "%{name}: " + dep_kind, target],
+ project=project, silent=True, args=["show", "--deps", "none", "--format", "%{name}: " + dep_kind, target],
)
result.assert_success()
expected = "{name}: {deps}".format(name=target, deps=expected_deps)
if result.output.strip() != expected:
- raise AssertionError(
- "Expected output:\n{}\nInstead received output:\n{}".format(
- expected, result.output
- )
- )
+ raise AssertionError("Expected output:\n{}\nInstead received output:\n{}".format(expected, result.output))
# This tests the resolved value of the 'max-jobs' variable,
@@ -544,8 +448,7 @@ def test_format_deps(cli, datafiles, dep_kind, expected_deps):
#
@pytest.mark.datafiles(os.path.join(DATA_DIR, "project"))
@pytest.mark.parametrize(
- "cli_value, config_value",
- [(None, None), (None, "16"), ("16", None), ("5", "16"), ("0", "16"), ("16", "0"),],
+ "cli_value, config_value", [(None, None), (None, "16"), ("16", None), ("5", "16"), ("0", "16"), ("16", "0"),],
)
def test_max_jobs(cli, datafiles, cli_value, config_value):
project = str(datafiles)
@@ -599,8 +502,7 @@ def test_max_jobs(cli, datafiles, cli_value, config_value):
#
@pytest.mark.datafiles(os.path.join(DATA_DIR, "strict-depends"))
@pytest.mark.parametrize(
- "target, expected_state",
- [("non-strict-depends.bst", "cached"), ("strict-depends.bst", "waiting"),],
+ "target, expected_state", [("non-strict-depends.bst", "cached"), ("strict-depends.bst", "waiting"),],
)
def test_strict_dependencies(cli, datafiles, target, expected_state):
project = str(datafiles)
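A recurring case in show.py is pytest parametrization: each (input, expected) tuple is collapsed onto its own line, while tables whose rows would still exceed 119 columns keep the fully wrapped layout. A short runnable sketch of the resulting style, using a hypothetical spell() helper rather than anything from this test suite:

    import pytest

    def spell(value):
        # Toy function standing in for the code under test
        return {1: "one", 2: "two"}[value]

    @pytest.mark.parametrize(
        "value, expected", [(1, "one"), (2, "two"),],
    )
    def test_spell(value, expected):
        assert spell(value) == expected

One case per line keeps later additions to the table readable in diffs.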
diff --git a/tests/frontend/source_checkout.py b/tests/frontend/source_checkout.py
index f10c24c5d..ff897b1cf 100644
--- a/tests/frontend/source_checkout.py
+++ b/tests/frontend/source_checkout.py
@@ -44,10 +44,7 @@ def test_source_checkout(datafiles, cli, tmpdir_factory, with_workspace, guess_e
if with_workspace:
ws_cmd = ["-C", workspace]
- result = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, target],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--directory", workspace, target],)
result.assert_success()
else:
ws_cmd = []
@@ -64,9 +61,7 @@ def test_source_checkout(datafiles, cli, tmpdir_factory, with_workspace, guess_e
result = cli.run(project=project, args=args)
result.assert_success()
- assert os.path.exists(
- os.path.join(checkout, "checkout-deps", "etc", "buildstream", "config")
- )
+ assert os.path.exists(os.path.join(checkout, "checkout-deps", "etc", "buildstream", "config"))
@pytest.mark.datafiles(DATA_DIR)
@@ -80,23 +75,11 @@ def test_source_checkout_force(datafiles, cli, force_flag):
os.makedirs(os.path.join(checkout, "some-thing"))
result = cli.run(
- project=project,
- args=[
- "source",
- "checkout",
- force_flag,
- "--deps",
- "none",
- "--directory",
- checkout,
- target,
- ],
+ project=project, args=["source", "checkout", force_flag, "--deps", "none", "--directory", checkout, target,],
)
result.assert_success()
- assert os.path.exists(
- os.path.join(checkout, "checkout-deps", "etc", "buildstream", "config")
- )
+ assert os.path.exists(os.path.join(checkout, "checkout-deps", "etc", "buildstream", "config"))
@pytest.mark.datafiles(DATA_DIR)
@@ -105,17 +88,12 @@ def test_source_checkout_tar(datafiles, cli):
tar = os.path.join(cli.directory, "source-checkout.tar")
target = "checkout-deps.bst"
- result = cli.run(
- project=project,
- args=["source", "checkout", "--tar", tar, "--deps", "none", target],
- )
+ result = cli.run(project=project, args=["source", "checkout", "--tar", tar, "--deps", "none", target],)
result.assert_success()
assert os.path.exists(tar)
with tarfile.open(tar) as tf:
- expected_content = os.path.join(
- tar, "checkout-deps", "etc", "buildstream", "config"
- )
+ expected_content = os.path.join(tar, "checkout-deps", "etc", "buildstream", "config")
tar_members = [f.name for f in tf]
for member in tar_members:
assert member in expected_content
@@ -131,23 +109,11 @@ def test_source_checkout_compressed_tar(datafiles, cli, compression):
result = cli.run(
project=project,
- args=[
- "source",
- "checkout",
- "--tar",
- tar,
- "--compression",
- compression,
- "--deps",
- "none",
- target,
- ],
+ args=["source", "checkout", "--tar", tar, "--compression", compression, "--deps", "none", target,],
)
result.assert_success()
tar = tarfile.open(name=tar, mode="r:" + compression)
- assert (
- os.path.join("checkout-deps", "etc", "buildstream", "config") in tar.getnames()
- )
+ assert os.path.join("checkout-deps", "etc", "buildstream", "config") in tar.getnames()
@pytest.mark.datafiles(DATA_DIR)
@@ -157,33 +123,24 @@ def test_source_checkout_deps(datafiles, cli, deps):
checkout = os.path.join(cli.directory, "source-checkout")
target = "checkout-deps.bst"
- result = cli.run(
- project=project,
- args=["source", "checkout", "--directory", checkout, "--deps", deps, target],
- )
+ result = cli.run(project=project, args=["source", "checkout", "--directory", checkout, "--deps", deps, target],)
result.assert_success()
# Sources of the target
if deps == "build":
assert not os.path.exists(os.path.join(checkout, "checkout-deps"))
else:
- assert os.path.exists(
- os.path.join(checkout, "checkout-deps", "etc", "buildstream", "config")
- )
+ assert os.path.exists(os.path.join(checkout, "checkout-deps", "etc", "buildstream", "config"))
# Sources of the target's build dependencies
if deps in ("build", "all"):
- assert os.path.exists(
- os.path.join(checkout, "import-dev", "usr", "include", "pony.h")
- )
+ assert os.path.exists(os.path.join(checkout, "import-dev", "usr", "include", "pony.h"))
else:
assert not os.path.exists(os.path.join(checkout, "import-dev"))
# Sources of the target's runtime dependencies
if deps in ("run", "all"):
- assert os.path.exists(
- os.path.join(checkout, "import-bin", "usr", "bin", "hello")
- )
+ assert os.path.exists(os.path.join(checkout, "import-bin", "usr", "bin", "hello"))
else:
assert not os.path.exists(os.path.join(checkout, "import-bin"))
@@ -196,32 +153,18 @@ def test_source_checkout_except(datafiles, cli):
result = cli.run(
project=project,
- args=[
- "source",
- "checkout",
- "--directory",
- checkout,
- "--deps",
- "all",
- "--except",
- "import-bin.bst",
- target,
- ],
+ args=["source", "checkout", "--directory", checkout, "--deps", "all", "--except", "import-bin.bst", target,],
)
result.assert_success()
# Sources for the target should be present
- assert os.path.exists(
- os.path.join(checkout, "checkout-deps", "etc", "buildstream", "config")
- )
+ assert os.path.exists(os.path.join(checkout, "checkout-deps", "etc", "buildstream", "config"))
# Sources for import-bin.bst should not be present
assert not os.path.exists(os.path.join(checkout, "import-bin"))
# Sources for other dependencies should be present
- assert os.path.exists(
- os.path.join(checkout, "import-dev", "usr", "include", "pony.h")
- )
+ assert os.path.exists(os.path.join(checkout, "import-dev", "usr", "include", "pony.h"))
@pytest.mark.datafiles(DATA_DIR)
@@ -233,8 +176,7 @@ def test_source_checkout_fetch(datafiles, cli):
# Create an element with remote source
element = generate_remote_import_element(
- os.path.join(project, "files", "dev-files", "usr", "include", "pony.h"),
- "pony.h",
+ os.path.join(project, "files", "dev-files", "usr", "include", "pony.h"), "pony.h",
)
_yaml.roundtrip_dump(element, target_path)
@@ -244,9 +186,7 @@ def test_source_checkout_fetch(datafiles, cli):
args = ["source", "checkout"]
args += [target, checkout]
- result = cli.run(
- project=project, args=["source", "checkout", "--directory", checkout, target]
- )
+ result = cli.run(project=project, args=["source", "checkout", "--directory", checkout, target])
result.assert_success()
assert os.path.exists(os.path.join(checkout, "remote-import-dev", "pony.h"))
@@ -309,10 +249,7 @@ def test_source_checkout_options_tar_and_dir_conflict(cli, tmpdir, datafiles):
tar_file = os.path.join(str(tmpdir), "source-checkout.tar")
target = "checkout-deps.bst"
- result = cli.run(
- project=project,
- args=["source", "checkout", "--directory", checkout, "--tar", tar_file, target],
- )
+ result = cli.run(project=project, args=["source", "checkout", "--directory", checkout, "--tar", tar_file, target],)
assert result.exit_code != 0
assert "ERROR: options --directory and --tar conflict" in result.stderr
@@ -326,16 +263,7 @@ def test_source_checkout_compression_without_tar(cli, tmpdir, datafiles):
target = "checkout-deps.bst"
result = cli.run(
- project=project,
- args=[
- "source",
- "checkout",
- "--directory",
- checkout,
- "--compression",
- "xz",
- target,
- ],
+ project=project, args=["source", "checkout", "--directory", checkout, "--compression", "xz", target,],
)
assert result.exit_code != 0
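The source_checkout.py assertions collapse cleanly because each one checks a single path inside a checkout or a tar archive. As a standalone sketch of the tar membership check being reformatted here (the archive name and member path are placeholders):

    import os
    import tarfile

    with tarfile.open("source-checkout.tar") as tf:
        names = tf.getnames()
    assert os.path.join("checkout-deps", "etc", "buildstream", "config") in names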
diff --git a/tests/frontend/track.py b/tests/frontend/track.py
index 6d9e3bb3f..02a19787c 100644
--- a/tests/frontend/track.py
+++ b/tests/frontend/track.py
@@ -41,9 +41,7 @@ def test_track_single(cli, tmpdir, datafiles):
# Write out our test targets
generate_element(repo, os.path.join(element_path, element_dep_name))
- generate_element(
- repo, os.path.join(element_path, element_target_name), dep_name=element_dep_name
- )
+ generate_element(repo, os.path.join(element_path, element_target_name), dep_name=element_dep_name)
# Assert that tracking is needed for both elements
states = cli.get_element_states(project, [element_target_name])
@@ -53,15 +51,11 @@ def test_track_single(cli, tmpdir, datafiles):
}
# Now first try to track only one element
- result = cli.run(
- project=project, args=["source", "track", "--deps", "none", element_target_name]
- )
+ result = cli.run(project=project, args=["source", "track", "--deps", "none", element_target_name])
result.assert_success()
# And now fetch it
- result = cli.run(
- project=project, args=["source", "fetch", "--deps", "none", element_target_name]
- )
+ result = cli.run(project=project, args=["source", "fetch", "--deps", "none", element_target_name])
result.assert_success()
# Assert that the dependency is waiting and the target has still never been tracked
@@ -75,9 +69,7 @@ def test_track_single(cli, tmpdir, datafiles):
@pytest.mark.datafiles(os.path.join(TOP_DIR))
@pytest.mark.parametrize("ref_storage", [("inline"), ("project-refs")])
def test_track_optional(cli, tmpdir, datafiles, ref_storage):
- project = os.path.join(
- datafiles.dirname, datafiles.basename, "track-optional-" + ref_storage
- )
+ project = os.path.join(datafiles.dirname, datafiles.basename, "track-optional-" + ref_storage)
dev_files_path = os.path.join(project, "files")
element_path = os.path.join(project, "target.bst")
@@ -104,49 +96,23 @@ def test_track_optional(cli, tmpdir, datafiles, ref_storage):
#
# We want to track and persist the ref separately in this test
#
- result = cli.run(
- project=project,
- args=["--option", "test", "False", "source", "track", "target.bst"],
- )
+ result = cli.run(project=project, args=["--option", "test", "False", "source", "track", "target.bst"],)
result.assert_success()
- result = cli.run(
- project=project,
- args=["--option", "test", "True", "source", "track", "target.bst"],
- )
+ result = cli.run(project=project, args=["--option", "test", "True", "source", "track", "target.bst"],)
result.assert_success()
# Now fetch the key for both options
#
result = cli.run(
project=project,
- args=[
- "--option",
- "test",
- "False",
- "show",
- "--deps",
- "none",
- "--format",
- "%{key}",
- "target.bst",
- ],
+ args=["--option", "test", "False", "show", "--deps", "none", "--format", "%{key}", "target.bst",],
)
result.assert_success()
master_key = result.output
result = cli.run(
project=project,
- args=[
- "--option",
- "test",
- "True",
- "show",
- "--deps",
- "none",
- "--format",
- "%{key}",
- "target.bst",
- ],
+ args=["--option", "test", "True", "show", "--deps", "none", "--format", "%{key}", "target.bst",],
)
result.assert_success()
test_key = result.output
@@ -187,15 +153,7 @@ def test_track_cross_junction(cli, tmpdir, datafiles, cross_junction, ref_storag
#
def get_subproject_element_state():
result = cli.run(
- project=project,
- args=[
- "show",
- "--deps",
- "all",
- "--format",
- "%{name}|%{state}",
- "target.bst",
- ],
+ project=project, args=["show", "--deps", "all", "--format", "%{name}|%{state}", "target.bst",],
)
result.assert_success()
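Every hunk in this commit is the output of a single mechanical reformat rather than a hand edit. A sketch of the invocation that would reproduce and verify it, assuming black is installed (the exact command line is not recorded in the commit itself):

    black --line-length 119 tests/
    black --line-length 119 --check tests/    # exits non-zero if anything would still be reformatted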
diff --git a/tests/frontend/workspace.py b/tests/frontend/workspace.py
index ba4e9577f..f469939d1 100644
--- a/tests/frontend/workspace.py
+++ b/tests/frontend/workspace.py
@@ -68,9 +68,7 @@ class WorkspaceCreator:
self.workspace_cmd = os.path.join(self.project_path, "workspace_cmd")
- def create_workspace_element(
- self, kind, suffix="", workspace_dir=None, element_attrs=None
- ):
+ def create_workspace_element(self, kind, suffix="", workspace_dir=None, element_attrs=None):
element_name = "workspace-test-{}{}.bst".format(kind, suffix)
element_path = os.path.join(self.project_path, "elements")
if not workspace_dir:
@@ -90,9 +88,7 @@ class WorkspaceCreator:
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
return element_name, element_path, workspace_dir
- def create_workspace_elements(
- self, kinds, suffixs=None, workspace_dir_usr=None, element_attrs=None
- ):
+ def create_workspace_elements(self, kinds, suffixs=None, workspace_dir_usr=None, element_attrs=None):
element_tuples = []
@@ -109,25 +105,16 @@ class WorkspaceCreator:
element_tuples.append((element_name, workspace_dir))
# Assert that a fetch is needed
- states = self.cli.get_element_states(
- self.project_path, [e for e, _ in element_tuples]
- )
+ states = self.cli.get_element_states(self.project_path, [e for e, _ in element_tuples])
assert not any(states[e] != "fetch needed" for e, _ in element_tuples)
return element_tuples
def open_workspaces(
- self,
- kinds,
- suffixs=None,
- workspace_dir=None,
- element_attrs=None,
- no_checkout=False,
+ self, kinds, suffixs=None, workspace_dir=None, element_attrs=None, no_checkout=False,
):
- element_tuples = self.create_workspace_elements(
- kinds, suffixs, workspace_dir, element_attrs
- )
+ element_tuples = self.create_workspace_elements(kinds, suffixs, workspace_dir, element_attrs)
os.makedirs(self.workspace_cmd, exist_ok=True)
# Now open the workspace, this should have the effect of automatically
@@ -140,20 +127,14 @@ class WorkspaceCreator:
_, workspace_dir = element_tuples[0]
args.extend(["--directory", workspace_dir])
- args.extend(
- [element_name for element_name, workspace_dir_suffix in element_tuples]
- )
- result = self.cli.run(
- cwd=self.workspace_cmd, project=self.project_path, args=args
- )
+ args.extend([element_name for element_name, workspace_dir_suffix in element_tuples])
+ result = self.cli.run(cwd=self.workspace_cmd, project=self.project_path, args=args)
result.assert_success()
if not no_checkout:
# Assert that we are now buildable because the source is now cached.
- states = self.cli.get_element_states(
- self.project_path, [e for e, _ in element_tuples]
- )
+ states = self.cli.get_element_states(self.project_path, [e for e, _ in element_tuples])
assert not any(states[e] != "buildable" for e, _ in element_tuples)
# Check that the executable hello file is found in each workspace
@@ -176,9 +157,7 @@ def open_workspace(
no_checkout=False,
):
workspace_object = WorkspaceCreator(cli, tmpdir, datafiles, project_path)
- workspaces = workspace_object.open_workspaces(
- (kind,), (suffix,), workspace_dir, element_attrs, no_checkout
- )
+ workspaces = workspace_object.open_workspaces((kind,), (suffix,), workspace_dir, element_attrs, no_checkout)
assert len(workspaces) == 1
element_name, workspace = workspaces[0]
return element_name, workspace_object.project_path, workspace
@@ -197,9 +176,7 @@ def test_open_bzr_customize(cli, tmpdir, datafiles):
source_config = element_config.get_sequence("sources").mapping_at(0)
output = subprocess.check_output(["bzr", "info"], cwd=workspace)
stripped_url = source_config.get_str("url").lstrip("file:///")
- expected_output_str = "checkout of branch: /{}/{}".format(
- stripped_url, source_config.get_str("track")
- )
+ expected_output_str = "checkout of branch: /{}/{}".format(stripped_url, source_config.get_str("track"))
assert expected_output_str in str(output)
@@ -221,9 +198,7 @@ def test_open_multi(cli, tmpdir, datafiles):
assert ".bzr" not in workspace_lsdir
-@pytest.mark.skipif(
- os.geteuid() == 0, reason="root may have CAP_DAC_OVERRIDE and ignore permissions"
-)
+@pytest.mark.skipif(os.geteuid() == 0, reason="root may have CAP_DAC_OVERRIDE and ignore permissions")
@pytest.mark.datafiles(DATA_DIR)
def test_open_multi_unwritable(cli, tmpdir, datafiles):
workspace_object = WorkspaceCreator(cli, tmpdir, datafiles)
@@ -240,9 +215,7 @@ def test_open_multi_unwritable(cli, tmpdir, datafiles):
cwdstat = os.stat(workspace_object.workspace_cmd)
try:
os.chmod(workspace_object.workspace_cmd, cwdstat.st_mode - stat.S_IWRITE)
- result = workspace_object.cli.run(
- project=workspace_object.project_path, args=args
- )
+ result = workspace_object.cli.run(project=workspace_object.project_path, args=args)
finally:
# Using this finally to make sure we always put things back how they should be.
os.chmod(workspace_object.workspace_cmd, cwdstat.st_mode)
@@ -250,12 +223,7 @@ def test_open_multi_unwritable(cli, tmpdir, datafiles):
result.assert_main_error(ErrorDomain.STREAM, None)
# Normally we avoid checking stderr in favour of using the machine-readable result.assert_main_error
# But Tristan was very keen that the names of the elements left needing workspaces were present in the output
- assert (
- " ".join(
- [element_name for element_name, workspace_dir_suffix in element_tuples[1:]]
- )
- in result.stderr
- )
+ assert " ".join([element_name for element_name, workspace_dir_suffix in element_tuples[1:]]) in result.stderr
@pytest.mark.datafiles(DATA_DIR)
@@ -272,9 +240,7 @@ def test_open_multi_with_directory(cli, tmpdir, datafiles):
args.extend([element_name for element_name, workspace_dir_suffix in element_tuples])
result = workspace_object.cli.run(
- cwd=workspace_object.workspace_cmd,
- project=workspace_object.project_path,
- args=args,
+ cwd=workspace_object.workspace_cmd, project=workspace_object.project_path, args=args,
)
result.assert_main_error(ErrorDomain.STREAM, "directory-with-multiple-elements")
@@ -285,9 +251,7 @@ def test_open_defaultlocation(cli, tmpdir, datafiles):
workspace_object = WorkspaceCreator(cli, tmpdir, datafiles)
# pylint: disable=unbalanced-tuple-unpacking
- ((element_name, workspace_dir),) = workspace_object.create_workspace_elements(
- ["git"], ["git"]
- )
+ ((element_name, workspace_dir),) = workspace_object.create_workspace_elements(["git"], ["git"])
os.makedirs(workspace_object.workspace_cmd, exist_ok=True)
# Now open the workspace, this should have the effect of automatically
@@ -304,10 +268,7 @@ def test_open_defaultlocation(cli, tmpdir, datafiles):
result.assert_success()
- assert (
- cli.get_element_state(workspace_object.project_path, element_name)
- == "buildable"
- )
+ assert cli.get_element_state(workspace_object.project_path, element_name) == "buildable"
# Check that the executable hello file is found in the workspace
# even though the cli.run function was not run with cwd = workspace_object.workspace_cmd
@@ -322,9 +283,7 @@ def test_open_defaultlocation_exists(cli, tmpdir, datafiles):
workspace_object = WorkspaceCreator(cli, tmpdir, datafiles)
# pylint: disable=unbalanced-tuple-unpacking
- ((element_name, workspace_dir),) = workspace_object.create_workspace_elements(
- ["git"], ["git"]
- )
+ ((element_name, workspace_dir),) = workspace_object.create_workspace_elements(["git"], ["git"])
os.makedirs(workspace_object.workspace_cmd, exist_ok=True)
with open(workspace_dir, "w") as fl:
@@ -362,10 +321,7 @@ def test_open_force(cli, tmpdir, datafiles):
assert os.path.exists(workspace)
# Now open the workspace again with --force, this should happily succeed
- result = cli.run(
- project=project,
- args=["workspace", "open", "--force", "--directory", workspace, element_name],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--force", "--directory", workspace, element_name],)
result.assert_success()
@@ -377,10 +333,7 @@ def test_open_force_open(cli, tmpdir, datafiles):
assert os.path.exists(workspace)
# Now open the workspace again with --force, this should happily succeed
- result = cli.run(
- project=project,
- args=["workspace", "open", "--force", "--directory", workspace, element_name],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--force", "--directory", workspace, element_name],)
result.assert_success()
@@ -400,15 +353,7 @@ def test_open_force_open_no_checkout(cli, tmpdir, datafiles):
# Now open the workspace again with --force and --no-checkout
result = cli.run(
project=project,
- args=[
- "workspace",
- "open",
- "--force",
- "--no-checkout",
- "--directory",
- workspace,
- element_name,
- ],
+ args=["workspace", "open", "--force", "--no-checkout", "--directory", workspace, element_name,],
)
result.assert_success()
@@ -430,9 +375,7 @@ def test_open_force_different_workspace(cli, tmpdir, datafiles):
tmpdir = os.path.join(str(tmpdir), "-beta")
shutil.move(hello_path, hello1_path)
- element_name2, _, workspace2 = open_workspace(
- cli, tmpdir, datafiles, "git", "-beta"
- )
+ element_name2, _, workspace2 = open_workspace(cli, tmpdir, datafiles, "git", "-beta")
# Assert the workspace dir exists
assert os.path.exists(workspace2)
@@ -444,10 +387,7 @@ def test_open_force_different_workspace(cli, tmpdir, datafiles):
assert os.path.exists(os.path.join(workspace2, "usr", "bin", "hello"))
# Now open the workspace again with --force, this should happily succeed
- result = cli.run(
- project=project,
- args=["workspace", "open", "--force", "--directory", workspace, element_name2],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--force", "--directory", workspace, element_name2],)
# Assert that the file in workspace 1 has been replaced
# With the file from workspace 2
@@ -462,9 +402,7 @@ def test_close(cli, tmpdir, datafiles):
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
# Close the workspace
- result = cli.run(
- project=project, args=["workspace", "close", "--remove-dir", element_name]
- )
+ result = cli.run(project=project, args=["workspace", "close", "--remove-dir", element_name])
result.assert_success()
# Assert the workspace dir has been deleted
@@ -475,18 +413,14 @@ def test_close(cli, tmpdir, datafiles):
def test_close_external_after_move_project(cli, tmpdir, datafiles):
workspace_dir = os.path.join(str(tmpdir), "workspace")
project_path = os.path.join(str(tmpdir), "initial_project")
- element_name, _, _ = open_workspace(
- cli, tmpdir, datafiles, "git", "", workspace_dir, project_path
- )
+ element_name, _, _ = open_workspace(cli, tmpdir, datafiles, "git", "", workspace_dir, project_path)
assert os.path.exists(workspace_dir)
moved_dir = os.path.join(str(tmpdir), "external_project")
shutil.move(project_path, moved_dir)
assert os.path.exists(moved_dir)
# Close the workspace
- result = cli.run(
- project=moved_dir, args=["workspace", "close", "--remove-dir", element_name]
- )
+ result = cli.run(project=moved_dir, args=["workspace", "close", "--remove-dir", element_name])
result.assert_success()
# Assert the workspace dir has been deleted
@@ -498,21 +432,14 @@ def test_close_internal_after_move_project(cli, tmpdir, datafiles):
initial_dir = os.path.join(str(tmpdir), "initial_project")
initial_workspace = os.path.join(initial_dir, "workspace")
element_name, _, _ = open_workspace(
- cli,
- tmpdir,
- datafiles,
- "git",
- workspace_dir=initial_workspace,
- project_path=initial_dir,
+ cli, tmpdir, datafiles, "git", workspace_dir=initial_workspace, project_path=initial_dir,
)
moved_dir = os.path.join(str(tmpdir), "internal_project")
shutil.move(initial_dir, moved_dir)
assert os.path.exists(moved_dir)
# Close the workspace
- result = cli.run(
- project=moved_dir, args=["workspace", "close", "--remove-dir", element_name]
- )
+ result = cli.run(project=moved_dir, args=["workspace", "close", "--remove-dir", element_name])
result.assert_success()
# Assert the workspace dir has been deleted
@@ -538,9 +465,7 @@ def test_close_removed(cli, tmpdir, datafiles):
@pytest.mark.datafiles(DATA_DIR)
def test_close_nonexistant_element(cli, tmpdir, datafiles):
element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
- element_path = os.path.join(
- datafiles.dirname, datafiles.basename, "elements", element_name
- )
+ element_path = os.path.join(datafiles.dirname, datafiles.basename, "elements", element_name)
# First brutally remove the element.bst file, ensuring that
# the element does not exist anymore in the project where
@@ -548,9 +473,7 @@ def test_close_nonexistant_element(cli, tmpdir, datafiles):
os.remove(element_path)
# Close the workspace
- result = cli.run(
- project=project, args=["workspace", "close", "--remove-dir", element_name]
- )
+ result = cli.run(project=project, args=["workspace", "close", "--remove-dir", element_name])
result.assert_success()
# Assert the workspace dir has been deleted
@@ -561,17 +484,11 @@ def test_close_nonexistant_element(cli, tmpdir, datafiles):
def test_close_multiple(cli, tmpdir, datafiles):
tmpdir_alpha = os.path.join(str(tmpdir), "alpha")
tmpdir_beta = os.path.join(str(tmpdir), "beta")
- alpha, project, workspace_alpha = open_workspace(
- cli, tmpdir_alpha, datafiles, "git", suffix="-alpha"
- )
- beta, project, workspace_beta = open_workspace(
- cli, tmpdir_beta, datafiles, "git", suffix="-beta"
- )
+ alpha, project, workspace_alpha = open_workspace(cli, tmpdir_alpha, datafiles, "git", suffix="-alpha")
+ beta, project, workspace_beta = open_workspace(cli, tmpdir_beta, datafiles, "git", suffix="-beta")
# Close the workspaces
- result = cli.run(
- project=project, args=["workspace", "close", "--remove-dir", alpha, beta]
- )
+ result = cli.run(project=project, args=["workspace", "close", "--remove-dir", alpha, beta])
result.assert_success()
# Assert the workspace dirs have been deleted
@@ -583,17 +500,11 @@ def test_close_multiple(cli, tmpdir, datafiles):
def test_close_all(cli, tmpdir, datafiles):
tmpdir_alpha = os.path.join(str(tmpdir), "alpha")
tmpdir_beta = os.path.join(str(tmpdir), "beta")
- _, project, workspace_alpha = open_workspace(
- cli, tmpdir_alpha, datafiles, "git", suffix="-alpha"
- )
- _, project, workspace_beta = open_workspace(
- cli, tmpdir_beta, datafiles, "git", suffix="-beta"
- )
+ _, project, workspace_alpha = open_workspace(cli, tmpdir_alpha, datafiles, "git", suffix="-alpha")
+ _, project, workspace_beta = open_workspace(cli, tmpdir_beta, datafiles, "git", suffix="-beta")
# Close the workspaces
- result = cli.run(
- project=project, args=["workspace", "close", "--remove-dir", "--all"]
- )
+ result = cli.run(project=project, args=["workspace", "close", "--remove-dir", "--all"])
result.assert_success()
# Assert the workspace dirs have been deleted
@@ -657,9 +568,7 @@ def test_reset_soft(cli, tmpdir, datafiles):
assert os.path.exists(pony_path)
# Now soft-reset the open workspace, this should not revert the changes
- result = cli.run(
- project=project, args=["workspace", "reset", "--soft", element_name]
- )
+ result = cli.run(project=project, args=["workspace", "reset", "--soft", element_name])
result.assert_success()
# we removed this dir
assert not os.path.exists(os.path.join(workspace, "usr", "bin"))
@@ -677,12 +586,8 @@ def test_reset_multiple(cli, tmpdir, datafiles):
# Open the workspaces
tmpdir_alpha = os.path.join(str(tmpdir), "alpha")
tmpdir_beta = os.path.join(str(tmpdir), "beta")
- alpha, project, workspace_alpha = open_workspace(
- cli, tmpdir_alpha, datafiles, "git", suffix="-alpha"
- )
- beta, project, workspace_beta = open_workspace(
- cli, tmpdir_beta, datafiles, "git", suffix="-beta"
- )
+ alpha, project, workspace_alpha = open_workspace(cli, tmpdir_alpha, datafiles, "git", suffix="-alpha")
+ beta, project, workspace_beta = open_workspace(cli, tmpdir_beta, datafiles, "git", suffix="-beta")
# Modify workspaces
shutil.rmtree(os.path.join(workspace_alpha, "usr", "bin"))
@@ -703,12 +608,8 @@ def test_reset_all(cli, tmpdir, datafiles):
# Open the workspaces
tmpdir_alpha = os.path.join(str(tmpdir), "alpha")
tmpdir_beta = os.path.join(str(tmpdir), "beta")
- _, project, workspace_alpha = open_workspace(
- cli, tmpdir_alpha, datafiles, "git", suffix="-alpha"
- )
- _, project, workspace_beta = open_workspace(
- cli, tmpdir_beta, datafiles, "git", suffix="-beta"
- )
+ _, project, workspace_alpha = open_workspace(cli, tmpdir_alpha, datafiles, "git", suffix="-alpha")
+ _, project, workspace_beta = open_workspace(cli, tmpdir_beta, datafiles, "git", suffix="-beta")
# Modify workspaces
shutil.rmtree(os.path.join(workspace_alpha, "usr", "bin"))
@@ -749,13 +650,9 @@ def test_list(cli, tmpdir, datafiles):
[(False, False), (True, True), (True, False)],
ids=["project-no-guess", "workspace-guess", "workspace-no-guess"],
)
-def test_build(
- cli, tmpdir_factory, datafiles, kind, strict, from_workspace, guess_element
-):
+def test_build(cli, tmpdir_factory, datafiles, kind, strict, from_workspace, guess_element):
tmpdir = tmpdir_factory.mktemp("")
- element_name, project, workspace = open_workspace(
- cli, tmpdir, datafiles, kind, False
- )
+ element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, kind, False)
checkout = os.path.join(str(tmpdir), "checkout")
args_dir = ["-C", workspace] if from_workspace else []
args_elm = [element_name] if not guess_element else []
@@ -786,10 +683,7 @@ def test_build(
assert key_1 == key_2
# Checkout the result
- result = cli.run(
- project=project,
- args=args_dir + ["artifact", "checkout", "--directory", checkout, *args_elm],
- )
+ result = cli.run(project=project, args=args_dir + ["artifact", "checkout", "--directory", checkout, *args_elm],)
result.assert_success()
# Check that the pony.conf from the modified workspace exists
@@ -896,10 +790,7 @@ def test_detect_modifications(cli, tmpdir, datafiles, modification, strict):
assert key_1 != key_3
# Checkout the result
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
result.assert_success()
# Check the result for the changes we made
@@ -927,10 +818,7 @@ def test_detect_modifications(cli, tmpdir, datafiles, modification, strict):
# Test loading a negative workspace version
{"format-version": -1},
# Test loading version 0 with two sources
- {
- "format-version": 0,
- "alpha.bst": {0: "/workspaces/bravo", 1: "/workspaces/charlie",},
- },
+ {"format-version": 0, "alpha.bst": {0: "/workspaces/bravo", 1: "/workspaces/charlie",},},
# Test loading a version with decimals
{"format-version": 0.5},
# Test loading a future version
@@ -959,13 +847,7 @@ def test_list_unsupported_workspace(cli, datafiles, workspace_cfg):
{"alpha.bst": "/workspaces/bravo"},
{
"format-version": BST_WORKSPACE_FORMAT_VERSION,
- "workspaces": {
- "alpha.bst": {
- "prepared": False,
- "path": "/workspaces/bravo",
- "running_files": {},
- }
- },
+ "workspaces": {"alpha.bst": {"prepared": False, "path": "/workspaces/bravo", "running_files": {},}},
},
),
# Test loading version 0 with only one source
@@ -973,30 +855,15 @@ def test_list_unsupported_workspace(cli, datafiles, workspace_cfg):
{"alpha.bst": {0: "/workspaces/bravo"}},
{
"format-version": BST_WORKSPACE_FORMAT_VERSION,
- "workspaces": {
- "alpha.bst": {
- "prepared": False,
- "path": "/workspaces/bravo",
- "running_files": {},
- }
- },
+ "workspaces": {"alpha.bst": {"prepared": False, "path": "/workspaces/bravo", "running_files": {},}},
},
),
# Test loading version 1
(
- {
- "format-version": 1,
- "workspaces": {"alpha.bst": {"path": "/workspaces/bravo"}},
- },
+ {"format-version": 1, "workspaces": {"alpha.bst": {"path": "/workspaces/bravo"}},},
{
"format-version": BST_WORKSPACE_FORMAT_VERSION,
- "workspaces": {
- "alpha.bst": {
- "prepared": False,
- "path": "/workspaces/bravo",
- "running_files": {},
- }
- },
+ "workspaces": {"alpha.bst": {"prepared": False, "path": "/workspaces/bravo", "running_files": {},}},
},
),
# Test loading version 2
@@ -1027,23 +894,11 @@ def test_list_unsupported_workspace(cli, datafiles, workspace_cfg):
(
{
"format-version": 3,
- "workspaces": {
- "alpha.bst": {
- "prepared": True,
- "path": "/workspaces/bravo",
- "running_files": {},
- }
- },
+ "workspaces": {"alpha.bst": {"prepared": True, "path": "/workspaces/bravo", "running_files": {},}},
},
{
"format-version": BST_WORKSPACE_FORMAT_VERSION,
- "workspaces": {
- "alpha.bst": {
- "prepared": True,
- "path": "/workspaces/bravo",
- "running_files": {},
- }
- },
+ "workspaces": {"alpha.bst": {"prepared": True, "path": "/workspaces/bravo", "running_files": {},}},
},
),
],
@@ -1087,14 +942,9 @@ def test_list_supported_workspace(cli, tmpdir, datafiles, workspace_cfg, expecte
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
# Make a change to the workspaces file
- result = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
result.assert_success()
- result = cli.run(
- project=project, args=["workspace", "close", "--remove-dir", element_name]
- )
+ result = cli.run(project=project, args=["workspace", "close", "--remove-dir", element_name])
result.assert_success()
# Check that workspace config is converted correctly if necessary
@@ -1116,9 +966,7 @@ def test_inconsitent_pipeline_message(cli, tmpdir, datafiles):
@pytest.mark.parametrize("strict", [("strict"), ("non-strict")])
def test_cache_key_workspace_in_dependencies(cli, tmpdir, datafiles, strict):
checkout = os.path.join(str(tmpdir), "checkout")
- element_name, project, workspace = open_workspace(
- cli, os.path.join(str(tmpdir), "repo-a"), datafiles, "git"
- )
+ element_name, project, workspace = open_workspace(cli, os.path.join(str(tmpdir), "repo-a"), datafiles, "git")
element_path = os.path.join(project, "elements")
back_dep_element_name = "workspace-test-back-dep.bst"
@@ -1165,10 +1013,7 @@ def test_cache_key_workspace_in_dependencies(cli, tmpdir, datafiles, strict):
assert key_b1 == key_b2
# Checkout the result
- result = cli.run(
- project=project,
- args=["artifact", "checkout", back_dep_element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", back_dep_element_name, "--directory", checkout],)
result.assert_success()
# Check that the pony.conf from the modified workspace exists
@@ -1185,9 +1030,7 @@ def test_multiple_failed_builds(cli, tmpdir, datafiles):
"kind": "manual",
"config": {"configure-commands": ["unknown_command_that_will_fail"]},
}
- element_name, project, _ = open_workspace(
- cli, tmpdir, datafiles, "git", element_attrs=element_config
- )
+ element_name, project, _ = open_workspace(cli, tmpdir, datafiles, "git", element_attrs=element_config)
for _ in range(2):
result = cli.run(project=project, args=["build", element_name])
@@ -1208,12 +1051,7 @@ def test_external_fetch(cli, datafiles, tmpdir_factory, subdir, guess_element):
create_element_size(depend_element, str(datafiles), "elements", [], 1024)
element_name, project, workspace = open_workspace(
- cli,
- tmpdir,
- datafiles,
- "git",
- no_checkout=True,
- element_attrs={"depends": [depend_element]},
+ cli, tmpdir, datafiles, "git", no_checkout=True, element_attrs={"depends": [depend_element]},
)
arg_elm = [element_name] if not guess_element else []
@@ -1227,9 +1065,7 @@ def test_external_fetch(cli, datafiles, tmpdir_factory, subdir, guess_element):
assert cli.get_element_state(str(datafiles), depend_element) == "fetch needed"
# Fetch the workspaced element
- result = cli.run(
- project=project, args=["-C", call_dir, "source", "fetch", *arg_elm]
- )
+ result = cli.run(project=project, args=["-C", call_dir, "source", "fetch", *arg_elm])
result.assert_success()
# Assert that the depended element has now been fetched
@@ -1250,15 +1086,10 @@ def test_external_push_pull(cli, datafiles, tmpdir_factory, guess_element):
cli.configure({"artifacts": {"url": share.repo, "push": True}})
- result = cli.run(
- project=project, args=["-C", workspace, "artifact", "push", *arg_elm]
- )
+ result = cli.run(project=project, args=["-C", workspace, "artifact", "push", *arg_elm])
result.assert_success()
- result = cli.run(
- project=project,
- args=["-C", workspace, "artifact", "pull", "--deps", "all", *arg_elm],
- )
+ result = cli.run(project=project, args=["-C", workspace, "artifact", "pull", "--deps", "all", *arg_elm],)
result.assert_success()
@@ -1280,9 +1111,7 @@ def test_external_track(cli, datafiles, tmpdir_factory, guess_element):
del element_contents.get_sequence("sources").mapping_at(0)["ref"]
_yaml.roundtrip_dump(element_contents, element_file)
- result = cli.run(
- project=project, args=["-C", workspace, "source", "track", *arg_elm]
- )
+ result = cli.run(project=project, args=["-C", workspace, "source", "track", *arg_elm])
result.assert_success()
# Element is not tracked now
@@ -1290,9 +1119,7 @@ def test_external_track(cli, datafiles, tmpdir_factory, guess_element):
assert "ref" not in element_contents.get_sequence("sources").mapping_at(0)
# close the workspace
- result = cli.run(
- project=project, args=["-C", workspace, "workspace", "close", *arg_elm]
- )
+ result = cli.run(project=project, args=["-C", workspace, "workspace", "close", *arg_elm])
result.assert_success()
# and retrack the element
@@ -1311,12 +1138,8 @@ def test_external_open_other(cli, datafiles, tmpdir_factory):
tmpdir1 = tmpdir_factory.mktemp("")
tmpdir2 = tmpdir_factory.mktemp("")
# Making use of the assumption that it's the same project in both invocations of open_workspace
- _, project, alpha_workspace = open_workspace(
- cli, tmpdir1, datafiles, "git", suffix="-alpha"
- )
- beta_element, _, beta_workspace = open_workspace(
- cli, tmpdir2, datafiles, "git", suffix="-beta"
- )
+ _, project, alpha_workspace = open_workspace(cli, tmpdir1, datafiles, "git", suffix="-alpha")
+ beta_element, _, beta_workspace = open_workspace(cli, tmpdir2, datafiles, "git", suffix="-beta")
# Closing the other element first, because I'm too lazy to create an
# element without opening it
@@ -1325,16 +1148,7 @@ def test_external_open_other(cli, datafiles, tmpdir_factory):
result = cli.run(
project=project,
- args=[
- "-C",
- alpha_workspace,
- "workspace",
- "open",
- "--force",
- "--directory",
- beta_workspace,
- beta_element,
- ],
+ args=["-C", alpha_workspace, "workspace", "open", "--force", "--directory", beta_workspace, beta_element,],
)
result.assert_success()
@@ -1345,15 +1159,10 @@ def test_external_close_other(cli, datafiles, tmpdir_factory):
tmpdir1 = tmpdir_factory.mktemp("")
tmpdir2 = tmpdir_factory.mktemp("")
# Making use of the assumption that it's the same project in both invocations of open_workspace
- _, project, alpha_workspace = open_workspace(
- cli, tmpdir1, datafiles, "git", suffix="-alpha"
- )
+ _, project, alpha_workspace = open_workspace(cli, tmpdir1, datafiles, "git", suffix="-alpha")
beta_element, _, _ = open_workspace(cli, tmpdir2, datafiles, "git", suffix="-beta")
- result = cli.run(
- project=project,
- args=["-C", alpha_workspace, "workspace", "close", beta_element],
- )
+ result = cli.run(project=project, args=["-C", alpha_workspace, "workspace", "close", beta_element],)
result.assert_success()
assert "you can no longer run BuildStream" not in result.stderr
@@ -1365,15 +1174,11 @@ def test_external_close_self(cli, datafiles, tmpdir_factory, guess_element):
tmpdir1 = tmpdir_factory.mktemp("")
tmpdir2 = tmpdir_factory.mktemp("")
# Making use of the assumption that it's the same project in both invocations of open_workspace
- alpha_element, project, alpha_workspace = open_workspace(
- cli, tmpdir1, datafiles, "git", suffix="-alpha"
- )
+ alpha_element, project, alpha_workspace = open_workspace(cli, tmpdir1, datafiles, "git", suffix="-alpha")
_, _, _ = open_workspace(cli, tmpdir2, datafiles, "git", suffix="-beta")
arg_elm = [alpha_element] if not guess_element else []
- result = cli.run(
- project=project, args=["-C", alpha_workspace, "workspace", "close", *arg_elm]
- )
+ result = cli.run(project=project, args=["-C", alpha_workspace, "workspace", "close", *arg_elm])
result.assert_success()
assert "you can no longer run BuildStream" in result.stderr
@@ -1383,15 +1188,10 @@ def test_external_reset_other(cli, datafiles, tmpdir_factory):
tmpdir1 = tmpdir_factory.mktemp("")
tmpdir2 = tmpdir_factory.mktemp("")
# Making use of the assumption that it's the same project in both invocations of open_workspace
- _, project, alpha_workspace = open_workspace(
- cli, tmpdir1, datafiles, "git", suffix="-alpha"
- )
+ _, project, alpha_workspace = open_workspace(cli, tmpdir1, datafiles, "git", suffix="-alpha")
beta_element, _, _ = open_workspace(cli, tmpdir2, datafiles, "git", suffix="-beta")
- result = cli.run(
- project=project,
- args=["-C", alpha_workspace, "workspace", "reset", beta_element],
- )
+ result = cli.run(project=project, args=["-C", alpha_workspace, "workspace", "reset", beta_element],)
result.assert_success()
@@ -1402,9 +1202,7 @@ def test_external_reset_self(cli, datafiles, tmpdir, guess_element):
arg_elm = [element] if not guess_element else []
# Command succeeds
- result = cli.run(
- project=project, args=["-C", workspace, "workspace", "reset", *arg_elm]
- )
+ result = cli.run(project=project, args=["-C", workspace, "workspace", "reset", *arg_elm])
result.assert_success()
# Successive commands still work (i.e. .bstproject.yaml hasn't been deleted)
@@ -1430,19 +1228,13 @@ def test_multisource_workspace(cli, datafiles, tmpdir):
element_name = "multisource.bst"
element = {
"kind": "import",
- "sources": [
- {"kind": "local", "path": "files/bin-files"},
- {"kind": "local", "path": "files/dev-files"},
- ],
+ "sources": [{"kind": "local", "path": "files/bin-files"}, {"kind": "local", "path": "files/dev-files"},],
}
element_path = os.path.join(project, "elements", element_name)
_yaml.roundtrip_dump(element, element_path)
workspace_dir = os.path.join(str(tmpdir), "multisource")
- res = cli.run(
- project=project,
- args=["workspace", "open", "multisource.bst", "--directory", workspace_dir],
- )
+ res = cli.run(project=project, args=["workspace", "open", "multisource.bst", "--directory", workspace_dir],)
res.assert_success()
directories = os.listdir(os.path.join(workspace_dir, "usr"))
@@ -1462,14 +1254,8 @@ TEST_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)))
@pytest.mark.parametrize(
["case", "non_workspaced_elements_state"],
[
- (
- "workspaced-build-dep",
- ["waiting", "waiting", "waiting", "waiting", "waiting"],
- ),
- (
- "workspaced-runtime-dep",
- ["buildable", "buildable", "waiting", "waiting", "waiting"],
- ),
+ ("workspaced-build-dep", ["waiting", "waiting", "waiting", "waiting", "waiting"],),
+ ("workspaced-runtime-dep", ["buildable", "buildable", "waiting", "waiting", "waiting"],),
],
)
@pytest.mark.parametrize("strict", [("strict"), ("non-strict")])
@@ -1492,10 +1278,7 @@ def test_build_all(cli, tmpdir, datafiles, case, strict, non_workspaced_elements
cli.configure({"projects": {"test": {"strict": strict_mode}}})
# First open the workspace
- result = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, "elem1.bst"],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--directory", workspace, "elem1.bst"],)
result.assert_success()
    # Ensure all elements are waiting before we build the first
@@ -1508,9 +1291,7 @@ def test_build_all(cli, tmpdir, datafiles, case, strict, non_workspaced_elements
result.assert_success()
# Assert that the target is built
- assert cli.get_element_states(project, all_elements) == {
- elem: "cached" for elem in all_elements
- }
+ assert cli.get_element_states(project, all_elements) == {elem: "cached" for elem in all_elements}
@pytest.mark.datafiles(DATA_DIR)
@@ -1527,9 +1308,7 @@ def test_show_workspace_logs(cli, tmpdir, datafiles, strict):
cli.configure({"projects": {"test": {"strict": strict_mode}}})
# First open the workspace
- result = cli.run(
- project=project, args=["workspace", "open", "--directory", workspace, target]
- )
+ result = cli.run(project=project, args=["workspace", "open", "--directory", workspace, target])
result.assert_success()
# Build the element
diff --git a/tests/integration/artifact.py b/tests/integration/artifact.py
index 67565b803..d66c86ba9 100644
--- a/tests/integration/artifact.py
+++ b/tests/integration/artifact.py
@@ -45,25 +45,17 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project",)
# Does this really need a sandbox?
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_cache_buildtrees(cli, tmpdir, datafiles):
project = str(datafiles)
element_name = "autotools/amhello.bst"
cwd = str(tmpdir)
# Create artifact shares for pull & push testing
- with create_artifact_share(
- os.path.join(str(tmpdir), "share1")
- ) as share1, create_artifact_share(
+ with create_artifact_share(os.path.join(str(tmpdir), "share1")) as share1, create_artifact_share(
os.path.join(str(tmpdir), "share2")
- ) as share2, create_artifact_share(
- os.path.join(str(tmpdir), "share3")
- ) as share3:
- cli.configure(
- {"artifacts": {"url": share1.repo, "push": True}, "cachedir": str(tmpdir)}
- )
+ ) as share2, create_artifact_share(os.path.join(str(tmpdir), "share3")) as share3:
+ cli.configure({"artifacts": {"url": share1.repo, "push": True}, "cachedir": str(tmpdir)})
# Build autotools element with the default behavior of caching buildtrees
# only when necessary. The artifact should be successfully pushed to the share1 remote
@@ -86,10 +78,7 @@ def test_cache_buildtrees(cli, tmpdir, datafiles):
shutil.rmtree(os.path.join(str(tmpdir), "cas"))
shutil.rmtree(os.path.join(str(tmpdir), "artifacts"))
assert cli.get_element_state(project, element_name) != "cached"
- result = cli.run(
- project=project,
- args=["--pull-buildtrees", "artifact", "pull", element_name],
- )
+ result = cli.run(project=project, args=["--pull-buildtrees", "artifact", "pull", element_name],)
assert element_name in result.get_pulled_elements()
with cli.artifact.extract_buildtree(cwd, cwd, artifact_name) as buildtreedir:
assert not buildtreedir
@@ -109,13 +98,8 @@ def test_cache_buildtrees(cli, tmpdir, datafiles):
# Repeat building the artifacts, this time with cache-buildtrees set to
# 'always' via the cli, as such the buildtree dir should not be empty
- cli.configure(
- {"artifacts": {"url": share2.repo, "push": True}, "cachedir": str(tmpdir)}
- )
- result = cli.run(
- project=project,
- args=["--cache-buildtrees", "always", "build", element_name],
- )
+ cli.configure({"artifacts": {"url": share2.repo, "push": True}, "cachedir": str(tmpdir)})
+ result = cli.run(project=project, args=["--cache-buildtrees", "always", "build", element_name],)
assert result.exit_code == 0
assert cli.get_element_state(project, element_name) == "cached"
assert share2.get_artifact(cli.get_artifact_name(project, "test", element_name))
@@ -130,10 +114,7 @@ def test_cache_buildtrees(cli, tmpdir, datafiles):
shutil.rmtree(os.path.join(str(tmpdir), "cas"))
shutil.rmtree(os.path.join(str(tmpdir), "artifacts"))
assert cli.get_element_state(project, element_name) != "cached"
- result = cli.run(
- project=project,
- args=["--pull-buildtrees", "artifact", "pull", element_name],
- )
+ result = cli.run(project=project, args=["--pull-buildtrees", "artifact", "pull", element_name],)
assert element_name in result.get_pulled_elements()
with cli.artifact.extract_buildtree(cwd, cwd, artifact_name) as buildtreedir:
assert os.path.isdir(buildtreedir)
diff --git a/tests/integration/autotools.py b/tests/integration/autotools.py
index d270b2a77..47eb9cdd8 100644
--- a/tests/integration/autotools.py
+++ b/tests/integration/autotools.py
@@ -18,9 +18,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
# Test that an autotools build 'works' - we use the autotools sample
# amhello project for this.
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_autotools_build(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -29,10 +27,7 @@ def test_autotools_build(cli, datafiles):
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == 0
assert_contains(
@@ -53,9 +48,7 @@ def test_autotools_build(cli, datafiles):
# Test that an autotools build 'works' - we use the autotools sample
# amhello project for this.
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
def test_autotools_confroot_build(cli, datafiles):
project = str(datafiles)
@@ -65,10 +58,7 @@ def test_autotools_confroot_build(cli, datafiles):
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == 0
assert_contains(
@@ -88,9 +78,7 @@ def test_autotools_confroot_build(cli, datafiles):
# Test running an executable built with autotools
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_autotools_run(cli, datafiles):
project = str(datafiles)
element_name = "autotools/amhello.bst"
diff --git a/tests/integration/build-uid.py b/tests/integration/build-uid.py
index 367cf0248..c38dc4514 100644
--- a/tests/integration/build-uid.py
+++ b/tests/integration/build-uid.py
@@ -14,8 +14,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.skipif(
- not IS_LINUX or HAVE_SANDBOX != "bwrap",
- reason="Only available on linux with bubblewrap",
+ not IS_LINUX or HAVE_SANDBOX != "bwrap", reason="Only available on linux with bubblewrap",
)
@pytest.mark.datafiles(DATA_DIR)
def test_build_uid_overridden(cli, datafiles):
@@ -27,15 +26,12 @@ def test_build_uid_overridden(cli, datafiles):
"sandbox": {"build-uid": 800, "build-gid": 900},
}
- result = cli.run_project_config(
- project=project, project_config=project_config, args=["build", element_name]
- )
+ result = cli.run_project_config(project=project, project_config=project_config, args=["build", element_name])
assert result.exit_code == 0
@pytest.mark.skipif(
- not IS_LINUX or HAVE_SANDBOX != "bwrap",
- reason="Only available on linux with bubbelwrap",
+ not IS_LINUX or HAVE_SANDBOX != "bwrap", reason="Only available on linux with bubbelwrap",
)
@pytest.mark.datafiles(DATA_DIR)
def test_build_uid_in_project(cli, datafiles):
@@ -47,16 +43,12 @@ def test_build_uid_in_project(cli, datafiles):
"sandbox": {"build-uid": 1023, "build-gid": 3490},
}
- result = cli.run_project_config(
- project=project, project_config=project_config, args=["build", element_name]
- )
+ result = cli.run_project_config(project=project, project_config=project_config, args=["build", element_name])
assert result.exit_code == 0
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- HAVE_SANDBOX != "bwrap", reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(HAVE_SANDBOX != "bwrap", reason="Only available with a functioning sandbox")
def test_build_uid_default(cli, datafiles):
project = str(datafiles)
element_name = "build-uid/build-uid-default.bst"
diff --git a/tests/integration/cachedfail.py b/tests/integration/cachedfail.py
index 366346e2b..142e8e15f 100644
--- a/tests/integration/cachedfail.py
+++ b/tests/integration/cachedfail.py
@@ -35,9 +35,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_build_checkout_cached_fail(cli, datafiles):
project = str(datafiles)
element_path = os.path.join(project, "elements", "element.bst")
@@ -59,10 +57,7 @@ def test_build_checkout_cached_fail(cli, datafiles):
assert cli.get_element_state(project, "element.bst") == "failed"
# Now check it out
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "element.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "element.bst", "--directory", checkout],)
result.assert_success()
# Check that the checkout contains the file created before failure
@@ -71,9 +66,7 @@ def test_build_checkout_cached_fail(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_build_depend_on_cached_fail(cli, datafiles):
project = str(datafiles)
dep_path = os.path.join(project, "elements", "dep.bst")
@@ -87,10 +80,7 @@ def test_build_depend_on_cached_fail(cli, datafiles):
_yaml.roundtrip_dump(dep, dep_path)
target = {
"kind": "script",
- "depends": [
- {"filename": "base.bst", "type": "build",},
- {"filename": "dep.bst", "type": "build",},
- ],
+ "depends": [{"filename": "base.bst", "type": "build",}, {"filename": "dep.bst", "type": "build",},],
"config": {"commands": ["test -e /foo",],},
}
_yaml.roundtrip_dump(target, target_path)
@@ -110,9 +100,7 @@ def test_build_depend_on_cached_fail(cli, datafiles):
assert cli.get_element_state(project, "target.bst") == "waiting"
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("on_error", ("continue", "quit"))
def test_push_cached_fail(cli, tmpdir, datafiles, on_error):
@@ -142,10 +130,7 @@ def test_push_cached_fail(cli, tmpdir, datafiles, on_error):
)
# Build the element, continuing to finish active jobs on error.
- result = cli.run(
- project=project,
- args=["--on-error={}".format(on_error), "build", "element.bst"],
- )
+ result = cli.run(project=project, args=["--on-error={}".format(on_error), "build", "element.bst"],)
result.assert_main_error(ErrorDomain.STREAM, None)
# This element should have failed
@@ -154,9 +139,7 @@ def test_push_cached_fail(cli, tmpdir, datafiles, on_error):
assert share.get_artifact(cli.get_artifact_name(project, "test", "element.bst"))
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("on_error", ("continue", "quit"))
def test_push_failed_missing_shell(cli, tmpdir, datafiles, on_error):
@@ -190,10 +173,7 @@ def test_push_failed_missing_shell(cli, tmpdir, datafiles, on_error):
)
# Build the element, continuing to finish active jobs on error.
- result = cli.run(
- project=project,
- args=["--on-error={}".format(on_error), "build", "element.bst"],
- )
+ result = cli.run(project=project, args=["--on-error={}".format(on_error), "build", "element.bst"],)
result.assert_main_error(ErrorDomain.STREAM, None)
# This element should have failed
@@ -202,9 +182,7 @@ def test_push_failed_missing_shell(cli, tmpdir, datafiles, on_error):
assert share.get_artifact(cli.get_artifact_name(project, "test", "element.bst"))
-@pytest.mark.skipif(
- HAVE_SANDBOX != "bwrap", reason="Only available with bubblewrap on Linux"
-)
+@pytest.mark.skipif(HAVE_SANDBOX != "bwrap", reason="Only available with bubblewrap on Linux")
@pytest.mark.datafiles(DATA_DIR)
def test_host_tools_errors_are_not_cached(cli, datafiles, tmp_path):
# Create symlink to buildbox-casd to work with custom PATH
diff --git a/tests/integration/cmake.py b/tests/integration/cmake.py
index 14ee7c967..50e0bf296 100644
--- a/tests/integration/cmake.py
+++ b/tests/integration/cmake.py
@@ -16,9 +16,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_cmake_build(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -27,19 +25,14 @@ def test_cmake_build(cli, datafiles):
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == 0
assert_contains(checkout, ["/usr", "/usr/bin", "/usr/bin/hello"])
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
def test_cmake_confroot_build(cli, datafiles):
project = str(datafiles)
@@ -49,19 +42,14 @@ def test_cmake_confroot_build(cli, datafiles):
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == 0
assert_contains(checkout, ["/usr", "/usr/bin", "/usr/bin/hello"])
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_cmake_run(cli, datafiles):
project = str(datafiles)
element_name = "cmake/cmakehello.bst"
diff --git a/tests/integration/compose.py b/tests/integration/compose.py
index 2d68327e4..55d6674f9 100644
--- a/tests/integration/compose.py
+++ b/tests/integration/compose.py
@@ -107,9 +107,7 @@ def create_compose_element(name, path, config=None):
),
],
)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_compose_include(cli, datafiles, include_domains, exclude_domains, expected):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -127,10 +125,7 @@ def test_compose_include(cli, datafiles, include_domains, exclude_domains, expec
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == 0
assert set(walk_dir(checkout)) == set(expected)
diff --git a/tests/integration/filter.py b/tests/integration/filter.py
index 6e95915ee..ee20ceb25 100644
--- a/tests/integration/filter.py
+++ b/tests/integration/filter.py
@@ -17,9 +17,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.datafiles(os.path.join(DATA_DIR))
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_filter_pass_integration(datafiles, cli):
project = str(datafiles)
@@ -31,15 +29,7 @@ def test_filter_pass_integration(datafiles, cli):
checkout_dir = os.path.join(project, "filter")
result = cli.run(
project=project,
- args=[
- "artifact",
- "checkout",
- "--integrate",
- "--hardlinks",
- "--directory",
- checkout_dir,
- "filter/filter.bst",
- ],
+ args=["artifact", "checkout", "--integrate", "--hardlinks", "--directory", checkout_dir, "filter/filter.bst",],
)
result.assert_success()
diff --git a/tests/integration/import.py b/tests/integration/import.py
index b7f056bac..4faaba687 100644
--- a/tests/integration/import.py
+++ b/tests/integration/import.py
@@ -37,12 +37,7 @@ def create_import_element(name, path, source, target, source_path):
"/",
"/output",
"files/import-source",
- [
- "/output",
- "/output/test.txt",
- "/output/subdir",
- "/output/subdir/test.txt",
- ],
+ ["/output", "/output/test.txt", "/output/subdir", "/output/subdir/test.txt",],
),
],
)
@@ -58,8 +53,7 @@ def test_import(cli, datafiles, source, target, path, expected):
assert res.exit_code == 0
cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
+ project=project, args=["artifact", "checkout", element_name, "--directory", checkout],
)
assert res.exit_code == 0
diff --git a/tests/integration/make.py b/tests/integration/make.py
index 78f4ba8d7..1b303f4b0 100644
--- a/tests/integration/make.py
+++ b/tests/integration/make.py
@@ -18,9 +18,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
# Test that a make build 'works' - we use the make sample
# makehello project for this.
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_make_build(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -29,10 +27,7 @@ def test_make_build(cli, datafiles):
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == 0
assert_contains(checkout, ["/usr", "/usr/bin", "/usr/bin/hello"])
@@ -40,9 +35,7 @@ def test_make_build(cli, datafiles):
# Test running an executable built with make
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_make_run(cli, datafiles):
project = str(datafiles)
element_name = "make/makehello.bst"
diff --git a/tests/integration/manual.py b/tests/integration/manual.py
index 8db8b9671..b6b35600c 100644
--- a/tests/integration/manual.py
+++ b/tests/integration/manual.py
@@ -29,9 +29,7 @@ def create_manual_element(name, path, config, variables, environment):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_manual_element(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -44,10 +42,7 @@ def test_manual_element(cli, datafiles):
{
"configure-commands": ["echo './configure' >> test"],
"build-commands": ["echo 'make' >> test"],
- "install-commands": [
- "echo 'make install' >> test",
- "cp test %{install-root}",
- ],
+ "install-commands": ["echo 'make install' >> test", "cp test %{install-root}",],
"strip-commands": ["echo 'strip' >> %{install-root}/test"],
},
{},
@@ -58,8 +53,7 @@ def test_manual_element(cli, datafiles):
assert res.exit_code == 0
cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
+ project=project, args=["artifact", "checkout", element_name, "--directory", checkout],
)
assert res.exit_code == 0
@@ -77,9 +71,7 @@ strip
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_manual_element_environment(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -87,19 +79,14 @@ def test_manual_element_environment(cli, datafiles):
element_name = "import/import.bst"
create_manual_element(
- element_name,
- element_path,
- {"install-commands": ["echo $V >> test", "cp test %{install-root}"]},
- {},
- {"V": 2},
+ element_name, element_path, {"install-commands": ["echo $V >> test", "cp test %{install-root}"]}, {}, {"V": 2},
)
res = cli.run(project=project, args=["build", element_name])
assert res.exit_code == 0
cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
+ project=project, args=["artifact", "checkout", element_name, "--directory", checkout],
)
assert res.exit_code == 0
@@ -110,9 +97,7 @@ def test_manual_element_environment(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_manual_element_noparallel(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -122,13 +107,7 @@ def test_manual_element_noparallel(cli, datafiles):
create_manual_element(
element_name,
element_path,
- {
- "install-commands": [
- "echo $MAKEFLAGS >> test",
- "echo $V >> test",
- "cp test %{install-root}",
- ]
- },
+ {"install-commands": ["echo $MAKEFLAGS >> test", "echo $V >> test", "cp test %{install-root}",]},
{"notparallel": True},
{"MAKEFLAGS": "-j%{max-jobs} -Wall", "V": 2},
)
@@ -137,8 +116,7 @@ def test_manual_element_noparallel(cli, datafiles):
assert res.exit_code == 0
cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
+ project=project, args=["artifact", "checkout", element_name, "--directory", checkout],
)
assert res.exit_code == 0
@@ -154,9 +132,7 @@ def test_manual_element_noparallel(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_manual_element_logging(cli, datafiles):
project = str(datafiles)
element_path = os.path.join(project, "elements")
diff --git a/tests/integration/messages.py b/tests/integration/messages.py
index 8210664e5..66696772e 100644
--- a/tests/integration/messages.py
+++ b/tests/integration/messages.py
@@ -37,9 +37,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project",)
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_disable_message_lines(cli, datafiles):
project = str(datafiles)
element_path = os.path.join(project, "elements")
@@ -51,9 +49,7 @@ def test_disable_message_lines(cli, datafiles):
"config": {"build-commands": ['echo "Silly message"'], "strip-commands": []},
}
- os.makedirs(
- os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True
- )
+ os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True)
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
# First we check that we get the "Silly message"
@@ -63,17 +59,13 @@ def test_disable_message_lines(cli, datafiles):
# Let's now build it again, but with --message-lines 0
cli.remove_artifact_from_cache(project, element_name)
- result = cli.run(
- project=project, args=["--message-lines", "0", "build", element_name]
- )
+ result = cli.run(project=project, args=["--message-lines", "0", "build", element_name])
result.assert_success()
assert "Message contains " not in result.stderr
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_disable_error_lines(cli, datafiles):
project = str(datafiles)
element_path = os.path.join(project, "elements")
@@ -82,28 +74,19 @@ def test_disable_error_lines(cli, datafiles):
element = {
"kind": "manual",
"depends": [{"filename": "base.bst"}],
- "config": {
- "build-commands": ["This is a syntax error > >"],
- "strip-commands": [],
- },
+ "config": {"build-commands": ["This is a syntax error > >"], "strip-commands": [],},
}
- os.makedirs(
- os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True
- )
+ os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True)
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
# First we check that we get the syntax error
- result = cli.run(
- project=project, args=["--error-lines", "0", "build", element_name]
- )
+ result = cli.run(project=project, args=["--error-lines", "0", "build", element_name])
result.assert_main_error(ErrorDomain.STREAM, None)
assert "This is a syntax error" in result.stderr
# Let's now build it again, but with --error-lines 0
cli.remove_artifact_from_cache(project, element_name)
- result = cli.run(
- project=project, args=["--error-lines", "0", "build", element_name]
- )
+ result = cli.run(project=project, args=["--error-lines", "0", "build", element_name])
result.assert_main_error(ErrorDomain.STREAM, None)
assert "Printing the last" not in result.stderr
diff --git a/tests/integration/pip_element.py b/tests/integration/pip_element.py
index d85cb5f03..5ad6040a5 100644
--- a/tests/integration/pip_element.py
+++ b/tests/integration/pip_element.py
@@ -21,9 +21,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_pip_build(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -42,31 +40,23 @@ def test_pip_build(cli, datafiles):
}
],
}
- os.makedirs(
- os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True
- )
+ os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True)
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == 0
assert_contains(
- checkout,
- ["/usr", "/usr/lib", "/usr/bin", "/usr/bin/hello", "/usr/lib/python3.6"],
+ checkout, ["/usr", "/usr/lib", "/usr/bin", "/usr/bin/hello", "/usr/lib/python3.6"],
)
# Test running an executable built with pip
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_pip_run(cli, datafiles):
# Create and build our test element
test_pip_build(cli, datafiles)
@@ -80,9 +70,7 @@ def test_pip_run(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_pip_element_should_install_pip_deps(cli, datafiles, setup_pypi_repo):
project = str(datafiles)
elements_path = os.path.join(project, "elements")
@@ -104,9 +92,7 @@ def test_pip_element_should_install_pip_deps(cli, datafiles, setup_pypi_repo):
# set up directories
pypi_repo = os.path.join(project, "files", "pypi-repo")
os.makedirs(pypi_repo, exist_ok=True)
- os.makedirs(
- os.path.dirname(os.path.join(elements_path, element_name)), exist_ok=True
- )
+ os.makedirs(os.path.dirname(os.path.join(elements_path, element_name)), exist_ok=True)
setup_pypi_repo(mock_packages, pypi_repo)
# create pip element
@@ -121,11 +107,7 @@ def test_pip_element_should_install_pip_deps(cli, datafiles, setup_pypi_repo):
# FIXME: remove hardcoded ref once issue #1010 is closed
"ref": "ad96570b552498807abec33c06210bf68378d854ced6753b77916c5ed517610d",
},
- {
- "kind": "pip",
- "url": "file://{}".format(os.path.realpath(pypi_repo)),
- "packages": [myreqs_packages],
- },
+ {"kind": "pip", "url": "file://{}".format(os.path.realpath(pypi_repo)), "packages": [myreqs_packages],},
],
}
_yaml.roundtrip_dump(element, os.path.join(elements_path, element_name))
@@ -138,13 +120,8 @@ def test_pip_element_should_install_pip_deps(cli, datafiles, setup_pypi_repo):
# get installed packages in sandbox
installed_packages = set(
- cli.run(
- project=project, args=["shell", element_name, "pip3", "freeze"]
- ).output.split("\n")
+ cli.run(project=project, args=["shell", element_name, "pip3", "freeze"]).output.split("\n")
)
# compare with packages that are expected to be installed
- pip_source_packages = {
- package.replace("_", "-") + "==0.1"
- for package in dependencies + [myreqs_packages]
- }
+ pip_source_packages = {package.replace("_", "-") + "==0.1" for package in dependencies + [myreqs_packages]}
assert pip_source_packages.issubset(installed_packages)
diff --git a/tests/integration/pip_source.py b/tests/integration/pip_source.py
index bc9a4d94c..3a5d64632 100644
--- a/tests/integration/pip_source.py
+++ b/tests/integration/pip_source.py
@@ -48,16 +48,10 @@ def test_pip_source_import_packages(cli, datafiles, setup_pypi_repo):
"kind": "import",
"sources": [
{"kind": "local", "path": "files/pip-source"},
- {
- "kind": "pip",
- "url": "file://{}".format(os.path.realpath(pypi_repo)),
- "packages": [myreqs_packages],
- },
+ {"kind": "pip", "url": "file://{}".format(os.path.realpath(pypi_repo)), "packages": [myreqs_packages],},
],
}
- os.makedirs(
- os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True
- )
+ os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True)
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
result = cli.run(project=project, args=["source", "track", element_name])
@@ -66,10 +60,7 @@ def test_pip_source_import_packages(cli, datafiles, setup_pypi_repo):
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == 0
assert_contains(
@@ -124,9 +115,7 @@ def test_pip_source_import_requirements_files(cli, datafiles, setup_pypi_repo):
},
],
}
- os.makedirs(
- os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True
- )
+ os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True)
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
result = cli.run(project=project, args=["source", "track", element_name])
@@ -135,10 +124,7 @@ def test_pip_source_import_requirements_files(cli, datafiles, setup_pypi_repo):
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == 0
assert_contains(
@@ -158,9 +144,7 @@ def test_pip_source_import_requirements_files(cli, datafiles, setup_pypi_repo):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_pip_source_build(cli, datafiles, setup_pypi_repo):
project = str(datafiles)
element_path = os.path.join(project, "elements")
@@ -203,9 +187,7 @@ def test_pip_source_build(cli, datafiles, setup_pypi_repo):
]
},
}
- os.makedirs(
- os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True
- )
+ os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True)
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
result = cli.run(project=project, args=["source", "track", element_name])
diff --git a/tests/integration/pullbuildtrees.py b/tests/integration/pullbuildtrees.py
index f0cf22f59..e56823f31 100644
--- a/tests/integration/pullbuildtrees.py
+++ b/tests/integration/pullbuildtrees.py
@@ -41,22 +41,16 @@ def default_state(cli, tmpdir, share):
# directory of an element.
@pytest.mark.integration
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_pullbuildtrees(cli2, tmpdir, datafiles):
project = str(datafiles)
element_name = "autotools/amhello.bst"
cwd = str(tmpdir)
# Create artifact shares for pull & push testing
- with create_artifact_share(
- os.path.join(str(tmpdir), "share1")
- ) as share1, create_artifact_share(
+ with create_artifact_share(os.path.join(str(tmpdir), "share1")) as share1, create_artifact_share(
os.path.join(str(tmpdir), "share2")
- ) as share2, create_artifact_share(
- os.path.join(str(tmpdir), "share3")
- ) as share3:
+ ) as share2, create_artifact_share(os.path.join(str(tmpdir), "share3")) as share3:
cli2.configure(
{
"artifacts": {"url": share1.repo, "push": True},
@@ -69,9 +63,7 @@ def test_pullbuildtrees(cli2, tmpdir, datafiles):
result = cli2.run(project=project, args=["build", element_name])
assert result.exit_code == 0
assert cli2.get_element_state(project, element_name) == "cached"
- assert share1.get_artifact(
- cli2.get_artifact_name(project, "test", element_name)
- )
+ assert share1.get_artifact(cli2.get_artifact_name(project, "test", element_name))
default_state(cli2, tmpdir, share1)
# Pull artifact with default config, assert that pulling again
@@ -94,10 +86,7 @@ def test_pullbuildtrees(cli2, tmpdir, datafiles):
artifact_name = cli2.get_artifact_name(project, "test", element_name)
with cli2.artifact.extract_buildtree(cwd, cwd, artifact_name) as buildtreedir:
assert not buildtreedir
- result = cli2.run(
- project=project,
- args=["--pull-buildtrees", "artifact", "pull", element_name],
- )
+ result = cli2.run(project=project, args=["--pull-buildtrees", "artifact", "pull", element_name],)
assert element_name in result.get_pulled_elements()
with cli2.artifact.extract_buildtree(cwd, cwd, artifact_name) as buildtreedir:
assert os.path.isdir(buildtreedir)
@@ -111,20 +100,14 @@ def test_pullbuildtrees(cli2, tmpdir, datafiles):
assert element_name in result.get_pulled_elements()
result = cli2.run(project=project, args=["artifact", "pull", element_name])
assert element_name not in result.get_pulled_elements()
- result = cli2.run(
- project=project,
- args=["--pull-buildtrees", "artifact", "pull", element_name],
- )
+ result = cli2.run(project=project, args=["--pull-buildtrees", "artifact", "pull", element_name],)
assert element_name not in result.get_pulled_elements()
default_state(cli2, tmpdir, share1)
# Pull artifact with default config and buildtrees cli flag set, then assert
# that pulling with pullbuildtrees set in user config doesn't create a pull
# job.
- result = cli2.run(
- project=project,
- args=["--pull-buildtrees", "artifact", "pull", element_name],
- )
+ result = cli2.run(project=project, args=["--pull-buildtrees", "artifact", "pull", element_name],)
assert element_name in result.get_pulled_elements()
cli2.configure({"cache": {"pull-buildtrees": True}})
result = cli2.run(project=project, args=["artifact", "pull", element_name])
@@ -140,25 +123,18 @@ def test_pullbuildtrees(cli2, tmpdir, datafiles):
cli2.configure({"artifacts": {"url": share2.repo, "push": True}})
result = cli2.run(project=project, args=["artifact", "push", element_name])
assert element_name not in result.get_pushed_elements()
- assert not share2.get_artifact(
- cli2.get_artifact_name(project, "test", element_name)
- )
+ assert not share2.get_artifact(cli2.get_artifact_name(project, "test", element_name))
    # Assert that after pulling the missing buildtree the element artifact can be
    # successfully pushed to the remote. This will attempt to pull the buildtree
    # from share1 and then perform a 'complete' push to share2
cli2.configure({"artifacts": {"url": share1.repo, "push": False}})
- result = cli2.run(
- project=project,
- args=["--pull-buildtrees", "artifact", "pull", element_name],
- )
+ result = cli2.run(project=project, args=["--pull-buildtrees", "artifact", "pull", element_name],)
assert element_name in result.get_pulled_elements()
cli2.configure({"artifacts": {"url": share2.repo, "push": True}})
result = cli2.run(project=project, args=["artifact", "push", element_name])
assert element_name in result.get_pushed_elements()
- assert share2.get_artifact(
- cli2.get_artifact_name(project, "test", element_name)
- )
+ assert share2.get_artifact(cli2.get_artifact_name(project, "test", element_name))
default_state(cli2, tmpdir, share1)
# Assert that bst artifact push will automatically attempt to pull a missing buildtree
@@ -168,50 +144,31 @@ def test_pullbuildtrees(cli2, tmpdir, datafiles):
result = cli2.run(project=project, args=["artifact", "pull", element_name])
assert element_name in result.get_pulled_elements()
cli2.configure({"artifacts": {"url": share3.repo, "push": True}})
- result = cli2.run(
- project=project,
- args=["--pull-buildtrees", "artifact", "push", element_name],
- )
+ result = cli2.run(project=project, args=["--pull-buildtrees", "artifact", "push", element_name],)
assert "Attempting to fetch missing artifact buildtrees" in result.stderr
assert element_name not in result.get_pulled_elements()
with cli2.artifact.extract_buildtree(cwd, cwd, artifact_name) as buildtreedir:
assert not buildtreedir
assert element_name not in result.get_pushed_elements()
- assert not share3.get_artifact(
- cli2.get_artifact_name(project, "test", element_name)
- )
+ assert not share3.get_artifact(cli2.get_artifact_name(project, "test", element_name))
    # Assert that if we add an extra remote that has the buildtree artifact cached, bst artifact push will
    # automatically attempt to pull it and will be successful, leading to the full artifact being pushed
    # to the empty share3. This gives the ability to attempt to push currently partial artifacts to a remote,
    # without explicitly requiring a bst artifact pull.
- cli2.configure(
- {
- "artifacts": [
- {"url": share1.repo, "push": False},
- {"url": share3.repo, "push": True},
- ]
- }
- )
- result = cli2.run(
- project=project,
- args=["--pull-buildtrees", "artifact", "push", element_name],
- )
+ cli2.configure({"artifacts": [{"url": share1.repo, "push": False}, {"url": share3.repo, "push": True},]})
+ result = cli2.run(project=project, args=["--pull-buildtrees", "artifact", "push", element_name],)
assert "Attempting to fetch missing artifact buildtrees" in result.stderr
assert element_name in result.get_pulled_elements()
with cli2.artifact.extract_buildtree(cwd, cwd, artifact_name) as buildtreedir:
assert os.path.isdir(buildtreedir)
assert element_name in result.get_pushed_elements()
- assert share3.get_artifact(
- cli2.get_artifact_name(project, "test", element_name)
- )
+ assert share3.get_artifact(cli2.get_artifact_name(project, "test", element_name))
# Ensure that only valid pull-buildtrees boolean options make it through the loading
# process.
-@pytest.mark.parametrize(
- "value,success", [(True, True), (False, True), ("pony", False), ("1", False)]
-)
+@pytest.mark.parametrize("value,success", [(True, True), (False, True), ("pony", False), ("1", False)])
@pytest.mark.datafiles(DATA_DIR)
def test_invalid_cache_pullbuildtrees(cli, datafiles, value, success):
project = str(datafiles)
diff --git a/tests/integration/sandbox-bwrap.py b/tests/integration/sandbox-bwrap.py
index 6f33275e9..0c84ba888 100644
--- a/tests/integration/sandbox-bwrap.py
+++ b/tests/integration/sandbox-bwrap.py
@@ -33,8 +33,7 @@ def test_sandbox_bwrap_cleanup_build(cli, datafiles):
@pytest.mark.skipif(HAVE_SANDBOX != "bwrap", reason="Only available with bubblewrap")
@pytest.mark.skipif(
- not HAVE_BWRAP_JSON_STATUS,
- reason="Only available with bubblewrap supporting --json-status-fd",
+ not HAVE_BWRAP_JSON_STATUS, reason="Only available with bubblewrap supporting --json-status-fd",
)
@pytest.mark.datafiles(DATA_DIR)
def test_sandbox_bwrap_distinguish_setup_error(cli, datafiles):
@@ -42,9 +41,7 @@ def test_sandbox_bwrap_distinguish_setup_error(cli, datafiles):
element_name = "sandbox-bwrap/non-executable-shell.bst"
result = cli.run(project=project, args=["build", element_name])
- result.assert_task_error(
- error_domain=ErrorDomain.SANDBOX, error_reason="bwrap-sandbox-fail"
- )
+ result.assert_task_error(error_domain=ErrorDomain.SANDBOX, error_reason="bwrap-sandbox-fail")
@pytest.mark.skipif(HAVE_SANDBOX != "bwrap", reason="Only available with bubblewrap")
@@ -58,10 +55,5 @@ def test_sandbox_bwrap_return_subprocess(cli, datafiles):
)
result = cli.run(project=project, args=["build", element_name])
- result.assert_task_error(
- error_domain=ErrorDomain.SANDBOX, error_reason="command-failed"
- )
- assert (
- "sandbox-bwrap/command-exit-42.bst|Command failed with exitcode 42"
- in result.stderr
- )
+ result.assert_task_error(error_domain=ErrorDomain.SANDBOX, error_reason="command-failed")
+ assert "sandbox-bwrap/command-exit-42.bst|Command failed with exitcode 42" in result.stderr
diff --git a/tests/integration/script.py b/tests/integration/script.py
index 4f44feae4..964cab384 100644
--- a/tests/integration/script.py
+++ b/tests/integration/script.py
@@ -33,9 +33,7 @@ def create_script_element(name, path, config=None, variables=None):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_script(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -45,21 +43,13 @@ def test_script(cli, datafiles):
create_script_element(
element_name,
element_path,
- config={
- "commands": [
- "mkdir -p %{install-root}",
- "echo 'Hi' > %{install-root}/test",
- ],
- },
+ config={"commands": ["mkdir -p %{install-root}", "echo 'Hi' > %{install-root}/test",],},
)
res = cli.run(project=project, args=["build", element_name])
assert res.exit_code == 0
- res = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ res = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert res.exit_code == 0
with open(os.path.join(checkout, "test")) as f:
@@ -69,9 +59,7 @@ def test_script(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_script_root(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -96,10 +84,7 @@ def test_script_root(cli, datafiles):
res = cli.run(project=project, args=["build", element_name])
assert res.exit_code == 0
- res = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ res = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert res.exit_code == 0
with open(os.path.join(checkout, "test")) as f:
@@ -109,9 +94,7 @@ def test_script_root(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
def test_script_no_root(cli, datafiles):
project = str(datafiles)
@@ -138,9 +121,7 @@ def test_script_no_root(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
def test_script_cwd(cli, datafiles):
project = str(datafiles)
@@ -151,19 +132,14 @@ def test_script_cwd(cli, datafiles):
create_script_element(
element_name,
element_path,
- config={
- "commands": ["echo 'test' > test", "cp /buildstream/test %{install-root}"],
- },
+ config={"commands": ["echo 'test' > test", "cp /buildstream/test %{install-root}"],},
variables={"cwd": "/buildstream"},
)
res = cli.run(project=project, args=["build", element_name])
assert res.exit_code == 0
- res = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ res = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert res.exit_code == 0
with open(os.path.join(checkout, "test")) as f:
@@ -173,9 +149,7 @@ def test_script_cwd(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_script_layout(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -185,8 +159,7 @@ def test_script_layout(cli, datafiles):
assert res.exit_code == 0
cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
+ project=project, args=["artifact", "checkout", element_name, "--directory", checkout],
)
assert res.exit_code == 0
@@ -197,9 +170,7 @@ def test_script_layout(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_regression_cache_corruption(cli, datafiles):
project = str(datafiles)
checkout_original = os.path.join(cli.directory, "checkout-original")
@@ -211,14 +182,7 @@ def test_regression_cache_corruption(cli, datafiles):
assert res.exit_code == 0
res = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- canary_element_name,
- "--directory",
- checkout_original,
- ],
+ project=project, args=["artifact", "checkout", canary_element_name, "--directory", checkout_original,],
)
assert res.exit_code == 0
@@ -228,16 +192,7 @@ def test_regression_cache_corruption(cli, datafiles):
res = cli.run(project=project, args=["build", element_name])
assert res.exit_code == 0
- res = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- canary_element_name,
- "--directory",
- checkout_after,
- ],
- )
+ res = cli.run(project=project, args=["artifact", "checkout", canary_element_name, "--directory", checkout_after,],)
assert res.exit_code == 0
with open(os.path.join(checkout_after, "canary")) as f:
@@ -245,9 +200,7 @@ def test_regression_cache_corruption(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_regression_tmpdir(cli, datafiles):
project = str(datafiles)
element_name = "script/tmpdir.bst"
@@ -257,9 +210,7 @@ def test_regression_tmpdir(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_regression_cache_corruption_2(cli, datafiles):
project = str(datafiles)
checkout_original = os.path.join(cli.directory, "checkout-original")
@@ -271,14 +222,7 @@ def test_regression_cache_corruption_2(cli, datafiles):
assert res.exit_code == 0
res = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- canary_element_name,
- "--directory",
- checkout_original,
- ],
+ project=project, args=["artifact", "checkout", canary_element_name, "--directory", checkout_original,],
)
assert res.exit_code == 0
@@ -288,16 +232,7 @@ def test_regression_cache_corruption_2(cli, datafiles):
res = cli.run(project=project, args=["build", element_name])
assert res.exit_code == 0
- res = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- canary_element_name,
- "--directory",
- checkout_after,
- ],
- )
+ res = cli.run(project=project, args=["artifact", "checkout", canary_element_name, "--directory", checkout_after,],)
assert res.exit_code == 0
with open(os.path.join(checkout_after, "canary")) as f:
diff --git a/tests/integration/shell.py b/tests/integration/shell.py
index 124770aad..e03b38563 100644
--- a/tests/integration/shell.py
+++ b/tests/integration/shell.py
@@ -32,13 +32,9 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
# element (str): The element to build and run a shell with
# isolate (bool): Whether to pass --isolate to `bst shell`
#
-def execute_shell(
- cli, project, command, *, config=None, mount=None, element="base.bst", isolate=False
-):
+def execute_shell(cli, project, command, *, config=None, mount=None, element="base.bst", isolate=False):
# Ensure the element is built
- result = cli.run_project_config(
- project=project, project_config=config, args=["build", element]
- )
+ result = cli.run_project_config(project=project, project_config=config, args=["build", element])
assert result.exit_code == 0
args = ["shell"]
@@ -55,9 +51,7 @@ def execute_shell(
# Test running something through a shell, allowing it to find the
# executable
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_shell(cli, datafiles):
project = str(datafiles)
@@ -68,9 +62,7 @@ def test_shell(cli, datafiles):
# Test running an executable directly
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_executable(cli, datafiles):
project = str(datafiles)
@@ -82,19 +74,14 @@ def test_executable(cli, datafiles):
# Test shell environment variable explicit assignments
@pytest.mark.parametrize("animal", [("Horse"), ("Pony")])
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
# This test seems to fail or pass depending on whether this file is run alone or with the whole test suite
def test_env_assign(cli, datafiles, animal):
project = str(datafiles)
expected = animal + "\n"
result = execute_shell(
- cli,
- project,
- ["/bin/sh", "-c", "echo ${ANIMAL}"],
- config={"shell": {"environment": {"ANIMAL": animal}}},
+ cli, project, ["/bin/sh", "-c", "echo ${ANIMAL}"], config={"shell": {"environment": {"ANIMAL": animal}}},
)
assert result.exit_code == 0
@@ -104,9 +91,7 @@ def test_env_assign(cli, datafiles, animal):
# Test shell environment variable explicit assignments with host env var expansion
@pytest.mark.parametrize("animal", [("Horse"), ("Pony")])
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
# This test seems to fail or pass depending on whether this file is run alone or with the whole test suite
def test_env_assign_expand_host_environ(cli, datafiles, animal):
project = str(datafiles)
@@ -129,9 +114,7 @@ def test_env_assign_expand_host_environ(cli, datafiles, animal):
# when running an isolated shell
@pytest.mark.parametrize("animal", [("Horse"), ("Pony")])
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
# This test seems to fail or pass depending on whether this file is run alone or with the whole test suite
def test_env_assign_isolated(cli, datafiles, animal):
project = str(datafiles)
@@ -150,9 +133,7 @@ def test_env_assign_isolated(cli, datafiles, animal):
# Test running an executable in a runtime with no shell (i.e., no
# /bin/sh)
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_no_shell(cli, datafiles):
project = str(datafiles)
element_path = os.path.join(project, "elements")
@@ -165,14 +146,10 @@ def test_no_shell(cli, datafiles):
"variables": {"install-root": "/"},
"config": {"commands": ["rm /bin/sh"]},
}
- os.makedirs(
- os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True
- )
+ os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True)
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
- result = execute_shell(
- cli, project, ["/bin/echo", "Pegasissies!"], element=element_name
- )
+ result = execute_shell(cli, project, ["/bin/echo", "Pegasissies!"], element=element_name)
assert result.exit_code == 0
assert result.output == "Pegasissies!\n"
@@ -180,18 +157,13 @@ def test_no_shell(cli, datafiles):
# Test that bind mounts defined in project.conf work
@pytest.mark.parametrize("path", [("/etc/pony.conf"), ("/usr/share/pony/pony.txt")])
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
def test_host_files(cli, datafiles, path):
project = str(datafiles)
ponyfile = os.path.join(project, "files", "shell-mount", "pony.txt")
result = execute_shell(
- cli,
- project,
- ["cat", path],
- config={"shell": {"host-files": [{"host_path": ponyfile, "path": path}]}},
+ cli, project, ["cat", path], config={"shell": {"host-files": [{"host_path": ponyfile, "path": path}]}},
)
assert result.exit_code == 0
assert result.output == "pony\n"
@@ -200,9 +172,7 @@ def test_host_files(cli, datafiles, path):
# Test that bind mounts defined in project.conf work
@pytest.mark.parametrize("path", [("/etc"), ("/usr/share/pony")])
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
def test_host_files_expand_environ(cli, datafiles, path):
project = str(datafiles)
@@ -217,14 +187,7 @@ def test_host_files_expand_environ(cli, datafiles, path):
project,
["cat", fullpath],
config={
- "shell": {
- "host-files": [
- {
- "host_path": "${HOST_PONY_PATH}/pony.txt",
- "path": "${BASE_PONY}/pony.txt",
- }
- ]
- }
+ "shell": {"host-files": [{"host_path": "${HOST_PONY_PATH}/pony.txt", "path": "${BASE_PONY}/pony.txt",}]}
},
)
assert result.exit_code == 0
@@ -234,9 +197,7 @@ def test_host_files_expand_environ(cli, datafiles, path):
# Test that bind mounts defined in project.conf don't mount in isolation
@pytest.mark.parametrize("path", [("/etc/pony.conf"), ("/usr/share/pony/pony.txt")])
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_isolated_no_mount(cli, datafiles, path):
project = str(datafiles)
ponyfile = os.path.join(project, "files", "shell-mount", "pony.txt")
@@ -256,9 +217,7 @@ def test_isolated_no_mount(cli, datafiles, path):
# declared as optional, and that there is no warning if it is optional
@pytest.mark.parametrize("optional", [("mandatory"), ("optional")])
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_host_files_missing(cli, datafiles, optional):
project = str(datafiles)
ponyfile = os.path.join(project, "files", "shell-mount", "horsy.txt")
@@ -270,17 +229,7 @@ def test_host_files_missing(cli, datafiles, optional):
cli,
project,
["echo", "Hello"],
- config={
- "shell": {
- "host-files": [
- {
- "host_path": ponyfile,
- "path": "/etc/pony.conf",
- "optional": option,
- }
- ]
- }
- },
+ config={"shell": {"host-files": [{"host_path": ponyfile, "path": "/etc/pony.conf", "optional": option,}]}},
)
assert result.exit_code == 0
assert result.output == "Hello\n"
@@ -296,9 +245,7 @@ def test_host_files_missing(cli, datafiles, optional):
# Test that bind mounts defined in project.conf work
@pytest.mark.parametrize("path", [("/etc/pony.conf"), ("/usr/share/pony/pony.txt")])
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
def test_cli_mount(cli, datafiles, path):
project = str(datafiles)
@@ -311,9 +258,7 @@ def test_cli_mount(cli, datafiles, path):
# Test that we can see the workspace files in a shell
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_workspace_visible(cli, datafiles):
project = str(datafiles)
workspace = os.path.join(cli.directory, "workspace")
@@ -321,10 +266,7 @@ def test_workspace_visible(cli, datafiles):
# Open a workspace on our build-failing element
#
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
assert res.exit_code == 0
# Ensure the dependencies of our build-failing element are built
@@ -341,18 +283,14 @@ def test_workspace_visible(cli, datafiles):
# Cat the hello.c file from a bst shell command, and assert
# that we got the same content here
#
- result = cli.run(
- project=project, args=["shell", "--build", element_name, "--", "cat", "hello.c"]
- )
+ result = cli.run(project=project, args=["shell", "--build", element_name, "--", "cat", "hello.c"])
assert result.exit_code == 0
assert result.output == workspace_hello
# Test that '--sysroot' works
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
def test_sysroot(cli, tmpdir, datafiles):
project = str(datafiles)
@@ -365,10 +303,7 @@ def test_sysroot(cli, tmpdir, datafiles):
# Build and check out a sysroot
res = cli.run(project=project, args=["build", base_element])
res.assert_success()
- res = cli.run(
- project=project,
- args=["artifact", "checkout", base_element, "--directory", checkout_dir],
- )
+ res = cli.run(project=project, args=["artifact", "checkout", base_element, "--directory", checkout_dir],)
res.assert_success()
# Mutate the sysroot
@@ -397,9 +332,7 @@ def test_sysroot(cli, tmpdir, datafiles):
# Test system integration commands can access devices in /dev
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_integration_devices(cli, datafiles):
project = str(datafiles)
element_name = "integration.bst"
@@ -412,12 +345,8 @@ def test_integration_devices(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("build_shell", [("build"), ("nobuild")])
@pytest.mark.parametrize("guess_element", [True, False], ids=["guess", "no-guess"])
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
-def test_integration_external_workspace(
- cli, tmpdir_factory, datafiles, build_shell, guess_element
-):
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
+def test_integration_external_workspace(cli, tmpdir_factory, datafiles, build_shell, guess_element):
tmpdir = tmpdir_factory.mktemp("")
project = str(datafiles)
element_name = "autotools/amhello.bst"
@@ -430,10 +359,7 @@ def test_integration_external_workspace(
with open(project_file, "a") as f:
f.write(config_text)
- result = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace_dir, element_name],
- )
+ result = cli.run(project=project, args=["workspace", "open", "--directory", workspace_dir, element_name],)
result.assert_success()
result = cli.run(project=project, args=["-C", workspace_dir, "build", element_name])
@@ -449,9 +375,7 @@ def test_integration_external_workspace(
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_integration_partial_artifact(cli, datafiles, tmpdir, integration_cache):
project = str(datafiles)
@@ -484,9 +408,7 @@ def test_integration_partial_artifact(cli, datafiles, tmpdir, integration_cache)
],
)
result.assert_success()
- digest = utils.sha256sum(
- os.path.join(str(tmpdir), "tmp", "usr", "bin", "hello")
- )
+ digest = utils.sha256sum(os.path.join(str(tmpdir), "tmp", "usr", "bin", "hello"))
# Remove the binary from the CAS
cachedir = cli.config["cachedir"]
@@ -498,8 +420,6 @@ def test_integration_partial_artifact(cli, datafiles, tmpdir, integration_cache)
result.assert_main_error(ErrorDomain.APP, None)
# check the artifact gets completed with '--pull' specified
- result = cli.run(
- project=project, args=["shell", "--pull", element_name, "--", "hello"]
- )
+ result = cli.run(project=project, args=["shell", "--pull", element_name, "--", "hello"])
result.assert_success()
assert "autotools/amhello.bst" in result.get_pulled_elements()
diff --git a/tests/integration/shellbuildtrees.py b/tests/integration/shellbuildtrees.py
index 6ed6770a4..7144d4bb2 100644
--- a/tests/integration/shellbuildtrees.py
+++ b/tests/integration/shellbuildtrees.py
@@ -20,9 +20,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_buildtree_staged(cli_integration, datafiles):
# We can only test the non-interactive case
# The non-interactive case defaults to not using buildtrees
@@ -30,52 +28,32 @@ def test_buildtree_staged(cli_integration, datafiles):
project = str(datafiles)
element_name = "build-shell/buildtree.bst"
- res = cli_integration.run(
- project=project, args=["--cache-buildtrees", "always", "build", element_name]
- )
+ res = cli_integration.run(project=project, args=["--cache-buildtrees", "always", "build", element_name])
res.assert_success()
- res = cli_integration.run(
- project=project, args=["shell", "--build", element_name, "--", "cat", "test"]
- )
+ res = cli_integration.run(project=project, args=["shell", "--build", element_name, "--", "cat", "test"])
res.assert_shell_error()
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_buildtree_staged_forced_true(cli_integration, datafiles):
# Test that if we ask for a build tree it is there.
project = str(datafiles)
element_name = "build-shell/buildtree.bst"
- res = cli_integration.run(
- project=project, args=["--cache-buildtrees", "always", "build", element_name]
- )
+ res = cli_integration.run(project=project, args=["--cache-buildtrees", "always", "build", element_name])
res.assert_success()
res = cli_integration.run(
- project=project,
- args=[
- "shell",
- "--build",
- "--use-buildtree",
- "always",
- element_name,
- "--",
- "cat",
- "test",
- ],
+ project=project, args=["shell", "--build", "--use-buildtree", "always", element_name, "--", "cat", "test",],
)
res.assert_success()
assert "Hi" in res.output
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_buildtree_staged_warn_empty_cached(cli_integration, tmpdir, datafiles):
# Test that if we stage a cached and empty buildtree, we warn the user.
project = str(datafiles)
@@ -89,103 +67,49 @@ def test_buildtree_staged_warn_empty_cached(cli_integration, tmpdir, datafiles):
res.assert_success()
res = cli_integration.run(
- project=project,
- args=[
- "shell",
- "--build",
- "--use-buildtree",
- "always",
- element_name,
- "--",
- "cat",
- "test",
- ],
+ project=project, args=["shell", "--build", "--use-buildtree", "always", element_name, "--", "cat", "test",],
)
res.assert_main_error(ErrorDomain.APP, None)
- assert (
- "Artifact was created without buildtree, unable to launch shell with it"
- in res.stderr
- )
+ assert "Artifact was created without buildtree, unable to launch shell with it" in res.stderr
# Now attempt the same with the try option; this should not attempt to find a buildtree
# and should just launch the shell, though the cat should still fail.
res = cli_integration.run(
- project=project,
- args=[
- "shell",
- "--build",
- "--use-buildtree",
- "try",
- element_name,
- "--",
- "cat",
- "test",
- ],
- )
- assert (
- "Artifact created without buildtree, shell will be loaded without it"
- in res.stderr
+ project=project, args=["shell", "--build", "--use-buildtree", "try", element_name, "--", "cat", "test",],
)
+ assert "Artifact created without buildtree, shell will be loaded without it" in res.stderr
assert "Hi" not in res.output
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_buildtree_staged_if_available(cli_integration, datafiles):
# Test that a build tree can be correctly detected.
project = str(datafiles)
element_name = "build-shell/buildtree.bst"
- res = cli_integration.run(
- project=project, args=["--cache-buildtrees", "always", "build", element_name]
- )
+ res = cli_integration.run(project=project, args=["--cache-buildtrees", "always", "build", element_name])
res.assert_success()
res = cli_integration.run(
- project=project,
- args=[
- "shell",
- "--build",
- "--use-buildtree",
- "try",
- element_name,
- "--",
- "cat",
- "test",
- ],
+ project=project, args=["shell", "--build", "--use-buildtree", "try", element_name, "--", "cat", "test",],
)
res.assert_success()
assert "Hi" in res.output
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_buildtree_staged_forced_false(cli_integration, datafiles):
# Test that if we ask not to have a build tree it is not there
project = str(datafiles)
element_name = "build-shell/buildtree.bst"
- res = cli_integration.run(
- project=project, args=["--cache-buildtrees", "always", "build", element_name]
- )
+ res = cli_integration.run(project=project, args=["--cache-buildtrees", "always", "build", element_name])
res.assert_success()
res = cli_integration.run(
- project=project,
- args=[
- "shell",
- "--build",
- "--use-buildtree",
- "never",
- element_name,
- "--",
- "cat",
- "test",
- ],
+ project=project, args=["shell", "--build", "--use-buildtree", "never", element_name, "--", "cat", "test",],
)
res.assert_shell_error()
@@ -193,9 +117,7 @@ def test_buildtree_staged_forced_false(cli_integration, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_buildtree_from_failure(cli_integration, datafiles):
# Test that we can use a build tree after a failure
project = str(datafiles)
@@ -206,17 +128,7 @@ def test_buildtree_from_failure(cli_integration, datafiles):
# Assert that file has expected contents
res = cli_integration.run(
- project=project,
- args=[
- "shell",
- "--build",
- element_name,
- "--use-buildtree",
- "always",
- "--",
- "cat",
- "test",
- ],
+ project=project, args=["shell", "--build", element_name, "--use-buildtree", "always", "--", "cat", "test",],
)
res.assert_success()
assert "WARNING: using a buildtree from a failed build" in res.stderr
@@ -224,9 +136,7 @@ def test_buildtree_from_failure(cli_integration, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_buildtree_from_failure_option_never(cli_integration, tmpdir, datafiles):
project = str(datafiles)
@@ -236,35 +146,18 @@ def test_buildtree_from_failure_option_never(cli_integration, tmpdir, datafiles)
# without caching a buildtree explicitly
cli_integration.configure({"cachedir": str(tmpdir)})
- res = cli_integration.run(
- project=project, args=["--cache-buildtrees", "never", "build", element_name]
- )
+ res = cli_integration.run(project=project, args=["--cache-buildtrees", "never", "build", element_name])
res.assert_main_error(ErrorDomain.STREAM, None)
res = cli_integration.run(
- project=project,
- args=[
- "shell",
- "--build",
- element_name,
- "--use-buildtree",
- "always",
- "--",
- "cat",
- "test",
- ],
+ project=project, args=["shell", "--build", element_name, "--use-buildtree", "always", "--", "cat", "test",],
)
res.assert_main_error(ErrorDomain.APP, None)
- assert (
- "Artifact was created without buildtree, unable to launch shell with it"
- in res.stderr
- )
+ assert "Artifact was created without buildtree, unable to launch shell with it" in res.stderr
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_buildtree_from_failure_option_always(cli_integration, tmpdir, datafiles):
project = str(datafiles)
@@ -275,23 +168,11 @@ def test_buildtree_from_failure_option_always(cli_integration, tmpdir, datafiles
# cached with content.
cli_integration.configure({"cachedir": str(tmpdir)})
- res = cli_integration.run(
- project=project, args=["--cache-buildtrees", "always", "build", element_name]
- )
+ res = cli_integration.run(project=project, args=["--cache-buildtrees", "always", "build", element_name])
res.assert_main_error(ErrorDomain.STREAM, None)
res = cli_integration.run(
- project=project,
- args=[
- "shell",
- "--build",
- element_name,
- "--use-buildtree",
- "always",
- "--",
- "cat",
- "test",
- ],
+ project=project, args=["shell", "--build", element_name, "--use-buildtree", "always", "--", "cat", "test",],
)
res.assert_success()
assert "WARNING: using a buildtree from a failed build" in res.stderr
@@ -301,9 +182,7 @@ def test_buildtree_from_failure_option_always(cli_integration, tmpdir, datafiles
# Check that build shells work when pulled from a remote cache
# This is to roughly simulate remote execution
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_buildtree_pulled(cli, tmpdir, datafiles):
project = str(datafiles)
element_name = "build-shell/buildtree.bst"
@@ -311,10 +190,7 @@ def test_buildtree_pulled(cli, tmpdir, datafiles):
with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
# Build the element to push it to cache
cli.configure({"artifacts": {"url": share.repo, "push": True}})
- result = cli.run(
- project=project,
- args=["--cache-buildtrees", "always", "build", element_name],
- )
+ result = cli.run(project=project, args=["--cache-buildtrees", "always", "build", element_name],)
result.assert_success()
assert cli.get_element_state(project, element_name) == "cached"
@@ -325,40 +201,21 @@ def test_buildtree_pulled(cli, tmpdir, datafiles):
# Pull from cache, ensuring the cli option is set to pull the buildtree
result = cli.run(
- project=project,
- args=[
- "--pull-buildtrees",
- "artifact",
- "pull",
- "--deps",
- "all",
- element_name,
- ],
+ project=project, args=["--pull-buildtrees", "artifact", "pull", "--deps", "all", element_name,],
)
result.assert_success()
# Check it's using the cached build tree
res = cli.run(
project=project,
- args=[
- "shell",
- "--build",
- element_name,
- "--use-buildtree",
- "always",
- "--",
- "cat",
- "test",
- ],
+ args=["shell", "--build", element_name, "--use-buildtree", "always", "--", "cat", "test",],
)
res.assert_success()
# This test checks for correct behaviour if a buildtree is not present in the local cache.
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_buildtree_options(cli, tmpdir, datafiles):
project = str(datafiles)
element_name = "build-shell/buildtree.bst"
@@ -366,10 +223,7 @@ def test_buildtree_options(cli, tmpdir, datafiles):
with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
# Build the element to push it to cache
cli.configure({"artifacts": {"url": share.repo, "push": True}})
- result = cli.run(
- project=project,
- args=["--cache-buildtrees", "always", "build", element_name],
- )
+ result = cli.run(project=project, args=["--cache-buildtrees", "always", "build", element_name],)
result.assert_success()
assert cli.get_element_state(project, element_name) == "cached"
assert share.get_artifact(cli.get_artifact_name(project, "test", element_name))
@@ -380,51 +234,26 @@ def test_buildtree_options(cli, tmpdir, datafiles):
assert cli.get_element_state(project, element_name) != "cached"
# Pull from cache, but do not include buildtrees.
- result = cli.run(
- project=project, args=["artifact", "pull", "--deps", "all", element_name]
- )
+ result = cli.run(project=project, args=["artifact", "pull", "--deps", "all", element_name])
result.assert_success()
# Check it's not using the cached build tree
res = cli.run(
- project=project,
- args=[
- "shell",
- "--build",
- element_name,
- "--use-buildtree",
- "never",
- "--",
- "cat",
- "test",
- ],
+ project=project, args=["shell", "--build", element_name, "--use-buildtree", "never", "--", "cat", "test",],
)
res.assert_shell_error()
assert "Hi" not in res.output
# Check it's not using the cached build tree; the default is to ask, falling back
# to not using it for non-interactive behavior
- res = cli.run(
- project=project,
- args=["shell", "--build", element_name, "--", "cat", "test"],
- )
+ res = cli.run(project=project, args=["shell", "--build", element_name, "--", "cat", "test"],)
res.assert_shell_error()
assert "Hi" not in res.output
# Check that the lack of a buildtree is handled correctly, with 'try' not attempting
# to pull the buildtree, as the user context defaults to not pulling them
res = cli.run(
- project=project,
- args=[
- "shell",
- "--build",
- element_name,
- "--use-buildtree",
- "try",
- "--",
- "cat",
- "test",
- ],
+ project=project, args=["shell", "--build", element_name, "--use-buildtree", "try", "--", "cat", "test",],
)
assert "Hi" not in res.output
assert "Attempting to fetch missing artifact buildtrees" not in res.stderr
@@ -454,22 +283,11 @@ def test_buildtree_options(cli, tmpdir, datafiles):
# Check that the shell is not loaded at all when 'always' is set for the buildtree
# and the user context does not allow buildtree pulling
- result = cli.run(
- project=project, args=["artifact", "pull", "--deps", "all", element_name]
- )
+ result = cli.run(project=project, args=["artifact", "pull", "--deps", "all", element_name])
result.assert_success()
res = cli.run(
project=project,
- args=[
- "shell",
- "--build",
- element_name,
- "--use-buildtree",
- "always",
- "--",
- "cat",
- "test",
- ],
+ args=["shell", "--build", element_name, "--use-buildtree", "always", "--", "cat", "test",],
)
res.assert_main_error(ErrorDomain.APP, None)
assert "Buildtree is not cached locally or in available remotes" in res.stderr
@@ -493,18 +311,13 @@ def test_buildtree_options(cli, tmpdir, datafiles):
],
)
assert "Hi" in res.output
- assert (
- "buildtree is not cached locally, will attempt to pull from available remotes"
- in res.stderr
- )
+ assert "buildtree is not cached locally, will attempt to pull from available remotes" in res.stderr
assert "Attempting to fetch missing artifact buildtree" in res.stderr
# Tests running pull and pull-buildtree options at the same time.
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_pull_buildtree_pulled(cli, tmpdir, datafiles):
project = str(datafiles)
element_name = "build-shell/buildtree.bst"
@@ -512,10 +325,7 @@ def test_pull_buildtree_pulled(cli, tmpdir, datafiles):
with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
# Build the element to push it to cache
cli.configure({"artifacts": {"url": share.repo, "push": True}})
- result = cli.run(
- project=project,
- args=["--cache-buildtrees", "always", "build", element_name],
- )
+ result = cli.run(project=project, args=["--cache-buildtrees", "always", "build", element_name],)
result.assert_success()
assert cli.get_element_state(project, element_name) == "cached"
diff --git a/tests/integration/sockets.py b/tests/integration/sockets.py
index 6f0757ff4..3fb656e95 100644
--- a/tests/integration/sockets.py
+++ b/tests/integration/sockets.py
@@ -14,9 +14,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_builddir_socket_ignored(cli, datafiles):
project = str(datafiles)
element_name = "sockets/make-builddir-socket.bst"
@@ -26,9 +24,7 @@ def test_builddir_socket_ignored(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_install_root_socket_ignored(cli, datafiles):
project = str(datafiles)
element_name = "sockets/make-install-root-socket.bst"
diff --git a/tests/integration/source-determinism.py b/tests/integration/source-determinism.py
index b3a4dd96b..657ad0a67 100644
--- a/tests/integration/source-determinism.py
+++ b/tests/integration/source-determinism.py
@@ -28,9 +28,7 @@ def create_test_directory(*path, mode=0o644):
@pytest.mark.integration
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_deterministic_source_local(cli, tmpdir, datafiles):
"""Only user rights should be considered for local source.
"""
@@ -63,10 +61,7 @@ def test_deterministic_source_local(cli, tmpdir, datafiles):
result = cli.run(project=project, args=["build", element_name])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkoutdir],)
result.assert_success()
with open(os.path.join(checkoutdir, "ls-l"), "r") as f:
diff --git a/tests/integration/stack.py b/tests/integration/stack.py
index d208a8ce1..d17bd9fd2 100644
--- a/tests/integration/stack.py
+++ b/tests/integration/stack.py
@@ -15,9 +15,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_stack(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -27,8 +25,7 @@ def test_stack(cli, datafiles):
assert res.exit_code == 0
cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
+ project=project, args=["artifact", "checkout", element_name, "--directory", checkout],
)
assert res.exit_code == 0
diff --git a/tests/integration/symlinks.py b/tests/integration/symlinks.py
index bc9675ecf..6904f4b65 100644
--- a/tests/integration/symlinks.py
+++ b/tests/integration/symlinks.py
@@ -15,9 +15,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_absolute_symlinks(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -26,10 +24,7 @@ def test_absolute_symlinks(cli, datafiles):
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == 0
symlink = os.path.join(checkout, "opt", "orgname")
@@ -41,9 +36,7 @@ def test_absolute_symlinks(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_disallow_overlaps_inside_symlink_with_dangling_target(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -52,18 +45,13 @@ def test_disallow_overlaps_inside_symlink_with_dangling_target(cli, datafiles):
result = cli.run(project=project, args=["build", element_name])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == -1
assert "Destination is a symlink, not a directory: /opt/orgname" in result.stderr
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_detect_symlink_overlaps_pointing_outside_sandbox(cli, datafiles):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
@@ -76,20 +64,13 @@ def test_detect_symlink_overlaps_pointing_outside_sandbox(cli, datafiles):
# ...but when we compose them together, the overlaps create paths that
# point outside the sandbox which BuildStream needs to detect before it
# tries to actually write there.
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
assert result.exit_code == -1
- assert (
- "Destination is a symlink, not a directory: /opt/escape-hatch" in result.stderr
- )
+ assert "Destination is a symlink, not a directory: /opt/escape-hatch" in result.stderr
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_symlink_in_sandbox_path(cli, datafiles):
project = str(datafiles)
element_name = "symlinks/link-on-path-use.bst"
diff --git a/tests/integration/workspace.py b/tests/integration/workspace.py
index 127a9358d..a5f9eded5 100644
--- a/tests/integration/workspace.py
+++ b/tests/integration/workspace.py
@@ -17,18 +17,13 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_workspace_stages_once(cli, datafiles):
project = str(datafiles)
workspace = os.path.join(cli.directory, "workspace")
element_name = "workspace/workspace-mount.bst"
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
assert res.exit_code == 0
assert cli.get_element_key(project, element_name) != "{:?<64}".format("")
res = cli.run(project=project, args=["build", element_name])
@@ -36,18 +31,13 @@ def test_workspace_stages_once(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_workspace_mount(cli, datafiles):
project = str(datafiles)
workspace = os.path.join(cli.directory, "workspace")
element_name = "workspace/workspace-mount.bst"
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
assert res.exit_code == 0
res = cli.run(project=project, args=["build", element_name])
@@ -66,30 +56,20 @@ def test_workspace_mount_on_read_only_directory(cli, datafiles):
# make directory RO
os.chmod(workspace, 0o555)
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
assert res.exit_code == 0
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
-@pytest.mark.xfail(
- reason="Incremental builds are currently incompatible with workspace source plugin."
-)
+@pytest.mark.xfail(reason="Incremental builds are currently incompatible with workspace source plugin.")
def test_workspace_commanddir(cli, datafiles):
project = str(datafiles)
workspace = os.path.join(cli.directory, "workspace")
element_name = "workspace/workspace-commanddir.bst"
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
assert res.exit_code == 0
res = cli.run(project=project, args=["build", element_name])
@@ -100,9 +80,7 @@ def test_workspace_commanddir(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_workspace_updated_dependency(cli, datafiles):
project = str(datafiles)
workspace = os.path.join(cli.directory, "workspace")
@@ -124,10 +102,7 @@ def test_workspace_updated_dependency(cli, datafiles):
_yaml.roundtrip_dump(dependency, os.path.join(element_path, dep_name))
# First open the workspace
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
assert res.exit_code == 0
# We build the workspaced element, so that we have an artifact
@@ -156,9 +131,7 @@ def test_workspace_updated_dependency(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_workspace_update_dependency_failed(cli, datafiles):
project = str(datafiles)
workspace = os.path.join(cli.directory, "workspace")
@@ -181,10 +154,7 @@ def test_workspace_update_dependency_failed(cli, datafiles):
_yaml.roundtrip_dump(dependency, os.path.join(element_path, dep_name))
# First open the workspace
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
assert res.exit_code == 0
# We build the workspaced element, so that we have an artifact
@@ -233,9 +203,7 @@ def test_workspace_update_dependency_failed(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_updated_dependency_nested(cli, datafiles):
project = str(datafiles)
workspace = os.path.join(cli.directory, "workspace")
@@ -258,10 +226,7 @@ def test_updated_dependency_nested(cli, datafiles):
_yaml.roundtrip_dump(dependency, os.path.join(element_path, dep_name))
# First open the workspace
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
assert res.exit_code == 0
# We build the workspaced element, so that we have an artifact
@@ -288,13 +253,9 @@ def test_updated_dependency_nested(cli, datafiles):
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
-@pytest.mark.xfail(
- reason="Incremental builds are currently incompatible with workspace source plugin."
-)
+@pytest.mark.xfail(reason="Incremental builds are currently incompatible with workspace source plugin.")
def test_incremental_configure_commands_run_only_once(cli, datafiles):
project = str(datafiles)
workspace = os.path.join(cli.directory, "workspace")
@@ -310,10 +271,7 @@ def test_incremental_configure_commands_run_only_once(cli, datafiles):
_yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
# We open a workspace on the above element
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
res.assert_success()
# Then we build, and check whether the configure step succeeded
@@ -339,9 +297,7 @@ def test_incremental_configure_commands_run_only_once(cli, datafiles):
# part of a cleanup job.
#
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.xfail(HAVE_SANDBOX == "buildbox", reason="Not working with BuildBox")
def test_workspace_missing_last_successful(cli, datafiles):
project = str(datafiles)
@@ -349,10 +305,7 @@ def test_workspace_missing_last_successful(cli, datafiles):
element_name = "workspace/workspace-commanddir.bst"
# Open workspace
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
assert res.exit_code == 0
# Build first; this will record the last successful build in local state
@@ -370,19 +323,14 @@ def test_workspace_missing_last_successful(cli, datafiles):
# Check that we can still read failed workspace logs
@pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_workspace_failed_logs(cli, datafiles):
project = str(datafiles)
workspace = os.path.join(cli.directory, "failing_amhello")
element_name = "autotools/amhello-failure.bst"
# Open workspace
- res = cli.run(
- project=project,
- args=["workspace", "open", "--directory", workspace, element_name],
- )
+ res = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name],)
res.assert_success()
# Try to build and ensure the build fails
diff --git a/tests/internals/cascache.py b/tests/internals/cascache.py
index a35d50040..81273aeaf 100644
--- a/tests/internals/cascache.py
+++ b/tests/internals/cascache.py
@@ -47,9 +47,7 @@ def test_report_when_cascache_exist_not_cleanly(tmp_path, monkeypatch):
def test_report_when_cascache_is_forcefully_killed(tmp_path, monkeypatch):
dummy_buildbox_casd = tmp_path.joinpath("buildbox-casd")
- dummy_buildbox_casd.write_text(
- "#!/bin/bash\ntrap 'echo hello' SIGTERM\nwhile :\ndo\nsleep 60\ndone"
- )
+ dummy_buildbox_casd.write_text("#!/bin/bash\ntrap 'echo hello' SIGTERM\nwhile :\ndo\nsleep 60\ndone")
dummy_buildbox_casd.chmod(0o777)
monkeypatch.setenv("PATH", str(tmp_path), prepend=os.pathsep)
diff --git a/tests/internals/context.py b/tests/internals/context.py
index c219d5f5d..c2ee1efb5 100644
--- a/tests/internals/context.py
+++ b/tests/internals/context.py
@@ -53,9 +53,7 @@ def test_context_load_envvar(context_fixture):
assert isinstance(context, Context)
context.load(config=os.devnull)
- assert context.sourcedir == os.path.join(
- "/", "some", "path", "buildstream", "sources"
- )
+ assert context.sourcedir == os.path.join("/", "some", "path", "buildstream", "sources")
assert context.builddir == os.path.join("/", "some", "path", "buildstream", "build")
assert context.cachedir == os.path.join("/", "some", "path", "buildstream")
assert context.logdir == os.path.join("/", "some", "path", "buildstream", "logs")
diff --git a/tests/internals/pluginfactory.py b/tests/internals/pluginfactory.py
index 13c204752..f8d02ccc5 100644
--- a/tests/internals/pluginfactory.py
+++ b/tests/internals/pluginfactory.py
@@ -44,11 +44,7 @@ def test_element_factory(plugin_fixture):
def test_custom_source(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = SourceFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -62,11 +58,7 @@ def test_custom_source(plugin_fixture, datafiles):
def test_custom_element(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = ElementFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -102,11 +94,7 @@ def test_missing_element(plugin_fixture):
def test_source_notatype(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = SourceFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -119,11 +107,7 @@ def test_source_notatype(plugin_fixture, datafiles):
def test_element_notatype(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = ElementFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -137,11 +121,7 @@ def test_element_notatype(plugin_fixture, datafiles):
def test_source_wrongtype(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = SourceFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -155,11 +135,7 @@ def test_source_wrongtype(plugin_fixture, datafiles):
def test_element_wrongtype(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = ElementFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -172,11 +148,7 @@ def test_element_wrongtype(plugin_fixture, datafiles):
def test_source_missing_setup(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = SourceFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -189,11 +161,7 @@ def test_source_missing_setup(plugin_fixture, datafiles):
def test_element_missing_setup(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = ElementFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -207,11 +175,7 @@ def test_element_missing_setup(plugin_fixture, datafiles):
def test_source_bad_setup(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = SourceFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -225,11 +189,7 @@ def test_source_bad_setup(plugin_fixture, datafiles):
def test_element_bad_setup(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = ElementFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -243,11 +203,7 @@ def test_element_bad_setup(plugin_fixture, datafiles):
def test_source_badversion(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = SourceFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -261,11 +217,7 @@ def test_source_badversion(plugin_fixture, datafiles):
def test_element_badversion(plugin_fixture, datafiles):
plugins = [
Node.from_dict(
- {
- "origin": "local",
- "path": os.path.join(datafiles.dirname, datafiles.basename),
- "plugins": ["foo"],
- }
+ {"origin": "local", "path": os.path.join(datafiles.dirname, datafiles.basename), "plugins": ["foo"],}
)
]
factory = ElementFactory(plugin_fixture["base"], plugin_origins=plugins)
@@ -290,9 +242,7 @@ def test_source_multicontext(plugin_fixture, datafiles):
plugins2 = Node.from_dict(
{
"origin": "local",
- "path": os.path.join(
- datafiles.dirname, datafiles.basename, "anothersource"
- ),
+ "path": os.path.join(datafiles.dirname, datafiles.basename, "anothersource"),
"plugins": ["foo"],
}
)
@@ -314,18 +264,14 @@ def test_element_multicontext(plugin_fixture, datafiles):
plugins1 = Node.from_dict(
{
"origin": "local",
- "path": os.path.join(
- datafiles.dirname, datafiles.basename, "customelement"
- ),
+ "path": os.path.join(datafiles.dirname, datafiles.basename, "customelement"),
"plugins": ["foo"],
}
)
plugins2 = Node.from_dict(
{
"origin": "local",
- "path": os.path.join(
- datafiles.dirname, datafiles.basename, "anotherelement"
- ),
+ "path": os.path.join(datafiles.dirname, datafiles.basename, "anotherelement"),
"plugins": ["foo"],
}
)
diff --git a/tests/internals/pluginloading.py b/tests/internals/pluginloading.py
index 5527bf5cc..83944bbd9 100644
--- a/tests/internals/pluginloading.py
+++ b/tests/internals/pluginloading.py
@@ -43,9 +43,7 @@ def test_customelement(datafiles, tmpdir):
def test_badversionsource(datafiles, tmpdir):
basedir = str(datafiles)
- with pytest.raises(LoadError) as exc, create_pipeline(
- tmpdir, basedir, "simple.bst"
- ):
+ with pytest.raises(LoadError) as exc, create_pipeline(tmpdir, basedir, "simple.bst"):
pass
assert exc.value.reason == LoadErrorReason.UNSUPPORTED_PLUGIN
@@ -55,9 +53,7 @@ def test_badversionsource(datafiles, tmpdir):
def test_badversionelement(datafiles, tmpdir):
basedir = str(datafiles)
- with pytest.raises(LoadError) as exc, create_pipeline(
- tmpdir, basedir, "simple.bst"
- ):
+ with pytest.raises(LoadError) as exc, create_pipeline(tmpdir, basedir, "simple.bst"):
pass
assert exc.value.reason == LoadErrorReason.UNSUPPORTED_PLUGIN
diff --git a/tests/internals/storage.py b/tests/internals/storage.py
index 27dd7e88b..8aa7f4a17 100644
--- a/tests/internals/storage.py
+++ b/tests/internals/storage.py
@@ -47,9 +47,7 @@ def test_modified_file_list(tmpdir, datafiles, backend):
c.import_files(overlay)
- print(
- "List of all paths in imported results: {}".format(c.list_relative_paths())
- )
+ print("List of all paths in imported results: {}".format(c.list_relative_paths()))
assert "bin/bash" in c.list_relative_paths()
assert "bin/bash" in c.list_modified_paths()
assert "bin/hello" not in c.list_modified_paths()
diff --git a/tests/internals/storage_vdir_import.py b/tests/internals/storage_vdir_import.py
index 225191b43..63ab8bc2f 100644
--- a/tests/internals/storage_vdir_import.py
+++ b/tests/internals/storage_vdir_import.py
@@ -194,9 +194,7 @@ def _import_test(tmpdir, original, overlay, generator_function, verify_contents=
d2 = create_new_casdir(overlay, cas_cache, tmpdir)
d.import_files(d2)
export_dir = os.path.join(tmpdir, "output-{}-{}".format(original, overlay))
- roundtrip_dir = os.path.join(
- tmpdir, "roundtrip-{}-{}".format(original, overlay)
- )
+ roundtrip_dir = os.path.join(tmpdir, "roundtrip-{}-{}".format(original, overlay))
d2.export_files(roundtrip_dir)
d.export_files(export_dir)
@@ -209,9 +207,7 @@ def _import_test(tmpdir, original, overlay, generator_function, verify_contents=
# The file should not have overwritten the directory in this case.
pass
else:
- assert os.path.isfile(
- realpath
- ), "{} did not exist in the combined virtual directory".format(
+ assert os.path.isfile(realpath), "{} did not exist in the combined virtual directory".format(
path
)
assert file_contents_are(realpath, content)
@@ -241,17 +237,13 @@ def _import_test(tmpdir, original, overlay, generator_function, verify_contents=
@pytest.mark.parametrize("original", range(1, len(root_filesets) + 1))
@pytest.mark.parametrize("overlay", range(1, len(root_filesets) + 1))
def test_fixed_cas_import(tmpdir, original, overlay):
- _import_test(
- str(tmpdir), original, overlay, generate_import_roots, verify_contents=True
- )
+ _import_test(str(tmpdir), original, overlay, generate_import_roots, verify_contents=True)
@pytest.mark.parametrize("original", range(1, NUM_RANDOM_TESTS + 1))
@pytest.mark.parametrize("overlay", range(1, NUM_RANDOM_TESTS + 1))
def test_random_cas_import(tmpdir, original, overlay):
- _import_test(
- str(tmpdir), original, overlay, generate_random_root, verify_contents=False
- )
+ _import_test(str(tmpdir), original, overlay, generate_random_root, verify_contents=False)
def _listing_test(tmpdir, root, generator_function):
diff --git a/tests/internals/yaml.py b/tests/internals/yaml.py
index e25cf70b3..0958bc156 100644
--- a/tests/internals/yaml.py
+++ b/tests/internals/yaml.py
@@ -197,44 +197,12 @@ def test_composite_preserve_originals(datafiles):
("listappend.yaml", 0, 9, "silly", "basics.yaml", 8, 8),
("listappend.yaml", 6, 9, "sleepy", "basics.yaml", 20, 8),
# Test results of compositing with both (<) and (>) directives
- (
- "listappendprepend.yaml",
- 0,
- 11,
- "prepended1",
- "listappendprepend.yaml",
- 5,
- 10,
- ),
- (
- "listappendprepend.yaml",
- 1,
- 11,
- "prepended2",
- "listappendprepend.yaml",
- 7,
- 10,
- ),
+ ("listappendprepend.yaml", 0, 11, "prepended1", "listappendprepend.yaml", 5, 10,),
+ ("listappendprepend.yaml", 1, 11, "prepended2", "listappendprepend.yaml", 7, 10,),
("listappendprepend.yaml", 2, 11, "silly", "basics.yaml", 8, 8),
("listappendprepend.yaml", 8, 11, "sleepy", "basics.yaml", 20, 8),
- (
- "listappendprepend.yaml",
- 9,
- 11,
- "appended1",
- "listappendprepend.yaml",
- 10,
- 10,
- ),
- (
- "listappendprepend.yaml",
- 10,
- 11,
- "appended2",
- "listappendprepend.yaml",
- 12,
- 10,
- ),
+ ("listappendprepend.yaml", 9, 11, "appended1", "listappendprepend.yaml", 10, 10,),
+ ("listappendprepend.yaml", 10, 11, "appended2", "listappendprepend.yaml", 12, 10,),
# Test results of compositing with the (=) overwrite directive
("listoverwrite.yaml", 0, 2, "overwrite1", "listoverwrite.yaml", 5, 10),
("listoverwrite.yaml", 1, 2, "overwrite2", "listoverwrite.yaml", 7, 10),
@@ -243,9 +211,7 @@ def test_composite_preserve_originals(datafiles):
("implicitoverwrite.yaml", 1, 2, "overwrite2", "implicitoverwrite.yaml", 6, 8),
],
)
-def test_list_composition(
- datafiles, filename, tmpdir, index, length, mood, prov_file, prov_line, prov_col
-):
+def test_list_composition(datafiles, filename, tmpdir, index, length, mood, prov_file, prov_line, prov_col):
base_file = os.path.join(datafiles.dirname, datafiles.basename, "basics.yaml")
overlay_file = os.path.join(datafiles.dirname, datafiles.basename, filename)
@@ -266,9 +232,7 @@ def test_list_composition(
@pytest.mark.datafiles(os.path.join(DATA_DIR))
def test_list_deletion(datafiles):
base = os.path.join(datafiles.dirname, datafiles.basename, "basics.yaml")
- overlay = os.path.join(
- datafiles.dirname, datafiles.basename, "listoverwriteempty.yaml"
- )
+ overlay = os.path.join(datafiles.dirname, datafiles.basename, "listoverwriteempty.yaml")
base = _yaml.load(base, shortname="basics.yaml")
overlay = _yaml.load(overlay, shortname="listoverwriteempty.yaml")
@@ -309,362 +273,56 @@ def test_list_deletion(datafiles):
"filename1,filename2,index,length,mood,prov_file,prov_line,prov_col",
[
# Test results of compositing literal list with (>) and then (<)
- (
- "listprepend.yaml",
- "listappend.yaml",
- 0,
- 11,
- "prepended1",
- "listprepend.yaml",
- 5,
- 10,
- ),
- (
- "listprepend.yaml",
- "listappend.yaml",
- 1,
- 11,
- "prepended2",
- "listprepend.yaml",
- 7,
- 10,
- ),
+ ("listprepend.yaml", "listappend.yaml", 0, 11, "prepended1", "listprepend.yaml", 5, 10,),
+ ("listprepend.yaml", "listappend.yaml", 1, 11, "prepended2", "listprepend.yaml", 7, 10,),
("listprepend.yaml", "listappend.yaml", 2, 11, "silly", "basics.yaml", 8, 8),
("listprepend.yaml", "listappend.yaml", 8, 11, "sleepy", "basics.yaml", 20, 8),
- (
- "listprepend.yaml",
- "listappend.yaml",
- 9,
- 11,
- "appended1",
- "listappend.yaml",
- 5,
- 10,
- ),
- (
- "listprepend.yaml",
- "listappend.yaml",
- 10,
- 11,
- "appended2",
- "listappend.yaml",
- 7,
- 10,
- ),
+ ("listprepend.yaml", "listappend.yaml", 9, 11, "appended1", "listappend.yaml", 5, 10,),
+ ("listprepend.yaml", "listappend.yaml", 10, 11, "appended2", "listappend.yaml", 7, 10,),
# Test results of compositing literal list with (<) and then (>)
- (
- "listappend.yaml",
- "listprepend.yaml",
- 0,
- 11,
- "prepended1",
- "listprepend.yaml",
- 5,
- 10,
- ),
- (
- "listappend.yaml",
- "listprepend.yaml",
- 1,
- 11,
- "prepended2",
- "listprepend.yaml",
- 7,
- 10,
- ),
+ ("listappend.yaml", "listprepend.yaml", 0, 11, "prepended1", "listprepend.yaml", 5, 10,),
+ ("listappend.yaml", "listprepend.yaml", 1, 11, "prepended2", "listprepend.yaml", 7, 10,),
("listappend.yaml", "listprepend.yaml", 2, 11, "silly", "basics.yaml", 8, 8),
("listappend.yaml", "listprepend.yaml", 8, 11, "sleepy", "basics.yaml", 20, 8),
- (
- "listappend.yaml",
- "listprepend.yaml",
- 9,
- 11,
- "appended1",
- "listappend.yaml",
- 5,
- 10,
- ),
- (
- "listappend.yaml",
- "listprepend.yaml",
- 10,
- 11,
- "appended2",
- "listappend.yaml",
- 7,
- 10,
- ),
+ ("listappend.yaml", "listprepend.yaml", 9, 11, "appended1", "listappend.yaml", 5, 10,),
+ ("listappend.yaml", "listprepend.yaml", 10, 11, "appended2", "listappend.yaml", 7, 10,),
# Test results of compositing literal list with (>) and then (>)
("listappend.yaml", "secondappend.yaml", 0, 11, "silly", "basics.yaml", 8, 8),
("listappend.yaml", "secondappend.yaml", 6, 11, "sleepy", "basics.yaml", 20, 8),
- (
- "listappend.yaml",
- "secondappend.yaml",
- 7,
- 11,
- "appended1",
- "listappend.yaml",
- 5,
- 10,
- ),
- (
- "listappend.yaml",
- "secondappend.yaml",
- 8,
- 11,
- "appended2",
- "listappend.yaml",
- 7,
- 10,
- ),
- (
- "listappend.yaml",
- "secondappend.yaml",
- 9,
- 11,
- "secondappend1",
- "secondappend.yaml",
- 5,
- 10,
- ),
- (
- "listappend.yaml",
- "secondappend.yaml",
- 10,
- 11,
- "secondappend2",
- "secondappend.yaml",
- 7,
- 10,
- ),
+ ("listappend.yaml", "secondappend.yaml", 7, 11, "appended1", "listappend.yaml", 5, 10,),
+ ("listappend.yaml", "secondappend.yaml", 8, 11, "appended2", "listappend.yaml", 7, 10,),
+ ("listappend.yaml", "secondappend.yaml", 9, 11, "secondappend1", "secondappend.yaml", 5, 10,),
+ ("listappend.yaml", "secondappend.yaml", 10, 11, "secondappend2", "secondappend.yaml", 7, 10,),
    # Test results of compositing literal list with (<) and then (<)
- (
- "listprepend.yaml",
- "secondprepend.yaml",
- 0,
- 11,
- "secondprepend1",
- "secondprepend.yaml",
- 5,
- 10,
- ),
- (
- "listprepend.yaml",
- "secondprepend.yaml",
- 1,
- 11,
- "secondprepend2",
- "secondprepend.yaml",
- 7,
- 10,
- ),
- (
- "listprepend.yaml",
- "secondprepend.yaml",
- 2,
- 11,
- "prepended1",
- "listprepend.yaml",
- 5,
- 10,
- ),
- (
- "listprepend.yaml",
- "secondprepend.yaml",
- 3,
- 11,
- "prepended2",
- "listprepend.yaml",
- 7,
- 10,
- ),
+ ("listprepend.yaml", "secondprepend.yaml", 0, 11, "secondprepend1", "secondprepend.yaml", 5, 10,),
+ ("listprepend.yaml", "secondprepend.yaml", 1, 11, "secondprepend2", "secondprepend.yaml", 7, 10,),
+ ("listprepend.yaml", "secondprepend.yaml", 2, 11, "prepended1", "listprepend.yaml", 5, 10,),
+ ("listprepend.yaml", "secondprepend.yaml", 3, 11, "prepended2", "listprepend.yaml", 7, 10,),
("listprepend.yaml", "secondprepend.yaml", 4, 11, "silly", "basics.yaml", 8, 8),
- (
- "listprepend.yaml",
- "secondprepend.yaml",
- 10,
- 11,
- "sleepy",
- "basics.yaml",
- 20,
- 8,
- ),
+ ("listprepend.yaml", "secondprepend.yaml", 10, 11, "sleepy", "basics.yaml", 20, 8,),
# Test results of compositing literal list with (>) or (<) and then another literal list
- (
- "listappend.yaml",
- "implicitoverwrite.yaml",
- 0,
- 2,
- "overwrite1",
- "implicitoverwrite.yaml",
- 4,
- 8,
- ),
- (
- "listappend.yaml",
- "implicitoverwrite.yaml",
- 1,
- 2,
- "overwrite2",
- "implicitoverwrite.yaml",
- 6,
- 8,
- ),
- (
- "listprepend.yaml",
- "implicitoverwrite.yaml",
- 0,
- 2,
- "overwrite1",
- "implicitoverwrite.yaml",
- 4,
- 8,
- ),
- (
- "listprepend.yaml",
- "implicitoverwrite.yaml",
- 1,
- 2,
- "overwrite2",
- "implicitoverwrite.yaml",
- 6,
- 8,
- ),
+ ("listappend.yaml", "implicitoverwrite.yaml", 0, 2, "overwrite1", "implicitoverwrite.yaml", 4, 8,),
+ ("listappend.yaml", "implicitoverwrite.yaml", 1, 2, "overwrite2", "implicitoverwrite.yaml", 6, 8,),
+ ("listprepend.yaml", "implicitoverwrite.yaml", 0, 2, "overwrite1", "implicitoverwrite.yaml", 4, 8,),
+ ("listprepend.yaml", "implicitoverwrite.yaml", 1, 2, "overwrite2", "implicitoverwrite.yaml", 6, 8,),
# Test results of compositing literal list with (>) or (<) and then an explicit (=) overwrite
- (
- "listappend.yaml",
- "listoverwrite.yaml",
- 0,
- 2,
- "overwrite1",
- "listoverwrite.yaml",
- 5,
- 10,
- ),
- (
- "listappend.yaml",
- "listoverwrite.yaml",
- 1,
- 2,
- "overwrite2",
- "listoverwrite.yaml",
- 7,
- 10,
- ),
- (
- "listprepend.yaml",
- "listoverwrite.yaml",
- 0,
- 2,
- "overwrite1",
- "listoverwrite.yaml",
- 5,
- 10,
- ),
- (
- "listprepend.yaml",
- "listoverwrite.yaml",
- 1,
- 2,
- "overwrite2",
- "listoverwrite.yaml",
- 7,
- 10,
- ),
+ ("listappend.yaml", "listoverwrite.yaml", 0, 2, "overwrite1", "listoverwrite.yaml", 5, 10,),
+ ("listappend.yaml", "listoverwrite.yaml", 1, 2, "overwrite2", "listoverwrite.yaml", 7, 10,),
+ ("listprepend.yaml", "listoverwrite.yaml", 0, 2, "overwrite1", "listoverwrite.yaml", 5, 10,),
+ ("listprepend.yaml", "listoverwrite.yaml", 1, 2, "overwrite2", "listoverwrite.yaml", 7, 10,),
    # Test results of compositing literal list with an explicit overwrite (=) and then with (>) or (<)
- (
- "listoverwrite.yaml",
- "listappend.yaml",
- 0,
- 4,
- "overwrite1",
- "listoverwrite.yaml",
- 5,
- 10,
- ),
- (
- "listoverwrite.yaml",
- "listappend.yaml",
- 1,
- 4,
- "overwrite2",
- "listoverwrite.yaml",
- 7,
- 10,
- ),
- (
- "listoverwrite.yaml",
- "listappend.yaml",
- 2,
- 4,
- "appended1",
- "listappend.yaml",
- 5,
- 10,
- ),
- (
- "listoverwrite.yaml",
- "listappend.yaml",
- 3,
- 4,
- "appended2",
- "listappend.yaml",
- 7,
- 10,
- ),
- (
- "listoverwrite.yaml",
- "listprepend.yaml",
- 0,
- 4,
- "prepended1",
- "listprepend.yaml",
- 5,
- 10,
- ),
- (
- "listoverwrite.yaml",
- "listprepend.yaml",
- 1,
- 4,
- "prepended2",
- "listprepend.yaml",
- 7,
- 10,
- ),
- (
- "listoverwrite.yaml",
- "listprepend.yaml",
- 2,
- 4,
- "overwrite1",
- "listoverwrite.yaml",
- 5,
- 10,
- ),
- (
- "listoverwrite.yaml",
- "listprepend.yaml",
- 3,
- 4,
- "overwrite2",
- "listoverwrite.yaml",
- 7,
- 10,
- ),
+ ("listoverwrite.yaml", "listappend.yaml", 0, 4, "overwrite1", "listoverwrite.yaml", 5, 10,),
+ ("listoverwrite.yaml", "listappend.yaml", 1, 4, "overwrite2", "listoverwrite.yaml", 7, 10,),
+ ("listoverwrite.yaml", "listappend.yaml", 2, 4, "appended1", "listappend.yaml", 5, 10,),
+ ("listoverwrite.yaml", "listappend.yaml", 3, 4, "appended2", "listappend.yaml", 7, 10,),
+ ("listoverwrite.yaml", "listprepend.yaml", 0, 4, "prepended1", "listprepend.yaml", 5, 10,),
+ ("listoverwrite.yaml", "listprepend.yaml", 1, 4, "prepended2", "listprepend.yaml", 7, 10,),
+ ("listoverwrite.yaml", "listprepend.yaml", 2, 4, "overwrite1", "listoverwrite.yaml", 5, 10,),
+ ("listoverwrite.yaml", "listprepend.yaml", 3, 4, "overwrite2", "listoverwrite.yaml", 7, 10,),
],
)
def test_list_composition_twice(
- datafiles,
- tmpdir,
- filename1,
- filename2,
- index,
- length,
- mood,
- prov_file,
- prov_line,
- prov_col,
+ datafiles, tmpdir, filename1, filename2, index, length, mood, prov_file, prov_line, prov_col,
):
file_base = os.path.join(datafiles.dirname, datafiles.basename, "basics.yaml")
file1 = os.path.join(datafiles.dirname, datafiles.basename, filename1)
@@ -707,9 +365,7 @@ def test_list_composition_twice(
@pytest.mark.datafiles(os.path.join(DATA_DIR))
def test_convert_value_to_string(datafiles):
- conf_file = os.path.join(
- datafiles.dirname, datafiles.basename, "convert_value_to_str.yaml"
- )
+ conf_file = os.path.join(datafiles.dirname, datafiles.basename, "convert_value_to_str.yaml")
# Run file through yaml to convert it
test_dict = _yaml.load(conf_file)
@@ -733,9 +389,7 @@ def test_convert_value_to_string(datafiles):
@pytest.mark.datafiles(os.path.join(DATA_DIR))
def test_value_doesnt_match_expected(datafiles):
- conf_file = os.path.join(
- datafiles.dirname, datafiles.basename, "convert_value_to_str.yaml"
- )
+ conf_file = os.path.join(datafiles.dirname, datafiles.basename, "convert_value_to_str.yaml")
# Run file through yaml to convert it
test_dict = _yaml.load(conf_file)
@@ -748,9 +402,7 @@ def test_value_doesnt_match_expected(datafiles):
@pytest.mark.datafiles(os.path.join(DATA_DIR))
@pytest.mark.parametrize("fromdisk", [(True), (False)])
def test_roundtrip_dump(datafiles, fromdisk):
- filename = os.path.join(
- datafiles.dirname, datafiles.basename, "roundtrip-test.yaml"
- )
+ filename = os.path.join(datafiles.dirname, datafiles.basename, "roundtrip-test.yaml")
with open(filename, "r") as fh:
rt_raw = fh.read()
if fromdisk:
@@ -787,9 +439,7 @@ def test_roundtrip_dump(datafiles, fromdisk):
@pytest.mark.datafiles(os.path.join(DATA_DIR))
-@pytest.mark.parametrize(
- "case", [["a", "b", "c"], ["foo", 1], ["stuff", 0, "colour"], ["bird", 0, 1],]
-)
+@pytest.mark.parametrize("case", [["a", "b", "c"], ["foo", 1], ["stuff", 0, "colour"], ["bird", 0, 1],])
def test_node_find_target(datafiles, case):
filename = os.path.join(datafiles.dirname, datafiles.basename, "traversal.yaml")
# We set copy_tree in order to ensure that the nodes in `loaded`
diff --git a/tests/plugins/deprecationwarnings/deprecationwarnings.py b/tests/plugins/deprecationwarnings/deprecationwarnings.py
index 6f20eae8c..628faea68 100644
--- a/tests/plugins/deprecationwarnings/deprecationwarnings.py
+++ b/tests/plugins/deprecationwarnings/deprecationwarnings.py
@@ -11,9 +11,7 @@ from buildstream.testing import cli # pylint: disable=unused-import
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
_DEPRECATION_MESSAGE = "Here is some detail."
-_DEPRECATION_WARNING = "Using deprecated plugin deprecated_plugin: {}".format(
- _DEPRECATION_MESSAGE
-)
+_DEPRECATION_WARNING = "Using deprecated plugin deprecated_plugin: {}".format(_DEPRECATION_MESSAGE)
@pytest.mark.datafiles(DATA_DIR)
@@ -29,11 +27,7 @@ def test_suppress_deprecation_warning(cli, datafiles):
project = str(datafiles)
cli.run(project=project, args=["show", "manual.bst"])
- element_overrides = (
- "elements:\n"
- " deprecated_plugin:\n"
- " suppress-deprecation-warnings : True\n"
- )
+ element_overrides = "elements:\n" " deprecated_plugin:\n" " suppress-deprecation-warnings : True\n"
project_conf = os.path.join(project, "project.conf")
with open(project_conf, "a") as f:
diff --git a/tests/remoteexecution/buildfail.py b/tests/remoteexecution/buildfail.py
index bd1c81891..8802a311c 100644
--- a/tests/remoteexecution/buildfail.py
+++ b/tests/remoteexecution/buildfail.py
@@ -23,9 +23,7 @@ import pytest
from buildstream._exceptions import ErrorDomain
from buildstream import _yaml
-from buildstream.testing import (
- cli_remote_execution as cli,
-) # pylint: disable=unused-import
+from buildstream.testing import cli_remote_execution as cli # pylint: disable=unused-import
pytestmark = pytest.mark.remoteexecution
@@ -54,10 +52,7 @@ def test_build_remote_failure(cli, datafiles):
result = cli.run(project=project, args=["build", "element.bst"])
result.assert_main_error(ErrorDomain.STREAM, None)
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "element.bst", "--directory", checkout_path],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "element.bst", "--directory", checkout_path],)
result.assert_success()
# check that the file created before the failure exists
diff --git a/tests/remoteexecution/buildtree.py b/tests/remoteexecution/buildtree.py
index 86efb29f1..7c763e1fb 100644
--- a/tests/remoteexecution/buildtree.py
+++ b/tests/remoteexecution/buildtree.py
@@ -21,9 +21,7 @@ import os
import shutil
import pytest
-from buildstream.testing import (
- cli_remote_execution as cli,
-) # pylint: disable=unused-import
+from buildstream.testing import cli_remote_execution as cli # pylint: disable=unused-import
from tests.testutils import create_artifact_share
@@ -44,16 +42,10 @@ def test_buildtree_remote(cli, tmpdir, datafiles):
with create_artifact_share(share_path) as share:
cli.configure(
- {
- "artifacts": {"url": share.repo, "push": True},
- "cache": {"pull-buildtrees": False},
- }
+ {"artifacts": {"url": share.repo, "push": True}, "cache": {"pull-buildtrees": False},}
)
- res = cli.run(
- project=project,
- args=["--cache-buildtrees", "always", "build", element_name],
- )
+ res = cli.run(project=project, args=["--cache-buildtrees", "always", "build", element_name],)
res.assert_success()
# remove local cache
@@ -61,45 +53,21 @@ def test_buildtree_remote(cli, tmpdir, datafiles):
shutil.rmtree(os.path.join(str(tmpdir), "cache", "artifacts"))
# pull without buildtree
- res = cli.run(
- project=project, args=["artifact", "pull", "--deps", "all", element_name]
- )
+ res = cli.run(project=project, args=["artifact", "pull", "--deps", "all", element_name])
res.assert_success()
# check shell doesn't work
- res = cli.run(
- project=project,
- args=["shell", "--build", element_name, "--", "cat", "test"],
- )
+ res = cli.run(project=project, args=["shell", "--build", element_name, "--", "cat", "test"],)
res.assert_shell_error()
# pull with buildtree
- res = cli.run(
- project=project,
- args=[
- "--pull-buildtrees",
- "artifact",
- "pull",
- "--deps",
- "all",
- element_name,
- ],
- )
+ res = cli.run(project=project, args=["--pull-buildtrees", "artifact", "pull", "--deps", "all", element_name,],)
res.assert_success()
# check it works this time
res = cli.run(
project=project,
- args=[
- "shell",
- "--build",
- element_name,
- "--use-buildtree",
- "always",
- "--",
- "cat",
- "test",
- ],
+ args=["shell", "--build", element_name, "--use-buildtree", "always", "--", "cat", "test",],
)
res.assert_success()
assert "Hi" in res.output
diff --git a/tests/remoteexecution/junction.py b/tests/remoteexecution/junction.py
index dd8d4d4e0..46bfaa8af 100644
--- a/tests/remoteexecution/junction.py
+++ b/tests/remoteexecution/junction.py
@@ -20,9 +20,7 @@
import os
import pytest
-from buildstream.testing import (
- cli_remote_execution as cli,
-) # pylint: disable=unused-import
+from buildstream.testing import cli_remote_execution as cli # pylint: disable=unused-import
from buildstream.testing import create_repo
from buildstream import _yaml
from tests.testutils import generate_junction
@@ -72,19 +70,12 @@ def test_junction_build_remote(cli, tmpdir, datafiles):
# Create a trackable element to depend on the cross junction element,
    # this one has its ref resolved already
create_element(
- repo,
- "sub-target.bst",
- subproject_element_path,
- ["autotools/amhello.bst"],
- ref=ref,
+ repo, "sub-target.bst", subproject_element_path, ["autotools/amhello.bst"], ref=ref,
)
# Create a trackable element to depend on the cross junction element
create_element(
- repo,
- "target.bst",
- element_path,
- [{"junction": "junction.bst", "filename": "sub-target.bst"}],
+ repo, "target.bst", element_path, [{"junction": "junction.bst", "filename": "sub-target.bst"}],
)
# Create a repo to hold the subproject and generate a junction element for it
@@ -106,9 +97,7 @@ def test_junction_build_remote(cli, tmpdir, datafiles):
result.assert_success()
# track target to ensure we have refs
- result = cli.run(
- project=project, args=["source", "track", "--deps", "all", "composed.bst"]
- )
+ result = cli.run(project=project, args=["source", "track", "--deps", "all", "composed.bst"])
result.assert_success()
# build
diff --git a/tests/remoteexecution/partial.py b/tests/remoteexecution/partial.py
index 13b6ff853..9a9357b60 100644
--- a/tests/remoteexecution/partial.py
+++ b/tests/remoteexecution/partial.py
@@ -5,9 +5,7 @@ import os
import pytest
from buildstream._exceptions import ErrorDomain
-from buildstream.testing import (
- cli_remote_execution as cli,
-) # pylint: disable=unused-import
+from buildstream.testing import cli_remote_execution as cli # pylint: disable=unused-import
from buildstream.testing.integration import assert_contains
from tests.testutils.artifactshare import create_artifact_share
@@ -24,9 +22,7 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("pull_artifact_files", [True, False])
@pytest.mark.parametrize("build_all", [True, False])
-def test_build_dependency_partial_local_cas(
- cli, datafiles, pull_artifact_files, build_all
-):
+def test_build_dependency_partial_local_cas(cli, datafiles, pull_artifact_files, build_all):
project = str(datafiles)
element_name = "no-runtime-deps.bst"
builddep_element_name = "autotools/amhello.bst"
@@ -45,10 +41,7 @@ def test_build_dependency_partial_local_cas(
result.assert_success()
    # Verify artifact files are pulled when pull_artifact_files is set
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
if pull_artifact_files:
result.assert_success()
assert_contains(checkout, ["/test"])
@@ -57,14 +50,7 @@ def test_build_dependency_partial_local_cas(
    # Verify build dependencies are pulled for ALL and BUILD
result = cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- builddep_element_name,
- "--directory",
- builddep_checkout,
- ],
+ project=project, args=["artifact", "checkout", builddep_element_name, "--directory", builddep_checkout,],
)
if build_all and pull_artifact_files:
result.assert_success()
diff --git a/tests/remoteexecution/simple.py b/tests/remoteexecution/simple.py
index a0625038e..36371b1f3 100644
--- a/tests/remoteexecution/simple.py
+++ b/tests/remoteexecution/simple.py
@@ -4,9 +4,7 @@
import os
import pytest
-from buildstream.testing import (
- cli_remote_execution as cli,
-) # pylint: disable=unused-import
+from buildstream.testing import cli_remote_execution as cli # pylint: disable=unused-import
from buildstream.testing.integration import assert_contains
@@ -29,10 +27,7 @@ def test_remote_autotools_build(cli, datafiles):
result = cli.run(project=project, args=["build", element_name])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", element_name, "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout],)
result.assert_success()
assert_contains(
diff --git a/tests/sandboxes/missing_dependencies.py b/tests/sandboxes/missing_dependencies.py
index 16754747e..b4967727f 100644
--- a/tests/sandboxes/missing_dependencies.py
+++ b/tests/sandboxes/missing_dependencies.py
@@ -12,9 +12,7 @@ from buildstream.testing import cli # pylint: disable=unused-import
# Project directory
-DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "missing-dependencies",
-)
+DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "missing-dependencies",)
def _symlink_host_tools_to_dir(host_tools, dir_):
@@ -44,9 +42,7 @@ def test_missing_bwrap_has_nice_error_message(cli, datafiles, tmp_path):
# Build without access to host tools, this should fail with a nice error
result = cli.run(
- project=project,
- args=["build", "element.bst"],
- env={"PATH": str(bin_dir), "BST_FORCE_SANDBOX": None},
+ project=project, args=["build", "element.bst"], env={"PATH": str(bin_dir), "BST_FORCE_SANDBOX": None},
)
result.assert_task_error(ErrorDomain.SANDBOX, "unavailable-local-sandbox")
assert "not found" in result.stderr
diff --git a/tests/sandboxes/remote-exec-config.py b/tests/sandboxes/remote-exec-config.py
index 889448954..623dcf1b5 100644
--- a/tests/sandboxes/remote-exec-config.py
+++ b/tests/sandboxes/remote-exec-config.py
@@ -9,9 +9,7 @@ from buildstream import _yaml
from buildstream._exceptions import ErrorDomain, LoadErrorReason
from buildstream.testing.runcli import cli # pylint: disable=unused-import
-DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "remote-exec-config"
-)
+DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "remote-exec-config")
# Tests that we get a useful error message when supplying invalid
# remote execution configurations.
@@ -38,9 +36,7 @@ def test_old_and_new_configs(cli, datafiles):
#
# This does not happen for a simple `bst show`.
result = cli.run(project=project, args=["artifact", "pull", "element.bst"])
- result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA, "specify one"
- )
+ result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA, "specify one")
# Assert that if either the client key or client cert is specified
@@ -48,8 +44,7 @@ def test_old_and_new_configs(cli, datafiles):
# instead of an unhandled exception.
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
- "config_key, config_value",
- [("client-cert", "client.crt"), ("client-key", "client.key")],
+ "config_key, config_value", [("client-cert", "client.crt"), ("client-key", "client.key")],
)
def test_missing_certs(cli, datafiles, config_key, config_value):
project = os.path.join(datafiles.dirname, datafiles.basename, "missing-certs")
@@ -58,10 +53,7 @@ def test_missing_certs(cli, datafiles, config_key, config_value):
"name": "test",
"remote-execution": {
"execution-service": {"url": "http://localhost:8088"},
- "storage-service": {
- "url": "http://charactron:11001",
- config_key: config_value,
- },
+ "storage-service": {"url": "http://charactron:11001", config_key: config_value,},
},
}
project_conf_file = os.path.join(project, "project.conf")
@@ -71,9 +63,7 @@ def test_missing_certs(cli, datafiles, config_key, config_value):
#
# This does not happen for a simple `bst show`.
result = cli.run(project=project, args=["show", "element.bst"])
- result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA, "Your config is missing"
- )
+ result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA, "Your config is missing")
# Assert that if incomplete information is supplied we get a sensible error message.
@@ -89,6 +79,4 @@ def test_empty_config(cli, datafiles):
#
# This does not happen for a simple `bst show`.
result = cli.run(project=project, args=["artifact", "pull", "element.bst"])
- result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA, "specify one"
- )
+ result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA, "specify one")
diff --git a/tests/sandboxes/selection.py b/tests/sandboxes/selection.py
index 70fbdac70..9275961ce 100644
--- a/tests/sandboxes/selection.py
+++ b/tests/sandboxes/selection.py
@@ -43,11 +43,7 @@ def test_force_sandbox(cli, datafiles):
_yaml.roundtrip_dump(element, element_path)
# Build without access to host tools, this will fail
- result = cli.run(
- project=project,
- args=["build", "element.bst"],
- env={"PATH": "", "BST_FORCE_SANDBOX": "bwrap"},
- )
+ result = cli.run(project=project, args=["build", "element.bst"], env={"PATH": "", "BST_FORCE_SANDBOX": "bwrap"},)
result.assert_main_error(ErrorDomain.PLATFORM, None)
assert "Bubblewrap not found" in result.stderr
    # we have asked for a specific sandbox, but it is not available so
diff --git a/tests/sourcecache/cache.py b/tests/sourcecache/cache.py
index a4878e73d..bbc3d8329 100644
--- a/tests/sourcecache/cache.py
+++ b/tests/sourcecache/cache.py
@@ -108,9 +108,7 @@ def test_source_cache_key(cli, datafiles):
assert len(os.listdir(patch_protos)) == 1
# modify hello-patch file and check tracking updates refs
- with open(
- os.path.join(file_path, "dev-files", "usr", "include", "pony.h"), "a"
- ) as f:
+ with open(os.path.join(file_path, "dev-files", "usr", "include", "pony.h"), "a") as f:
f.write("\nappending nonsense")
res = cli.run(project=project_dir, args=["source", "track", element_name])
diff --git a/tests/sourcecache/config.py b/tests/sourcecache/config.py
index aaf46459e..ab1cf2f03 100644
--- a/tests/sourcecache/config.py
+++ b/tests/sourcecache/config.py
@@ -37,19 +37,14 @@ DATA_DIR = os.path.dirname(os.path.realpath(__file__))
# instead of an unhandled exception.
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
- "config_key, config_value",
- [("client-cert", "client.crt"), ("client-key", "client.key")],
+ "config_key, config_value", [("client-cert", "client.crt"), ("client-key", "client.key")],
)
def test_missing_certs(cli, datafiles, config_key, config_value):
project = os.path.join(datafiles.dirname, datafiles.basename, "missing-certs")
project_conf = {
"name": "test",
- "source-caches": {
- "url": "https://cache.example.com:12345",
- "push": "true",
- config_key: config_value,
- },
+ "source-caches": {"url": "https://cache.example.com:12345", "push": "true", config_key: config_value,},
}
project_conf_file = os.path.join(project, "project.conf")
_yaml.roundtrip_dump(project_conf, project_conf_file)
diff --git a/tests/sourcecache/fetch.py b/tests/sourcecache/fetch.py
index bc3f32e66..0c347ebbf 100644
--- a/tests/sourcecache/fetch.py
+++ b/tests/sourcecache/fetch.py
@@ -92,9 +92,7 @@ def test_source_fetch(cli, tmpdir, datafiles):
res = cli.run(project=project_dir, args=["build", element_name])
res.assert_success()
- assert (
- os.listdir(os.path.join(str(tmpdir), "cache", "sources", "git")) != []
- )
+ assert os.listdir(os.path.join(str(tmpdir), "cache", "sources", "git")) != []
# get root digest of source
sourcecache = context.sourcecache
@@ -115,9 +113,7 @@ def test_source_fetch(cli, tmpdir, datafiles):
# check that we have the source in the cas now and it's not fetched
assert element._source_cached()
- assert (
- os.listdir(os.path.join(str(tmpdir), "cache", "sources", "git")) == []
- )
+ assert os.listdir(os.path.join(str(tmpdir), "cache", "sources", "git")) == []
@pytest.mark.datafiles(DATA_DIR)
@@ -145,13 +141,9 @@ def test_fetch_fallback(cli, tmpdir, datafiles):
res.assert_success()
brief_key = source._get_brief_display_key()
assert (
- "Remote source service ({}) does not have source {} cached".format(
- share.repo, brief_key
- )
- ) in res.stderr
- assert (
- "SUCCESS Fetching from {}".format(repo.source_config(ref=ref)["url"])
+ "Remote source service ({}) does not have source {} cached".format(share.repo, brief_key)
) in res.stderr
+ assert ("SUCCESS Fetching from {}".format(repo.source_config(ref=ref)["url"])) in res.stderr
    # Check that the source is both in the source dir and the local CAS
assert element._source_cached()
@@ -210,9 +202,7 @@ def test_source_pull_partial_fallback_fetch(cli, tmpdir, datafiles):
res = cli.run(project=project_dir, args=["build", element_name])
res.assert_success()
- assert (
- os.listdir(os.path.join(str(tmpdir), "cache", "sources", "git")) != []
- )
+ assert os.listdir(os.path.join(str(tmpdir), "cache", "sources", "git")) != []
# get root digest of source
sourcecache = context.sourcecache
@@ -221,9 +211,7 @@ def test_source_pull_partial_fallback_fetch(cli, tmpdir, datafiles):
move_local_cas_to_remote_source_share(str(cache_dir), share.directory)
# Remove the cas content, only keep the proto and such around
- shutil.rmtree(
- os.path.join(str(tmpdir), "sourceshare", "repo", "cas", "objects")
- )
+ shutil.rmtree(os.path.join(str(tmpdir), "sourceshare", "repo", "cas", "objects"))
# check the share doesn't have the object
assert not share.has_object(digest)
@@ -234,6 +222,4 @@ def test_source_pull_partial_fallback_fetch(cli, tmpdir, datafiles):
res = cli.run(project=project_dir, args=["source", "fetch", element_name])
res.assert_success()
- assert (
- "SUCCESS Fetching from {}".format(repo.source_config(ref=ref)["url"])
- ) in res.stderr
+ assert ("SUCCESS Fetching from {}".format(repo.source_config(ref=ref)["url"])) in res.stderr
diff --git a/tests/sourcecache/source-checkout.py b/tests/sourcecache/source-checkout.py
index 2d2e71565..f5096cefb 100644
--- a/tests/sourcecache/source-checkout.py
+++ b/tests/sourcecache/source-checkout.py
@@ -49,10 +49,7 @@ def test_source_checkout(tmpdir, datafiles, cli):
repo = create_element_size("target.bst", project_dir, element_path, [], 100000)
# check implicit fetching
- res = cli.run(
- project=project_dir,
- args=["source", "checkout", "--directory", target_dir, "target.bst"],
- )
+ res = cli.run(project=project_dir, args=["source", "checkout", "--directory", target_dir, "target.bst"],)
res.assert_success()
assert "Fetching from" in res.stderr
@@ -62,10 +59,7 @@ def test_source_checkout(tmpdir, datafiles, cli):
shutil.rmtree(target_dir)
shutil.rmtree(source_dir)
- res = cli.run(
- project=project_dir,
- args=["source", "checkout", "--directory", target_dir, "target.bst"],
- )
+ res = cli.run(project=project_dir, args=["source", "checkout", "--directory", target_dir, "target.bst"],)
res.assert_success()
assert "Fetching from" not in res.stderr
@@ -73,8 +67,5 @@ def test_source_checkout(tmpdir, datafiles, cli):
shutil.rmtree(target_dir)
shutil.rmtree(os.path.join(cache_dir, "cas"))
- res = cli.run(
- project=project_dir,
- args=["source", "checkout", "--directory", target_dir, "target.bst"],
- )
+ res = cli.run(project=project_dir, args=["source", "checkout", "--directory", target_dir, "target.bst"],)
res.assert_task_error(ErrorDomain.PLUGIN, None)
diff --git a/tests/sourcecache/workspace.py b/tests/sourcecache/workspace.py
index 3b6e265e9..42661edeb 100644
--- a/tests/sourcecache/workspace.py
+++ b/tests/sourcecache/workspace.py
@@ -55,10 +55,7 @@ def test_workspace_source_fetch(tmpdir, datafiles, cli):
shutil.rmtree(source_dir)
    # Open a workspace and check that it fetches the original sources
- res = cli.run(
- project=project_dir,
- args=["workspace", "open", "target.bst", "--directory", workspace],
- )
+ res = cli.run(project=project_dir, args=["workspace", "open", "target.bst", "--directory", workspace],)
res.assert_success()
assert "Fetching from" in res.stderr
@@ -75,11 +72,7 @@ def test_workspace_open_no_source_push(tmpdir, datafiles, cli):
with create_artifact_share(share_dir) as share:
cli.configure(
- {
- "cachedir": cache_dir,
- "scheduler": {"pushers": 1},
- "source-caches": {"url": share.repo, "push": True,},
- }
+ {"cachedir": cache_dir, "scheduler": {"pushers": 1}, "source-caches": {"url": share.repo, "push": True,},}
)
# Fetch as in previous test and check it pushes the source
@@ -91,10 +84,7 @@ def test_workspace_open_no_source_push(tmpdir, datafiles, cli):
# clear the cas and open a workspace
shutil.rmtree(os.path.join(cache_dir, "cas"))
- res = cli.run(
- project=project_dir,
- args=["workspace", "open", "target.bst", "--directory", workspace],
- )
+ res = cli.run(project=project_dir, args=["workspace", "open", "target.bst", "--directory", workspace],)
res.assert_success()
# Check that this time it does not push the sources
diff --git a/tests/sources/bzr.py b/tests/sources/bzr.py
index 7df4d7471..694b30a7f 100644
--- a/tests/sources/bzr.py
+++ b/tests/sources/bzr.py
@@ -31,10 +31,7 @@ def test_fetch_checkout(cli, tmpdir, datafiles):
assert result.exit_code == 0
result = cli.run(project=project, args=["build", "target.bst"])
assert result.exit_code == 0
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
assert result.exit_code == 0
    # Assert we checked out the file as it was committed
diff --git a/tests/sources/deb.py b/tests/sources/deb.py
index 5d1fdcceb..656a65052 100644
--- a/tests/sources/deb.py
+++ b/tests/sources/deb.py
@@ -19,9 +19,7 @@ deb_name = "a_deb.deb"
def generate_project(project_dir, tmpdir):
project_file = os.path.join(project_dir, "project.conf")
- _yaml.roundtrip_dump(
- {"name": "foo", "aliases": {"tmpdir": "file:///" + str(tmpdir)}}, project_file
- )
+ _yaml.roundtrip_dump({"name": "foo", "aliases": {"tmpdir": "file:///" + str(tmpdir)}}, project_file)
def _copy_deb(start_location, tmpdir):
@@ -102,10 +100,7 @@ def test_stage_default_basedir(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the content of the first directory is checked out (base-dir: '')
@@ -133,10 +128,7 @@ def test_stage_no_basedir(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
    # Check that the full content of the deb is checked out (base-dir: '')
@@ -164,10 +156,7 @@ def test_stage_explicit_basedir(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the content of the first directory is checked out (base-dir: '')
diff --git a/tests/sources/git.py b/tests/sources/git.py
index 6a6f95364..fb8a30e3f 100644
--- a/tests/sources/git.py
+++ b/tests/sources/git.py
@@ -84,10 +84,7 @@ def test_submodule_fetch_checkout(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Assert we checked out both files at their expected location
@@ -124,10 +121,7 @@ def test_submodule_fetch_source_enable_explicit(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Assert we checked out both files at their expected location
@@ -164,10 +158,7 @@ def test_submodule_fetch_source_disable(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Assert we checked out both files at their expected location
@@ -204,10 +195,7 @@ def test_submodule_fetch_submodule_does_override(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Assert we checked out both files at their expected location
@@ -249,10 +237,7 @@ def test_submodule_fetch_submodule_individual_checkout(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Assert we checked out files at their expected location
@@ -281,9 +266,7 @@ def test_submodule_fetch_submodule_individual_checkout_explicit(cli, tmpdir, dat
# Add a submodule pointing to the one we created
repo.add_submodule("subdir", "file://" + subrepo.repo, checkout=False)
- ref = repo.add_submodule(
- "othersubdir", "file://" + other_subrepo.repo, checkout=True
- )
+ ref = repo.add_submodule("othersubdir", "file://" + other_subrepo.repo, checkout=True)
# Write out our test target
element = {
@@ -297,10 +280,7 @@ def test_submodule_fetch_submodule_individual_checkout_explicit(cli, tmpdir, dat
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Assert we checked out files at their expected location
@@ -335,10 +315,7 @@ def test_submodule_fetch_project_override(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Assert we checked out both files at their expected location
@@ -674,9 +651,7 @@ def test_track_invalid_submodule(cli, tmpdir, datafiles, fail):
@pytest.mark.skipif(HAVE_GIT is False, reason="git is not available")
@pytest.mark.datafiles(os.path.join(DATA_DIR, "template"))
@pytest.mark.parametrize("ref_format", ["sha1", "git-describe"])
-@pytest.mark.parametrize(
- "tag,extra_commit", [(False, False), (True, False), (True, True)]
-)
+@pytest.mark.parametrize("tag,extra_commit", [(False, False), (True, False), (True, True)])
def test_track_fetch(cli, tmpdir, datafiles, ref_format, tag, extra_commit):
project = str(datafiles)
@@ -781,9 +756,7 @@ def test_git_describe(cli, tmpdir, datafiles, ref_storage, tag_type):
result = cli.run(project=project, args=["source", "track", "target.bst"])
result.assert_success()
else:
- result = cli.run(
- project=project, args=["source", "track", "target.bst", "--deps", "all"]
- )
+ result = cli.run(project=project, args=["source", "track", "target.bst", "--deps", "all"])
result.assert_success()
if ref_storage == "inline":
@@ -805,31 +778,22 @@ def test_git_describe(cli, tmpdir, datafiles, ref_storage, tag_type):
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkout],)
result.assert_success()
if tag_type == "annotated":
options = []
else:
options = ["--tags"]
- describe = subprocess.check_output(
- ["git", "describe", *options], cwd=checkout, universal_newlines=True
- )
+ describe = subprocess.check_output(["git", "describe", *options], cwd=checkout, universal_newlines=True)
assert describe.startswith("tag2-2-")
describe_fp = subprocess.check_output(
- ["git", "describe", "--first-parent", *options],
- cwd=checkout,
- universal_newlines=True,
+ ["git", "describe", "--first-parent", *options], cwd=checkout, universal_newlines=True,
)
assert describe_fp.startswith("tag1-2-")
- tags = subprocess.check_output(
- ["git", "tag"], cwd=checkout, universal_newlines=True
- )
+ tags = subprocess.check_output(["git", "tag"], cwd=checkout, universal_newlines=True)
tags = set(tags.splitlines())
assert tags == set(["tag1", "tag2"])
@@ -901,9 +865,7 @@ def test_git_describe_head_is_tagged(cli, tmpdir, datafiles, ref_storage, tag_ty
result = cli.run(project=project, args=["source", "track", "target.bst"])
result.assert_success()
else:
- result = cli.run(
- project=project, args=["source", "track", "target.bst", "--deps", "all"]
- )
+ result = cli.run(project=project, args=["source", "track", "target.bst", "--deps", "all"])
result.assert_success()
if ref_storage == "inline":
@@ -926,30 +888,21 @@ def test_git_describe_head_is_tagged(cli, tmpdir, datafiles, ref_storage, tag_ty
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkout],)
result.assert_success()
if tag_type == "annotated":
options = []
else:
options = ["--tags"]
- describe = subprocess.check_output(
- ["git", "describe", *options], cwd=checkout, universal_newlines=True
- )
+ describe = subprocess.check_output(["git", "describe", *options], cwd=checkout, universal_newlines=True)
assert describe.startswith("tag")
- tags = subprocess.check_output(
- ["git", "tag"], cwd=checkout, universal_newlines=True
- )
+ tags = subprocess.check_output(["git", "tag"], cwd=checkout, universal_newlines=True)
tags = set(tags.splitlines())
assert tags == set(["tag"])
- rev_list = subprocess.check_output(
- ["git", "rev-list", "--all"], cwd=checkout, universal_newlines=True
- )
+ rev_list = subprocess.check_output(["git", "rev-list", "--all"], cwd=checkout, universal_newlines=True)
assert set(rev_list.splitlines()) == set([tagged_ref])
@@ -1013,29 +966,20 @@ def test_git_describe_relevant_history(cli, tmpdir, datafiles):
element_path = os.path.join(project, "target.bst")
_yaml.roundtrip_dump(element, element_path)
- result = cli.run(
- project=project, args=["source", "track", "target.bst", "--deps", "all"]
- )
+ result = cli.run(project=project, args=["source", "track", "target.bst", "--deps", "all"])
result.assert_success()
checkout = os.path.join(str(tmpdir), "checkout")
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkout],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkout],)
result.assert_success()
- describe = subprocess.check_output(
- ["git", "describe"], cwd=checkout, universal_newlines=True
- )
+ describe = subprocess.check_output(["git", "describe"], cwd=checkout, universal_newlines=True)
assert describe.startswith("tag1-2-")
- rev_list = subprocess.check_output(
- ["git", "rev-list", "--all"], cwd=checkout, universal_newlines=True
- )
+ rev_list = subprocess.check_output(["git", "rev-list", "--all"], cwd=checkout, universal_newlines=True)
assert set(rev_list.splitlines()) == set([head, tagged_ref, branch_boundary])
@@ -1102,9 +1046,7 @@ def test_overwrite_rogue_tag_multiple_remotes(cli, tmpdir, datafiles):
repodir, reponame = os.path.split(repo.repo)
project_config = _yaml.load(os.path.join(project, "project.conf"))
project_config["aliases"] = Node.from_dict({"repo": "http://example.com/"})
- project_config["mirrors"] = [
- {"name": "middle-earth", "aliases": {"repo": ["file://{}/".format(repodir)]}}
- ]
+ project_config["mirrors"] = [{"name": "middle-earth", "aliases": {"repo": ["file://{}/".format(repodir)]}}]
_yaml.roundtrip_dump(project_config, os.path.join(project, "project.conf"))
repo.add_annotated_tag("tag", "tag")
diff --git a/tests/sources/local.py b/tests/sources/local.py
index 08c508bfe..da68f1f75 100644
--- a/tests/sources/local.py
+++ b/tests/sources/local.py
@@ -37,9 +37,7 @@ def test_non_regular_file_or_directory(cli, datafiles):
elif os.path.isfile(localfile) and not os.path.islink(localfile):
result.assert_success()
else:
- result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND
- )
+ result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
@@ -75,10 +73,7 @@ def test_stage_file(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the checkout contains the expected file
@@ -93,10 +88,7 @@ def test_stage_directory(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the checkout contains the expected file and directory and other file
@@ -121,10 +113,7 @@ def test_stage_symlink(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the checkout contains the expected file and directory and other file
@@ -154,25 +143,18 @@ def test_stage_directory_symlink(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the checkout contains the expected directory and directory symlink
assert os.path.exists(os.path.join(checkoutdir, "subdir", "anotherfile.txt"))
- assert os.path.exists(
- os.path.join(checkoutdir, "symlink-to-subdir", "anotherfile.txt")
- )
+ assert os.path.exists(os.path.join(checkoutdir, "symlink-to-subdir", "anotherfile.txt"))
assert os.path.islink(os.path.join(checkoutdir, "symlink-to-subdir"))
@pytest.mark.integration
@pytest.mark.datafiles(os.path.join(DATA_DIR, "deterministic-umask"))
-@pytest.mark.skipif(
- not HAVE_SANDBOX, reason="Only available with a functioning sandbox"
-)
+@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
def test_deterministic_source_umask(cli, tmpdir, datafiles):
def create_test_file(*path, mode=0o644, content="content\n"):
path = os.path.join(*path)
diff --git a/tests/sources/patch.py b/tests/sources/patch.py
index da93684db..64d082797 100644
--- a/tests/sources/patch.py
+++ b/tests/sources/patch.py
@@ -33,9 +33,7 @@ def test_non_regular_file_patch(cli, datafiles):
if os.path.isfile(patch_path) and not os.path.islink(patch_path):
result.assert_success()
else:
- result.assert_main_error(
- ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND
- )
+ result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
@@ -70,10 +68,7 @@ def test_stage_and_patch(cli, tmpdir, datafiles):
# Build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Test the file.txt was patched and changed
@@ -109,10 +104,7 @@ def test_stage_separate_patch_dir(cli, tmpdir, datafiles):
# Track, fetch, build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Test the file.txt was patched and changed
@@ -128,10 +120,7 @@ def test_stage_multiple_patches(cli, tmpdir, datafiles):
# Track, fetch, build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Test the file.txt was patched and changed
@@ -147,10 +136,7 @@ def test_patch_strip_level(cli, tmpdir, datafiles):
# Track, fetch, build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Test the file.txt was patched and changed
diff --git a/tests/sources/previous_source_access.py b/tests/sources/previous_source_access.py
index 3ff91b5fd..c42a9a6fb 100644
--- a/tests/sources/previous_source_access.py
+++ b/tests/sources/previous_source_access.py
@@ -7,9 +7,7 @@ import pytest
from buildstream import _yaml
from buildstream.testing import cli # pylint: disable=unused-import
-DATA_DIR = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "previous_source_access"
-)
+DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "previous_source_access")
##################################################################
@@ -38,10 +36,7 @@ def test_custom_transform_source(cli, datafiles):
# Ensure we get correct output from foo_transform
cli.run(project=project, args=["build", "target.bst"])
destpath = os.path.join(cli.directory, "checkout")
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", destpath],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", destpath],)
result.assert_success()
# Assert that files from both sources exist, and that they have
# the same content
diff --git a/tests/sources/previous_source_access/plugins/sources/foo_transform.py b/tests/sources/previous_source_access/plugins/sources/foo_transform.py
index d59eaeca7..9e6ef3ad4 100644
--- a/tests/sources/previous_source_access/plugins/sources/foo_transform.py
+++ b/tests/sources/previous_source_access/plugins/sources/foo_transform.py
@@ -80,8 +80,7 @@ class FooTransformSource(Source):
def stage(self, directory):
# Simply stage the "filetransform" file
utils.safe_copy(
- os.path.join(self.mirror, "filetransform"),
- os.path.join(directory, "filetransform"),
+ os.path.join(self.mirror, "filetransform"), os.path.join(directory, "filetransform"),
)
diff --git a/tests/sources/remote.py b/tests/sources/remote.py
index 685f6bfba..a02601215 100644
--- a/tests/sources/remote.py
+++ b/tests/sources/remote.py
@@ -15,16 +15,12 @@ DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "remote",)
def generate_project(project_dir, tmpdir):
project_file = os.path.join(project_dir, "project.conf")
- _yaml.roundtrip_dump(
- {"name": "foo", "aliases": {"tmpdir": "file:///" + str(tmpdir)}}, project_file
- )
+ _yaml.roundtrip_dump({"name": "foo", "aliases": {"tmpdir": "file:///" + str(tmpdir)}}, project_file)
def generate_project_file_server(server, project_dir):
project_file = os.path.join(project_dir, "project.conf")
- _yaml.roundtrip_dump(
- {"name": "foo", "aliases": {"tmpdir": server.base_url()}}, project_file
- )
+ _yaml.roundtrip_dump({"name": "foo", "aliases": {"tmpdir": server.base_url()}}, project_file)
# Test that without ref, consistency is set appropriately.
@@ -74,10 +70,7 @@ def test_simple_file_build(cli, tmpdir, datafiles):
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Note that the url of the file in target.bst is actually /dir/file
    # but this test confirms we take the basename
@@ -104,10 +97,7 @@ def test_simple_file_custom_name_build(cli, tmpdir, datafiles):
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
assert not os.path.exists(os.path.join(checkoutdir, "file"))
assert os.path.exists(os.path.join(checkoutdir, "custom-file"))
@@ -120,9 +110,7 @@ def test_unique_key(cli, tmpdir, datafiles):
"""
project = str(datafiles)
generate_project(project, tmpdir)
- states = cli.get_element_states(
- project, ["target.bst", "target-custom.bst", "target-custom-executable.bst"]
- )
+ states = cli.get_element_states(project, ["target.bst", "target-custom.bst", "target-custom-executable.bst"])
assert states["target.bst"] == "fetch needed"
assert states["target-custom.bst"] == "fetch needed"
assert states["target-custom-executable.bst"] == "fetch needed"
@@ -131,9 +119,7 @@ def test_unique_key(cli, tmpdir, datafiles):
cli.run(project=project, args=["source", "fetch", "target.bst"])
# We should download the file only once
- states = cli.get_element_states(
- project, ["target.bst", "target-custom.bst", "target-custom-executable.bst"]
- )
+ states = cli.get_element_states(project, ["target.bst", "target-custom.bst", "target-custom-executable.bst"])
assert states["target.bst"] == "buildable"
assert states["target-custom.bst"] == "buildable"
assert states["target-custom-executable.bst"] == "buildable"
@@ -153,21 +139,12 @@ def test_executable(cli, tmpdir, datafiles):
project = str(datafiles)
generate_project(project, tmpdir)
checkoutdir = os.path.join(str(tmpdir), "checkout")
- assert (
- cli.get_element_state(project, "target-custom-executable.bst") == "fetch needed"
- )
+ assert cli.get_element_state(project, "target-custom-executable.bst") == "fetch needed"
# Try to fetch it
cli.run(project=project, args=["build", "target-custom-executable.bst"])
cli.run(
- project=project,
- args=[
- "artifact",
- "checkout",
- "target-custom-executable.bst",
- "--directory",
- checkoutdir,
- ],
+ project=project, args=["artifact", "checkout", "target-custom-executable.bst", "--directory", checkoutdir,],
)
mode = os.stat(os.path.join(checkoutdir, "some-custom-file")).st_mode
assert mode & stat.S_IEXEC
@@ -200,10 +177,7 @@ def test_use_netrc(cli, datafiles, server_type, tmpdir):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
checkout_file = os.path.join(checkoutdir, "file")
diff --git a/tests/sources/tar.py b/tests/sources/tar.py
index cdd2328fd..ab493f5df 100644
--- a/tests/sources/tar.py
+++ b/tests/sources/tar.py
@@ -42,9 +42,7 @@ def _assemble_tar_lz(workingdir, srcdir, dstfile):
def generate_project(project_dir, tmpdir):
project_file = os.path.join(project_dir, "project.conf")
- _yaml.roundtrip_dump(
- {"name": "foo", "aliases": {"tmpdir": "file:///" + str(tmpdir)}}, project_file
- )
+ _yaml.roundtrip_dump({"name": "foo", "aliases": {"tmpdir": "file:///" + str(tmpdir)}}, project_file)
def generate_project_file_server(base_url, project_dir):
@@ -124,10 +122,7 @@ def test_stage_default_basedir(cli, tmpdir, datafiles, srcdir):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the content of the first directory is checked out (base-dir: '*')
@@ -156,10 +151,7 @@ def test_stage_no_basedir(cli, tmpdir, datafiles, srcdir):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the full content of the tarball is checked out (base-dir: '')
@@ -188,10 +180,7 @@ def test_stage_explicit_basedir(cli, tmpdir, datafiles, srcdir):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the content of the first directory is checked out (base-dir: '*')
@@ -214,15 +203,10 @@ def test_stage_contains_links(cli, tmpdir, datafiles):
    # Create a hardlink; we won't trust git to store that info for us
os.makedirs(
- os.path.join(str(datafiles), "content", "base-directory", "subdir2"),
- exist_ok=True,
- )
- file1 = os.path.join(
- str(datafiles), "content", "base-directory", "subdir1", "file.txt"
- )
- file2 = os.path.join(
- str(datafiles), "content", "base-directory", "subdir2", "file.txt"
+ os.path.join(str(datafiles), "content", "base-directory", "subdir2"), exist_ok=True,
)
+ file1 = os.path.join(str(datafiles), "content", "base-directory", "subdir1", "file.txt")
+ file2 = os.path.join(str(datafiles), "content", "base-directory", "subdir2", "file.txt")
os.link(file1, file2)
_assemble_tar(os.path.join(str(datafiles), "content"), "base-directory", src_tar)
@@ -234,10 +218,7 @@ def test_stage_contains_links(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the content of the first directory is checked out (base-dir: '*')
@@ -266,10 +247,7 @@ def test_stage_default_basedir_lzip(cli, tmpdir, datafiles, srcdir):
result.assert_success()
result = cli.run(project=project, args=["build", "target-lz.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target-lz.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target-lz.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the content of the first directory is checked out (base-dir: '*')
@@ -297,12 +275,7 @@ def test_read_only_dir(cli, tmpdir, datafiles, tar_name, base_dir):
{
"kind": "import",
"sources": [
- {
- "kind": "tar",
- "url": "tmpdir:/{}".format(tar_file),
- "ref": "foo",
- "base-dir": base_dir,
- }
+ {"kind": "tar", "url": "tmpdir:/{}".format(tar_file), "ref": "foo", "base-dir": base_dir,}
],
},
bst_path,
@@ -326,13 +299,9 @@ def test_read_only_dir(cli, tmpdir, datafiles, tar_name, base_dir):
env = {"TMP": tmpdir_str}
# Track, fetch, build, checkout
- result = cli.run(
- project=project, args=["source", "track", "target.bst"], env=env
- )
+ result = cli.run(project=project, args=["source", "track", "target.bst"], env=env)
result.assert_success()
- result = cli.run(
- project=project, args=["source", "fetch", "target.bst"], env=env
- )
+ result = cli.run(project=project, args=["source", "fetch", "target.bst"], env=env)
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"], env=env)
result.assert_success()
@@ -382,10 +351,7 @@ def test_use_netrc(cli, datafiles, server_type, tmpdir):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
original_dir = os.path.join(str(datafiles), "content", "a")
@@ -413,9 +379,7 @@ def test_netrc_already_specified_user(cli, datafiles, server_type, tmpdir):
with create_file_server(server_type) as server:
server.add_user("otheruser", "12345", file_server_files)
parts = urllib.parse.urlsplit(server.base_url())
- base_url = urllib.parse.urlunsplit(
- [parts[0], "otheruser@{}".format(parts[1]), *parts[2:]]
- )
+ base_url = urllib.parse.urlunsplit([parts[0], "otheruser@{}".format(parts[1]), *parts[2:]])
generate_project_file_server(base_url, project)
src_tar = os.path.join(file_server_files, "a.tar.gz")
@@ -440,9 +404,7 @@ def test_homeless_environment(cli, tmpdir, datafiles):
_assemble_tar(os.path.join(str(datafiles), "content"), "a", src_tar)
# Use a track, make sure the plugin tries to find a ~/.netrc
- result = cli.run(
- project=project, args=["source", "track", "target.bst"], env={"HOME": None}
- )
+ result = cli.run(project=project, args=["source", "track", "target.bst"], env={"HOME": None})
result.assert_success()
@@ -472,9 +434,7 @@ def test_out_of_basedir_hardlinks(cli, tmpdir, datafiles):
# attributes set
with tarfile.open(src_tar, "r:gz") as tar:
assert any(
- member.islnk()
- and member.path == "contents/to_extract/a"
- and member.linkname == "contents/elsewhere/a"
+ member.islnk() and member.path == "contents/to_extract/a" and member.linkname == "contents/elsewhere/a"
for member in tar.getmembers()
)
@@ -485,10 +445,7 @@ def test_out_of_basedir_hardlinks(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
original_dir = os.path.join(str(datafiles), "contents", "to_extract")
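
The tar tests above drive _assemble_tar(workingdir, srcdir, dstfile) (and an lzip variant), whose body lies outside these hunks. A minimal stand-in using only the standard library — with the gzip compression and arcname layout inferred from how the tests name and consume src_tar, so treat both as assumptions — might look like:

import os
import tarfile

def assemble_tar(workingdir, srcdir, dstfile):
    # Pack <workingdir>/<srcdir> into dstfile, storing entries relative to
    # workingdir so the archive has srcdir as its single base directory.
    with tarfile.open(dstfile, "w:gz") as tar:
        tar.add(os.path.join(workingdir, srcdir), arcname=srcdir)
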
diff --git a/tests/sources/zip.py b/tests/sources/zip.py
index d1b000167..0a5f6eed3 100644
--- a/tests/sources/zip.py
+++ b/tests/sources/zip.py
@@ -29,16 +29,12 @@ def _assemble_zip(workingdir, dstfile):
def generate_project(project_dir, tmpdir):
project_file = os.path.join(project_dir, "project.conf")
- _yaml.roundtrip_dump(
- {"name": "foo", "aliases": {"tmpdir": "file:///" + str(tmpdir)}}, project_file
- )
+ _yaml.roundtrip_dump({"name": "foo", "aliases": {"tmpdir": "file:///" + str(tmpdir)}}, project_file)
def generate_project_file_server(server, project_dir):
project_file = os.path.join(project_dir, "project.conf")
- _yaml.roundtrip_dump(
- {"name": "foo", "aliases": {"tmpdir": server.base_url()}}, project_file
- )
+ _yaml.roundtrip_dump({"name": "foo", "aliases": {"tmpdir": server.base_url()}}, project_file)
# Test that without ref, consistency is set appropriately.
@@ -112,10 +108,7 @@ def test_stage_default_basedir(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the content of the first directory is checked out (base-dir: '*')
@@ -143,10 +136,7 @@ def test_stage_no_basedir(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
    # Check that the full content of the archive is checked out (base-dir: '')
@@ -174,10 +164,7 @@ def test_stage_explicit_basedir(cli, tmpdir, datafiles):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
# Check that the content of the first directory is checked out (base-dir: '*')
@@ -219,10 +206,7 @@ def test_use_netrc(cli, datafiles, server_type, tmpdir):
result.assert_success()
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
- result = cli.run(
- project=project,
- args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
- )
+ result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],)
result.assert_success()
original_dir = os.path.join(str(datafiles), "content", "a")
diff --git a/tests/testutils/artifactshare.py b/tests/testutils/artifactshare.py
index 39cc7da10..2038bfedd 100644
--- a/tests/testutils/artifactshare.py
+++ b/tests/testutils/artifactshare.py
@@ -80,12 +80,7 @@ class ArtifactShare:
cleanup_on_sigterm()
server = stack.enter_context(
- create_server(
- self.repodir,
- quota=self.quota,
- enable_push=True,
- index_only=self.index_only,
- )
+ create_server(self.repodir, quota=self.quota, enable_push=True, index_only=self.index_only,)
)
port = server.add_insecure_port("localhost:0")
server.start()
@@ -133,9 +128,7 @@ class ArtifactShare:
reachable = set()
def reachable_dir(digest):
- self.cas._reachable_refs_dir(
- reachable, digest, update_mtime=False, check_exists=True
- )
+ self.cas._reachable_refs_dir(reachable, digest, update_mtime=False, check_exists=True)
try:
if str(artifact_proto.files):
@@ -219,13 +212,9 @@ statvfs_result = namedtuple("statvfs_result", "f_blocks f_bfree f_bsize f_bavail
# Assert that a given artifact is in the share
#
def assert_shared(cli, share, project, element_name, *, project_name="test"):
- if not share.get_artifact(
- cli.get_artifact_name(project, project_name, element_name)
- ):
+ if not share.get_artifact(cli.get_artifact_name(project, project_name, element_name)):
raise AssertionError(
- "Artifact share at {} does not contain the expected element {}".format(
- share.repo, element_name
- )
+ "Artifact share at {} does not contain the expected element {}".format(share.repo, element_name)
)
@@ -234,7 +223,5 @@ def assert_shared(cli, share, project, element_name, *, project_name="test"):
def assert_not_shared(cli, share, project, element_name, *, project_name="test"):
if share.get_artifact(cli.get_artifact_name(project, project_name, element_name)):
raise AssertionError(
- "Artifact share at {} unexpectedly contains the element {}".format(
- share.repo, element_name
- )
+ "Artifact share at {} unexpectedly contains the element {}".format(share.repo, element_name)
)
diff --git a/tests/testutils/context.py b/tests/testutils/context.py
index 5d8294fd2..821adef0a 100644
--- a/tests/testutils/context.py
+++ b/tests/testutils/context.py
@@ -55,9 +55,7 @@ class _DummyTask:
@contextmanager
-def _get_dummy_task(
- self, activity_name, *, element_name=None, full_name=None, silent_nested=False
-):
+def _get_dummy_task(self, activity_name, *, element_name=None, full_name=None, silent_nested=False):
yield _DummyTask("state", activity_name, full_name, 0)
diff --git a/tests/testutils/http_server.py b/tests/testutils/http_server.py
index f333f28b2..8591159f8 100644
--- a/tests/testutils/http_server.py
+++ b/tests/testutils/http_server.py
@@ -44,9 +44,7 @@ class RequestHandler(SimpleHTTPRequestHandler):
body = content.encode("UTF-8", "replace")
self.send_header("Content-Type", self.error_content_type)
self.send_header("Content-Length", str(len(body)))
- self.send_header(
- "WWW-Authenticate", 'Basic realm="{}"'.format(self.server.realm)
- )
+ self.send_header("WWW-Authenticate", 'Basic realm="{}"'.format(self.server.realm))
self.end_headers()
diff --git a/tests/testutils/patch.py b/tests/testutils/patch.py
index 6dec68ca9..85b38def8 100644
--- a/tests/testutils/patch.py
+++ b/tests/testutils/patch.py
@@ -5,9 +5,7 @@ def apply(file, patch):
try:
subprocess.check_output(["patch", file, patch])
except subprocess.CalledProcessError as e:
- message = "Patch failed with exit code {}\n Output:\n {}".format(
- e.returncode, e.output
- )
+ message = "Patch failed with exit code {}\n Output:\n {}".format(e.returncode, e.output)
print(message)
raise
@@ -16,8 +14,6 @@ def remove(file, patch):
try:
subprocess.check_output(["patch", "--reverse", file, patch])
except subprocess.CalledProcessError as e:
- message = "patch --reverse failed with exit code {}\n Output:\n {}".format(
- e.returncode, e.output
- )
+ message = "patch --reverse failed with exit code {}\n Output:\n {}".format(e.returncode, e.output)
print(message)
raise
diff --git a/tests/testutils/python_repo.py b/tests/testutils/python_repo.py
index 13e9f6209..7d9ae4e47 100644
--- a/tests/testutils/python_repo.py
+++ b/tests/testutils/python_repo.py
@@ -76,11 +76,7 @@ def generate_pip_package(tmpdir, pypi, name, version="0.1", dependencies=None):
setup_file = os.path.join(tmpdir, "setup.py")
pkgdirname = re.sub("[^0-9a-zA-Z]+", "", name)
with open(setup_file, "w") as f:
- f.write(
- SETUP_TEMPLATE.format(
- name=name, version=version, pkgdirname=pkgdirname, pkgdeps=dependencies
- )
- )
+ f.write(SETUP_TEMPLATE.format(name=name, version=version, pkgdirname=pkgdirname, pkgdeps=dependencies))
os.chmod(setup_file, 0o755)
package = os.path.join(tmpdir, pkgdirname)
@@ -128,9 +124,7 @@ def setup_pypi_repo(tmpdir):
def add_packages(packages, pypi_repo):
for package, dependencies in packages.items():
pkgdir = create_pkgdir(package)
- generate_pip_package(
- pkgdir, pypi_repo, package, dependencies=list(dependencies.keys())
- )
+ generate_pip_package(pkgdir, pypi_repo, package, dependencies=list(dependencies.keys()))
for dependency, dependency_dependencies in dependencies.items():
add_packages({dependency: dependency_dependencies}, pypi_repo)
diff --git a/tests/testutils/repo/bzr.py b/tests/testutils/repo/bzr.py
index 246a3eb35..f5d8653b6 100644
--- a/tests/testutils/repo/bzr.py
+++ b/tests/testutils/repo/bzr.py
@@ -29,9 +29,7 @@ class Bzr(Repo):
self.copy_directory(directory, branch_dir)
subprocess.call([self.bzr, "add", "."], env=self.env, cwd=branch_dir)
subprocess.call(
- [self.bzr, "commit", '--message="Initial commit"'],
- env=self.env,
- cwd=branch_dir,
+ [self.bzr, "commit", '--message="Initial commit"'], env=self.env, cwd=branch_dir,
)
return self.latest_commit()
@@ -45,13 +43,7 @@ class Bzr(Repo):
def latest_commit(self):
return subprocess.check_output(
- [
- self.bzr,
- "version-info",
- "--custom",
- "--template={revno}",
- os.path.join(self.repo, "trunk"),
- ],
+ [self.bzr, "version-info", "--custom", "--template={revno}", os.path.join(self.repo, "trunk"),],
env=self.env,
universal_newlines=True,
).strip()
diff --git a/tests/testutils/repo/git.py b/tests/testutils/repo/git.py
index 19ab91601..b9360e9cd 100644
--- a/tests/testutils/repo/git.py
+++ b/tests/testutils/repo/git.py
@@ -54,9 +54,7 @@ class Git(Repo):
def modify_file(self, new_file, path):
shutil.copy(new_file, os.path.join(self.repo, path))
- self._run_git(
- "commit", path, "-m", "Modified {}".format(os.path.basename(path))
- )
+ self._run_git("commit", path, "-m", "Modified {}".format(os.path.basename(path)))
return self.latest_commit()
def add_submodule(self, subdir, url=None, checkout=None):
@@ -92,9 +90,7 @@ class Git(Repo):
return config
def latest_commit(self):
- return self._run_git(
- "rev-parse", "HEAD", stdout=subprocess.PIPE, universal_newlines=True,
- ).stdout.strip()
+ return self._run_git("rev-parse", "HEAD", stdout=subprocess.PIPE, universal_newlines=True,).stdout.strip()
def branch(self, branch_name):
self._run_git("checkout", "-b", branch_name)
@@ -110,6 +106,4 @@ class Git(Repo):
return self.latest_commit()
def rev_parse(self, rev):
- return self._run_git(
- "rev-parse", rev, stdout=subprocess.PIPE, universal_newlines=True,
- ).stdout.strip()
+ return self._run_git("rev-parse", rev, stdout=subprocess.PIPE, universal_newlines=True,).stdout.strip()
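
Both latest_commit() and rev_parse() above funnel through the Repo helper's _run_git(), which these hunks only show in part. Stripped of that plumbing, the operation is essentially the following self-contained equivalent (the -C flag and the subprocess.run wrapper are assumptions for illustration, not code from this tree):

import subprocess

def latest_commit(repo_dir):
    # Equivalent of _run_git("rev-parse", "HEAD", ...): run git against the
    # given checkout, capture stdout as text, strip the trailing newline.
    return subprocess.run(
        ["git", "-C", repo_dir, "rev-parse", "HEAD"],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    ).stdout.strip()
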
diff --git a/tests/testutils/setuptools.py b/tests/testutils/setuptools.py
index 119979da6..0f7f30f91 100644
--- a/tests/testutils/setuptools.py
+++ b/tests/testutils/setuptools.py
@@ -10,9 +10,7 @@ class MockDist:
self.module_name = module_name
def get_resource_filename(self, *_args, **_kwargs):
- return os.path.join(
- self.datafiles.dirname, self.datafiles.basename, self.module_name
- )
+ return os.path.join(self.datafiles.dirname, self.datafiles.basename, self.module_name)
# A mock setuptools entry object.