# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os

import pexpect
import pytest

from buildstream.testing import runcli
from buildstream.testing._utils.site import HAVE_SANDBOX
from tests.testutils.constants import PEXPECT_TIMEOUT_SHORT, PEXPECT_TIMEOUT_LONG


pytestmark = pytest.mark.integration


DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")


# This fixture launches a `bst build` of the given element and yields a
# `pexpect.spawn` object for the interactive session.
@pytest.fixture
def build_session(datafiles, element_name):
    project = str(datafiles)

    # Spawn the interactive session using the `configured()` context manager
    # in order to use the same configuration file as the `cli` fixture.
    with runcli.configured(project) as config_file:
        session = pexpect.spawn(
            "bst",
            ["--directory", project, "--config", config_file, "--no-colors", "build", element_name],
            timeout=PEXPECT_TIMEOUT_SHORT,
        )
        yield session
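        # No explicit teardown here: each test closes the session itself
        # and then asserts on its exit status.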


# Verify that BuildStream exits cleanly on any of the following choices.
#
# In our simple test case, there is no practical difference between the
# choices below; in the future, we'd like to test their behavior separately.
# For now, this just verifies that BuildStream doesn't choke on any of them.
@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("element_name", ["interactive/failed-build.bst"])
@pytest.mark.parametrize("choice", ["continue", "quit", "terminate", "retry"])
def test_failed_build_quit(element_name, build_session, choice):
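    # Wait for the interactive prompt that BuildStream presents when an
    # element fails to build, then answer it with the parametrized choice.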
    build_session.expect_exact("Choice: [continue]:", timeout=PEXPECT_TIMEOUT_LONG)
    build_session.sendline(choice)
    build_session.expect_exact(pexpect.EOF)
    build_session.close()

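    # The session should exit unsuccessfully; 255 is how a -1 exit code
    # appears to the parent process.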
    assert build_session.exitstatus == 255


@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("element_name", ["interactive/failed-build.bst"])
def test_failed_build_log(element_name, build_session):
    build_session.expect_exact("Choice: [continue]:", timeout=PEXPECT_TIMEOUT_LONG)
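    # Ask to view the failure log; BuildStream opens it in a pager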
    build_session.sendline("log")

    # Send a batch of newlines to scroll to the end of the pager
    build_session.sendline(os.linesep * 20)

    # Assert that we got something from the logs
    build_session.expect("FAILURE interactive/failed-build.bst: Running (build-)?commands")

    # Quit the pager
    build_session.send("q")

    # Quit the session
    build_session.expect_exact("Choice: [continue]:")
    build_session.sendline("quit")
    build_session.expect_exact(pexpect.EOF)
    build_session.close()

    assert build_session.exitstatus == 255


@pytest.mark.skipif(not HAVE_SANDBOX, reason="Only available with a functioning sandbox")
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("element_name", ["interactive/failed-build.bst"])
def test_failed_build_shell(element_name, build_session):
    build_session.expect_exact("Choice: [continue]:", timeout=PEXPECT_TIMEOUT_LONG)
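    # Drop into a debug shell inside the failed build sandbox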
    build_session.sendline("shell")

    # Wait for the shell prompt
    build_session.expect_exact("interactive/failed-build.bst:/buildstream/test/interactive/failed-build.bst]")

    # Verify that we have our sources
    build_session.sendline("ls")
    build_session.expect_exact("test.txt")

    # Quit the shell
    build_session.sendline("exit")

    # Quit the session
    build_session.expect_exact("Choice: [continue]:", timeout=PEXPECT_TIMEOUT_LONG)
    build_session.sendline("quit")
    build_session.expect_exact(pexpect.EOF)
    build_session.close()

    assert build_session.exitstatus == 255