path: root/tests/frontend/order.py
# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name

import os

import pytest
from buildstream.plugintestutils import create_repo
from buildstream.plugintestutils import cli  # pylint: disable=unused-import
from buildstream import _yaml

# Project directory
DATA_DIR = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    "project",
)


# create_element()
#
# Args:
#    project (str): The project directory where testing is happening
#    name (str): The element name to create
#    dependencies (list): The list of dependencies to dump into YAML format
#
# Returns:
#    (Repo): The corresponding git repository created for the element
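#
# Example (illustrative):
#
#    create_element(project, 'base.bst', [])
#
# creates an import element 'base.bst' with no dependencies, backed by
# its own 'base.bst-repo' git repository.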
def create_element(project, name, dependencies):
    dev_files_path = os.path.join(project, 'files', 'dev-files')
    element_path = os.path.join(project, 'elements')
    repo = create_repo('git', project, "{}-repo".format(name))
    ref = repo.create(dev_files_path)

    element = {
        'kind': 'import',
        'sources': [
            repo.source_config(ref=ref)
        ],
        'depends': dependencies
    }
    _yaml.dump(element, os.path.join(element_path, name))

    return repo


# This tests a variety of scenarios and checks that the order in
# which things are processed remains stable.
#
# This is especially important in order to ensure that our
# depth sorting and optimization of which elements should be
# processed first is doing its job right, and that we are
# promoting elements to the build queue as soon as possible.
#
# Parameters:
#    target (str): The target element to invoke bst with
#    template (dict): The project template dictionary, for create_element()
#    expected (list): A list of element names in the expected order
#
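# For instance, in the first case below, '1.bst' has no dependencies so it
# is processed first, then '0.bst' (which depends only on '1.bst'), then
# '2.bst', and finally the target '3.bst'.
#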
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("target,template,expected", [
    # First simple test
    ('3.bst', {
        '0.bst': ['1.bst'],
        '1.bst': [],
        '2.bst': ['0.bst'],
        '3.bst': ['0.bst', '1.bst', '2.bst']
    }, ['1.bst', '0.bst', '2.bst', '3.bst']),

    # A more complicated test with build of build dependencies
    ('target.bst', {
        'a.bst': [],
        'base.bst': [],
        'timezones.bst': [],
        'middleware.bst': [{'filename': 'base.bst', 'type': 'build'}],
        'app.bst': [{'filename': 'middleware.bst', 'type': 'build'}],
        'target.bst': ['a.bst', 'base.bst', 'middleware.bst', 'app.bst', 'timezones.bst']
    }, ['base.bst', 'middleware.bst', 'a.bst', 'app.bst', 'timezones.bst', 'target.bst']),
])
@pytest.mark.parametrize("operation", [('show'), ('fetch'), ('build')])
def test_order(cli, datafiles, operation, target, template, expected):
    project = str(datafiles)

    # Configure the scheduler to allow only one fetcher and one builder
    # at a time, making it easy to determine the order in which elements
    # are processed.
    cli.configure({
        'scheduler': {
            'fetchers': 1,
            'builders': 1
        }
    })

    # Create the elements of the project from the template; each
    # import element is backed by its own git repo containing the
    # same dev-files.
    #
    for element, dependencies in template.items():
        create_element(project, element, dependencies)

    # Run test and collect results
    if operation == 'show':
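        # 'bst show --deps plan' with the '%{name}' format lists the planned
        # elements one per line, so the order can be read directly from stdout.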
        result = cli.run(args=['show', '--deps', 'plan', '--format', '%{name}', target], project=project, silent=True)
        result.assert_success()
        results = result.output.splitlines()
    else:
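        # For 'fetch' and 'build', the order is taken from the order in
        # which each element's job was started.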
        if operation == 'fetch':
            result = cli.run(args=['source', 'fetch', target], project=project, silent=True)
        else:
            result = cli.run(args=[operation, target], project=project, silent=True)
        result.assert_success()
        results = result.get_start_order(operation)

    # Assert the order
    print("Expected order: {}".format(expected))
    print("Observed result order: {}".format(results))
    assert results == expected