diff options
-rw-r--r-- | .zuul.yaml | 19 | ||||
-rw-r--r-- | nova/api/openstack/wsgi_app.py | 24 | ||||
-rw-r--r-- | nova/cmd/manage.py | 2 | ||||
-rw-r--r-- | nova/console/websocketproxy.py | 18 | ||||
-rw-r--r-- | nova/db/sqlalchemy/api.py | 221 | ||||
-rw-r--r-- | nova/network/neutron.py | 1 | ||||
-rw-r--r-- | nova/test.py | 23 | ||||
-rw-r--r-- | nova/tests/functional/db/test_archive.py | 59 | ||||
-rw-r--r-- | nova/tests/functional/test_nova_manage.py | 2 | ||||
-rw-r--r-- | nova/tests/unit/api/openstack/test_wsgi_app.py | 85 | ||||
-rw-r--r-- | nova/tests/unit/console/test_websocketproxy.py | 67 | ||||
-rw-r--r-- | nova/tests/unit/db/test_db_api.py | 50 | ||||
-rw-r--r-- | nova/tests/unit/network/test_neutron.py | 2 | ||||
-rw-r--r-- | nova/tests/unit/test_utils.py | 98 | ||||
-rw-r--r-- | nova/tests/unit/virt/libvirt/test_vif.py | 48 | ||||
-rw-r--r-- | nova/utils.py | 46 | ||||
-rw-r--r-- | nova/virt/libvirt/vif.py | 3 | ||||
-rw-r--r-- | releasenotes/notes/bug-1939604-547c729b7741831b.yaml | 5 | ||||
-rw-r--r-- | releasenotes/notes/console-proxy-reject-open-redirect-4ac0a7895acca7eb.yaml | 19 | ||||
-rwxr-xr-x | tools/check-cherry-picks.sh | 5 | ||||
-rw-r--r-- | tox.ini | 18 |
21 files changed, 671 insertions, 144 deletions
diff --git a/.zuul.yaml b/.zuul.yaml index 2ec47a6f30..675283d51f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -79,6 +79,17 @@ timeout: 3600 - job: + name: nova-tox-validate-backport + parent: openstack-tox + description: | + Determine whether a backport is ready to be merged by checking whether it + has already been merged to master or more recent stable branches. + + Uses tox with the ``validate-backport`` environment. + vars: + tox_envlist: validate-backport + +- job: name: nova-live-migration parent: tempest-multinode-full-py3 description: | @@ -373,7 +384,6 @@ - check-requirements - integrated-gate-compute - openstack-cover-jobs - - openstack-lower-constraints-jobs - openstack-python3-ussuri-jobs - periodic-stable-jobs - publish-openstack-docs-pti @@ -400,6 +410,8 @@ - nova-multi-cell - nova-next - nova-tox-functional-py36 + - nova-tox-validate-backport: + voting: false - tempest-integrated-compute: # NOTE(gmann): Policies changes do not need to run all the # integration test jobs. Running only tempest and grenade @@ -419,6 +431,8 @@ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ + - openstack-tox-lower-constraints: + voting: false - nova-grenade-multinode: irrelevant-files: *policies-irrelevant-files - tempest-ipv6-only: @@ -434,8 +448,11 @@ - nova-tox-functional-py36 - nova-multi-cell - nova-next + - nova-tox-validate-backport - tempest-integrated-compute: irrelevant-files: *policies-irrelevant-files + - openstack-tox-lower-constraints: + voting: false - nova-grenade-multinode: irrelevant-files: *policies-irrelevant-files - tempest-ipv6-only: diff --git a/nova/api/openstack/wsgi_app.py b/nova/api/openstack/wsgi_app.py index 481ce71834..aa9af49743 100644 --- a/nova/api/openstack/wsgi_app.py +++ b/nova/api/openstack/wsgi_app.py @@ -30,6 +30,8 @@ CONF = cfg.CONF CONFIG_FILES = ['api-paste.ini', 'nova.conf'] +LOG = logging.getLogger(__name__) + objects.register_all() @@ -78,8 +80,12 @@ def error_application(exc, name): return application -def init_application(name): - 
conf_files = _get_config_files() +@utils.run_once('Global data already initialized, not re-initializing.', + LOG.info) +def init_global_data(conf_files): + # NOTE(melwitt): parse_args initializes logging and calls global rpc.init() + # and db_api.configure(). The db_api.configure() call does not initiate any + # connection to the database. config.parse_args([], default_config_files=conf_files) logging.setup(CONF, "nova") @@ -94,11 +100,25 @@ def init_application(name): logging.getLogger(__name__), logging.DEBUG) + +def init_application(name): + conf_files = _get_config_files() + + # NOTE(melwitt): The init_application method can be called multiple times + # within a single python interpreter instance if any exception is raised + # during it (example: DBConnectionError while setting up the service) and + # apache/mod_wsgi reloads the init_application script. So, we initialize + # global data separately and decorate the method to run only once in a + # python interpreter instance. + init_global_data(conf_files) + try: _setup_service(CONF.host, name) except exception.ServiceTooOld as exc: return error_application(exc, name) + # This global init is safe because if we got here, we already successfully + # set up the service and setting up the profile cannot fail. 
service.setup_profiler(name, CONF.host) conf = conf_files[0] diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py index 2e4e135ee0..cbf6cb5e45 100644 --- a/nova/cmd/manage.py +++ b/nova/cmd/manage.py @@ -2652,7 +2652,7 @@ class PlacementCommands(object): """ url = '/resource_providers' if 'uuid' in kwargs: - url += '&uuid=%s' % kwargs['uuid'] + url += '?uuid=%s' % kwargs['uuid'] resp = placement.get(url, global_request_id=context.global_id, version='1.14') diff --git a/nova/console/websocketproxy.py b/nova/console/websocketproxy.py index b8e9e99c38..4c5596032f 100644 --- a/nova/console/websocketproxy.py +++ b/nova/console/websocketproxy.py @@ -19,6 +19,8 @@ Leverages websockify.py by Joel Martin ''' import copy +from http import HTTPStatus +import os import socket import sys @@ -288,6 +290,22 @@ class NovaProxyRequestHandler(websockify.ProxyRequestHandler): def socket(self, *args, **kwargs): return websockifyserver.WebSockifyServer.socket(*args, **kwargs) + def send_head(self): + # This code is copied from this example patch: + # https://bugs.python.org/issue32084#msg306545 + path = self.translate_path(self.path) + if os.path.isdir(path): + parts = urlparse.urlsplit(self.path) + if not parts.path.endswith('/'): + # Browsers interpret "Location: //uri" as an absolute URI + # like "http://URI" + if self.path.startswith('//'): + self.send_error(HTTPStatus.BAD_REQUEST, + "URI must not start with //") + return None + + return super(NovaProxyRequestHandler, self).send_head() + class NovaWebSocketProxy(websockify.WebSocketProxy): def __init__(self, *args, **kwargs): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index af1c7c3d6a..1fa7f5a322 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -4080,64 +4080,132 @@ def task_log_end_task(context, task_name, period_beginning, period_ending, ################## -def _archive_if_instance_deleted(table, shadow_table, instances, conn, - max_rows, before): - """Look for records that pertain 
to deleted instances, but may not be - deleted themselves. This catches cases where we delete an instance, - but leave some residue because of a failure in a cleanup path or - similar. - - Logic is: if I have a column called instance_uuid, and that instance - is deleted, then I can be deleted. - """ - - # NOTE(jake): handle instance_actions_events differently as it relies on - # instance_actions.id not instances.uuid - if table.name == "instance_actions_events": - instance_actions = models.BASE.metadata.tables["instance_actions"] - query_select = sql.select( - [table], - and_(instances.c.deleted != instances.c.deleted.default.arg, - instances.c.uuid == instance_actions.c.instance_uuid, - instance_actions.c.id == table.c.action_id)) +def _get_tables_with_fk_to_table(table): + """Get a list of tables that refer to the given table by foreign key (FK). - else: - query_select = sql.select( - [table], - and_(instances.c.deleted != instances.c.deleted.default.arg, - instances.c.uuid == table.c.instance_uuid)) - - if before: - query_select = query_select.where(instances.c.deleted_at < before) + :param table: Table object (parent) for which to find references by FK - query_select = query_select.order_by(table.c.id).limit(max_rows) - - query_insert = shadow_table.insert(inline=True).\ - from_select([c.name for c in table.c], query_select) - - delete_statement = DeleteFromSelect(table, query_select, - table.c.id) + :returns: A list of Table objects that refer to the specified table by FK + """ + tables = [] + for t in models.BASE.metadata.tables.values(): + for fk in t.foreign_keys: + if fk.references(table): + tables.append(t) + return tables + + +def _get_fk_stmts(metadata, conn, table, column, records): + """Find records related to this table by foreign key (FK) and create and + return insert/delete statements for them. + + Logic is: find the tables that reference the table passed to this method + and walk the tree of references by FK. 
As child records are found, prepend + them to deques to execute later in a single database transaction (to avoid + orphaning related records if any one insert/delete fails or the archive + process is otherwise interrupted). + + :param metadata: Metadata object to use to construct a shadow Table object + :param conn: Connection object to use to select records related by FK + :param table: Table object (parent) for which to find references by FK + :param column: Column object (parent) to use to select records related by + FK + :param records: A list of records (column values) to use to select records + related by FK + + :returns: tuple of (insert statements, delete statements) for records + related by FK to insert into shadow tables and delete from main tables + """ + inserts = collections.deque() + deletes = collections.deque() + fk_tables = _get_tables_with_fk_to_table(table) + for fk_table in fk_tables: + # Create the shadow table for the referencing table. + fk_shadow_tablename = _SHADOW_TABLE_PREFIX + fk_table.name + try: + fk_shadow_table = Table(fk_shadow_tablename, metadata, + autoload=True) + except NoSuchTableError: + # No corresponding shadow table; skip it. + continue - try: - with conn.begin(): - conn.execute(query_insert) - result_delete = conn.execute(delete_statement) - return result_delete.rowcount - except db_exc.DBReferenceError as ex: - LOG.warning('Failed to archive %(table)s: %(error)s', - {'table': table.name, - 'error': six.text_type(ex)}) - return 0 + # TODO(stephenfin): Drop this when we drop the table + if fk_table.name == "dns_domains": + # We have one table (dns_domains) where the key is called + # "domain" rather than "id" + fk_column = fk_table.c.domain + else: + fk_column = fk_table.c.id + + for fk in fk_table.foreign_keys: + # We need to find the records in the referring (child) table that + # correspond to the records in our (parent) table so we can archive + # them. 
+ + # First, select the column in the parent referenced by the child + # table that corresponds to the parent table records that were + # passed in. + # Example: table = 'instances' and fk_table = 'instance_extra' + # fk.parent = instance_extra.instance_uuid + # fk.column = instances.uuid + # SELECT instances.uuid FROM instances, instance_extra + # WHERE instance_extra.instance_uuid = instances.uuid + # AND instance.id IN (<ids>) + # We need the instance uuids for the <ids> in order to + # look up the matching instance_extra records. + select = sql.select([fk.column]).where( + sql.and_(fk.parent == fk.column, column.in_(records))) + rows = conn.execute(select).fetchall() + p_records = [r[0] for r in rows] + # Then, select rows in the child table that correspond to the + # parent table records that were passed in. + # Example: table = 'instances' and fk_table = 'instance_extra' + # fk.parent = instance_extra.instance_uuid + # fk.column = instances.uuid + # SELECT instance_extra.id FROM instance_extra, instances + # WHERE instance_extra.instance_uuid = instances.uuid + # AND instances.uuid IN (<uuids>) + # We will get the instance_extra ids we need to archive + # them. + fk_select = sql.select([fk_column]).where( + sql.and_(fk.parent == fk.column, fk.column.in_(p_records))) + fk_rows = conn.execute(fk_select).fetchall() + fk_records = [r[0] for r in fk_rows] + if fk_records: + # If we found any records in the child table, create shadow + # table insert statements for them and prepend them to the + # deque. + fk_columns = [c.name for c in fk_table.c] + fk_insert = fk_shadow_table.insert(inline=True).\ + from_select(fk_columns, sql.select([fk_table], + fk_column.in_(fk_records))) + inserts.appendleft(fk_insert) + # Create main table delete statements and prepend them to the + # deque. + fk_delete = fk_table.delete().where(fk_column.in_(fk_records)) + deletes.appendleft(fk_delete) + # Repeat for any possible nested child tables. 
+ i, d = _get_fk_stmts(metadata, conn, fk_table, fk_column, fk_records) + inserts.extendleft(i) + deletes.extendleft(d) + + return inserts, deletes def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before): """Move up to max_rows rows from one tables to the corresponding shadow table. - :returns: 2-item tuple: + Will also follow FK constraints and archive all referring rows. + Example: archiving a record from the 'instances' table will also archive + the 'instance_extra' record before archiving the 'instances' record. + + :returns: 3-item tuple: - number of rows archived - list of UUIDs of instances that were archived + - number of extra rows archived (due to FK constraints) + dict of {tablename: rows_archived} """ conn = metadata.bind.connect() # NOTE(tdurakov): table metadata should be received @@ -4153,7 +4221,7 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before): shadow_table = Table(shadow_tablename, metadata, autoload=True) except NoSuchTableError: # No corresponding shadow table; skip it. - return rows_archived, deleted_instance_uuids + return rows_archived, deleted_instance_uuids, {} # TODO(stephenfin): Drop this when we drop the table if tablename == "dns_domains": @@ -4176,10 +4244,29 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before): rows = conn.execute(select).fetchall() records = [r[0] for r in rows] + # We will archive deleted rows for this table and also generate insert and + # delete statements for extra rows we may archive by following FK + # relationships. Because we are iterating over the sorted_tables (list of + # Table objects sorted in order of foreign key dependency), new inserts and + # deletes ("leaves") will be added to the fronts of the deques created in + # _get_fk_stmts. This way, we make sure we delete child table records + # before we delete their parent table records. 
+ + # Keep track of any extra tablenames to number of rows that we archive by + # following FK relationships. + # {tablename: extra_rows_archived} + extras = collections.defaultdict(int) if records: insert = shadow_table.insert(inline=True).\ from_select(columns, sql.select([table], column.in_(records))) delete = table.delete().where(column.in_(records)) + # Walk FK relationships and add insert/delete statements for rows that + # refer to this table via FK constraints. fk_inserts and fk_deletes + # will be prepended to by _get_fk_stmts if referring rows are found by + # FK constraints. + fk_inserts, fk_deletes = _get_fk_stmts( + metadata, conn, table, column, records) + # NOTE(tssurya): In order to facilitate the deletion of records from # instance_mappings, request_specs and instance_group_member tables in # the nova_api DB, the rows of deleted instances from the instances @@ -4193,9 +4280,14 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before): try: # Group the insert and delete in a transaction. with conn.begin(): + for fk_insert in fk_inserts: + conn.execute(fk_insert) + for fk_delete in fk_deletes: + result_fk_delete = conn.execute(fk_delete) + extras[fk_delete.table.name] += result_fk_delete.rowcount conn.execute(insert) result_delete = conn.execute(delete) - rows_archived = result_delete.rowcount + rows_archived += result_delete.rowcount except db_exc.DBReferenceError as ex: # A foreign key constraint keeps us from deleting some of # these rows until we clean up a dependent table. 
Just @@ -4204,22 +4296,7 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before): "%(tablename)s: %(error)s", {'tablename': tablename, 'error': six.text_type(ex)}) - # NOTE(jake): instance_actions_events doesn't have a instance_uuid column - # but still needs to be archived as it is a FK constraint - if ((max_rows is None or rows_archived < max_rows) and - # NOTE(melwitt): The pci_devices table uses the 'instance_uuid' - # column to track the allocated association of a PCI device and its - # records are not tied to the lifecycles of instance records. - (tablename != 'pci_devices' and - 'instance_uuid' in columns or - tablename == 'instance_actions_events')): - instances = models.BASE.metadata.tables['instances'] - limit = max_rows - rows_archived if max_rows is not None else None - extra = _archive_if_instance_deleted(table, shadow_table, instances, - conn, limit, before) - rows_archived += extra - - return rows_archived, deleted_instance_uuids + return rows_archived, deleted_instance_uuids, extras def archive_deleted_rows(context=None, max_rows=None, before=None): @@ -4243,13 +4320,18 @@ def archive_deleted_rows(context=None, max_rows=None, before=None): - list of UUIDs of instances that were archived - total number of rows that were archived """ - table_to_rows_archived = {} + table_to_rows_archived = collections.defaultdict(int) deleted_instance_uuids = [] total_rows_archived = 0 meta = MetaData(get_engine(use_slave=True, context=context)) meta.reflect() - # Reverse sort the tables so we get the leaf nodes first for processing. - for table in reversed(meta.sorted_tables): + # Get the sorted list of tables in order of foreign key dependency. + # Process the parent tables and find their dependent records in order to + # archive the related records in a single database transaction. 
The goal + # is to avoid a situation where, for example, an 'instances' table record + # is missing its corresponding 'instance_extra' record due to running the + # archive_deleted_rows command with max_rows. + for table in meta.sorted_tables: tablename = table.name rows_archived = 0 # skip the special sqlalchemy-migrate migrate_version table and any @@ -4257,7 +4339,7 @@ def archive_deleted_rows(context=None, max_rows=None, before=None): if (tablename == 'migrate_version' or tablename.startswith(_SHADOW_TABLE_PREFIX)): continue - rows_archived, _deleted_instance_uuids = ( + rows_archived, _deleted_instance_uuids, extras = ( _archive_deleted_rows_for_table( meta, tablename, max_rows=max_rows - total_rows_archived, @@ -4268,6 +4350,9 @@ def archive_deleted_rows(context=None, max_rows=None, before=None): # Only report results for tables that had updates. if rows_archived: table_to_rows_archived[tablename] = rows_archived + for tablename, extra_rows_archived in extras.items(): + table_to_rows_archived[tablename] += extra_rows_archived + total_rows_archived += extra_rows_archived if total_rows_archived >= max_rows: break return table_to_rows_archived, deleted_instance_uuids, total_rows_archived diff --git a/nova/network/neutron.py b/nova/network/neutron.py index 66194cb696..f3d763044e 100644 --- a/nova/network/neutron.py +++ b/nova/network/neutron.py @@ -273,6 +273,7 @@ def _get_ksa_client(context, admin=False): client = utils.get_ksa_adapter( 'network', ksa_auth=auth_plugin, ksa_session=session) client.additional_headers = {'accept': 'application/json'} + client.connect_retries = CONF.neutron.http_retries return client diff --git a/nova/test.py b/nova/test.py index 6f5a8eebe3..0aaf5db80c 100644 --- a/nova/test.py +++ b/nova/test.py @@ -50,10 +50,13 @@ from oslotest import base from oslotest import mock_fixture import six from six.moves import builtins +from sqlalchemy.dialects import sqlite import testtools +from nova.api.openstack import wsgi_app from nova.compute 
import rpcapi as compute_rpcapi from nova import context +from nova.db.sqlalchemy import api as sqlalchemy_api from nova import exception from nova import objects from nova.objects import base as objects_base @@ -285,6 +288,10 @@ class TestCase(base.BaseTestCase): quota.UID_QFD_POPULATED_CACHE_BY_PROJECT = set() quota.UID_QFD_POPULATED_CACHE_ALL = False + # make sure that the wsgi app is fully initialized for all testcase + # instead of only once initialized for test worker + wsgi_app.init_global_data.reset() + def _setup_cells(self): """Setup a normal cellsv2 environment. @@ -378,6 +385,22 @@ class TestCase(base.BaseTestCase): for k, v in kw.items(): CONF.set_override(k, v, group) + def enforce_fk_constraints(self, engine=None): + if engine is None: + engine = sqlalchemy_api.get_engine() + dialect = engine.url.get_dialect() + if dialect == sqlite.dialect: + # We're seeing issues with foreign key support in SQLite 3.6.20 + # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 + # It works fine in SQLite 3.7. 
+ # So return early to skip this test if running SQLite < 3.7 + import sqlite3 + tup = sqlite3.sqlite_version_info + if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7): + self.skipTest( + 'sqlite version too old for reliable SQLA foreign_keys') + engine.connect().execute("PRAGMA foreign_keys = ON") + def start_service(self, name, host=None, cell_name=None, **kwargs): # Disallow starting multiple scheduler services if name == 'scheduler' and self._service_fixture_count[name]: diff --git a/nova/tests/functional/db/test_archive.py b/nova/tests/functional/db/test_archive.py index 8a6211cdcf..7a5e55963d 100644 --- a/nova/tests/functional/db/test_archive.py +++ b/nova/tests/functional/db/test_archive.py @@ -17,7 +17,6 @@ import re from dateutil import parser as dateutil_parser from oslo_utils import timeutils -from sqlalchemy.dialects import sqlite from sqlalchemy import func from sqlalchemy import MetaData from sqlalchemy import select @@ -33,22 +32,7 @@ class TestDatabaseArchive(test_servers.ServersTestBase): def setUp(self): super(TestDatabaseArchive, self).setUp() - # TODO(mriedem): pull this out so we can re-use it in - # test_archive_deleted_rows_fk_constraint - # SQLite doesn't enforce foreign key constraints without a pragma. - engine = sqlalchemy_api.get_engine() - dialect = engine.url.get_dialect() - if dialect == sqlite.dialect: - # We're seeing issues with foreign key support in SQLite 3.6.20 - # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 - # It works fine in SQLite 3.7. - # So return early to skip this test if running SQLite < 3.7 - import sqlite3 - tup = sqlite3.sqlite_version_info - if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7): - self.skipTest( - 'sqlite version too old for reliable SQLA foreign_keys') - engine.connect().execute("PRAGMA foreign_keys = ON") + self.enforce_fk_constraints() def test_archive_deleted_rows(self): # Boots a server, deletes it, and then tries to archive it. 
@@ -144,6 +128,47 @@ class TestDatabaseArchive(test_servers.ServersTestBase): # Verify that the pci_devices record has not been dropped self.assertNotIn('pci_devices', results) + def test_archive_deleted_rows_incomplete(self): + """This tests a scenario where archive_deleted_rows is run with + --max_rows and does not run to completion. + + That is, the archive is stopped before all archivable records have been + archived. Specifically, the problematic state is when a single instance + becomes partially archived (example: 'instance_extra' record for one + instance has been archived while its 'instances' record remains). Any + access of the instance (example: listing deleted instances) that + triggers the retrieval of a dependent record that has been archived + away, results in undefined behavior that may raise an error. + + We will force the system into a state where a single deleted instance + is partially archived. We want to verify that we can, for example, + successfully do a GET /servers/detail at any point between partial + archive_deleted_rows runs without errors. + """ + # Boots a server, deletes it, and then tries to archive it. + server = self._create_server() + server_id = server['id'] + # Assert that there are instance_actions. instance_actions are + # interesting since we don't soft delete them but they have a foreign + # key back to the instances table. + actions = self.api.get_instance_actions(server_id) + self.assertTrue(len(actions), + 'No instance actions for server: %s' % server_id) + self._delete_server(server) + # Archive deleted records iteratively, 1 row at a time, and try to do a + # GET /servers/detail between each run. All should succeed. + exceptions = [] + while True: + _, _, archived = db.archive_deleted_rows(max_rows=1) + try: + # Need to use the admin API to list deleted servers. 
+ self.admin_api.get_servers(search_opts={'deleted': True}) + except Exception as ex: + exceptions.append(ex) + if archived == 0: + break + self.assertFalse(exceptions) + def _get_table_counts(self): engine = sqlalchemy_api.get_engine() conn = engine.connect() diff --git a/nova/tests/functional/test_nova_manage.py b/nova/tests/functional/test_nova_manage.py index 811224e96e..65abb45241 100644 --- a/nova/tests/functional/test_nova_manage.py +++ b/nova/tests/functional/test_nova_manage.py @@ -1657,6 +1657,7 @@ class TestDBArchiveDeletedRows(integrated_helpers._IntegratedTestBase): def setUp(self): super(TestDBArchiveDeletedRows, self).setUp() + self.enforce_fk_constraints() self.cli = manage.DbCommands() self.output = StringIO() self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output)) @@ -1702,6 +1703,7 @@ class TestDBArchiveDeletedRowsMultiCell(integrated_helpers.InstanceHelperMixin, def setUp(self): super(TestDBArchiveDeletedRowsMultiCell, self).setUp() + self.enforce_fk_constraints() self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(func_fixtures.PlacementFixture()) diff --git a/nova/tests/unit/api/openstack/test_wsgi_app.py b/nova/tests/unit/api/openstack/test_wsgi_app.py new file mode 100644 index 0000000000..4cb7459c98 --- /dev/null +++ b/nova/tests/unit/api/openstack/test_wsgi_app.py @@ -0,0 +1,85 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import tempfile + +import fixtures +import mock +from oslo_config import fixture as config_fixture +from oslotest import base + +from nova.api.openstack import wsgi_app +from nova import test +from nova.tests import fixtures as nova_fixtures + + +class WSGIAppTest(base.BaseTestCase): + + _paste_config = """ +[app:nova-api] +use = egg:Paste#static +document_root = /tmp + """ + + def setUp(self): + # Ensure BaseTestCase's ConfigureLogging fixture is disabled since + # we're using our own (StandardLogging). + with fixtures.EnvironmentVariable('OS_LOG_CAPTURE', '0'): + super(WSGIAppTest, self).setUp() + self.stdlog = self.useFixture(nova_fixtures.StandardLogging()) + self.conf = tempfile.NamedTemporaryFile(mode='w+t') + self.conf.write(self._paste_config.lstrip()) + self.conf.seek(0) + self.conf.flush() + self.addCleanup(self.conf.close) + # Use of this fixture takes care of cleaning up config settings for + # subsequent tests. + self.useFixture(config_fixture.Config()) + + @mock.patch('sys.argv', return_value=mock.sentinel.argv) + @mock.patch('nova.db.sqlalchemy.api.configure') + @mock.patch('nova.api.openstack.wsgi_app._setup_service') + @mock.patch('nova.api.openstack.wsgi_app._get_config_files') + def test_init_application_called_twice(self, mock_get_files, mock_setup, + mock_db_configure, mock_argv): + """Test that init_application can tolerate being called twice in a + single python interpreter instance. + + When nova-api is run via mod_wsgi, if any exception is raised during + init_application, mod_wsgi will re-run the WSGI script without + restarting the daemon process even when configured for Daemon Mode. + + We access the database as part of init_application, so if nova-api + starts up before the database is up, we'll get, for example, a + DBConnectionError raised during init_application and our WSGI script + will get reloaded/re-run by mod_wsgi. 
+ """ + mock_get_files.return_value = [self.conf.name] + mock_setup.side_effect = [test.TestingException, None] + # We need to mock the global database configure() method, else we will + # be affected by global database state altered by other tests that ran + # before this test, causing this test to fail with + # oslo_db.sqlalchemy.enginefacade.AlreadyStartedError. We can instead + # mock the method to raise an exception if it's called a second time in + # this test to simulate the fact that the database does not tolerate + # re-init [after a database query has been made]. + mock_db_configure.side_effect = [None, test.TestingException] + # Run init_application the first time, simulating an exception being + # raised during it. + self.assertRaises(test.TestingException, wsgi_app.init_application, + 'nova-api') + # Now run init_application a second time, it should succeed since no + # exception is being raised (the init of global data should not be + # re-attempted). + wsgi_app.init_application('nova-api') + self.assertIn('Global data already initialized, not re-initializing.', + self.stdlog.logger.output) diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py index 3c234df891..92883134a2 100644 --- a/nova/tests/unit/console/test_websocketproxy.py +++ b/nova/tests/unit/console/test_websocketproxy.py @@ -15,6 +15,7 @@ """Tests for nova websocketproxy.""" import copy +import io import socket import mock @@ -626,6 +627,72 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase): self.wh.server.top_new_client(conn, address) self.assertIsNone(self.wh._compute_rpcapi) + def test_reject_open_redirect(self): + # This will test the behavior when an attempt is made to cause an open + # redirect. It should be rejected. + mock_req = mock.MagicMock() + mock_req.makefile().readline.side_effect = [ + b'GET //example.com/%2F.. 
HTTP/1.1\r\n', + b'' + ] + + client_addr = ('8.8.8.8', 54321) + mock_server = mock.MagicMock() + # This specifies that the server will be able to handle requests other + # than only websockets. + mock_server.only_upgrade = False + + # Constructing a handler will process the mock_req request passed in. + handler = websocketproxy.NovaProxyRequestHandler( + mock_req, client_addr, mock_server) + + # Collect the response data to verify at the end. The + # SimpleHTTPRequestHandler writes the response data to a 'wfile' + # attribute. + output = io.BytesIO() + handler.wfile = output + # Process the mock_req again to do the capture. + handler.do_GET() + output.seek(0) + result = output.readlines() + + # Verify no redirect happens and instead a 400 Bad Request is returned. + self.assertIn('400 URI must not start with //', result[0].decode()) + + def test_reject_open_redirect_3_slashes(self): + # This will test the behavior when an attempt is made to cause an open + # redirect. It should be rejected. + mock_req = mock.MagicMock() + mock_req.makefile().readline.side_effect = [ + b'GET ///example.com/%2F.. HTTP/1.1\r\n', + b'' + ] + + # Collect the response data to verify at the end. The + # SimpleHTTPRequestHandler writes the response data by calling the + # request socket sendall() method. + self.data = b'' + + def fake_sendall(data): + self.data += data + + mock_req.sendall.side_effect = fake_sendall + + client_addr = ('8.8.8.8', 54321) + mock_server = mock.MagicMock() + # This specifies that the server will be able to handle requests other + # than only websockets. + mock_server.only_upgrade = False + + # Constructing a handler will process the mock_req request passed in. + websocketproxy.NovaProxyRequestHandler( + mock_req, client_addr, mock_server) + + # Verify no redirect happens and instead a 400 Bad Request is returned. 
+ self.data = self.data.decode() + self.assertIn('Error code: 400', self.data) + self.assertIn('Message: URI must not start with //', self.data) + @mock.patch('websockify.websocketproxy.select_ssl_version') def test_ssl_min_version_is_not_set(self, mock_select_ssl): websocketproxy.NovaWebSocketProxy() diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py index a9943f1c37..6b80f21661 100644 --- a/nova/tests/unit/db/test_db_api.py +++ b/nova/tests/unit/db/test_db_api.py @@ -39,7 +39,6 @@ from oslo_utils import uuidutils import six from six.moves import range from sqlalchemy import Column -from sqlalchemy.dialects import sqlite from sqlalchemy.exc import OperationalError from sqlalchemy.exc import SQLAlchemyError from sqlalchemy import inspect @@ -6296,17 +6295,24 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin): instance_actions_events=1) self._assertEqualObjects(expected, results[0]) - # Archive 1 row deleted before 2017-01-03. instance_action_events - # should be the table with row deleted due to FK contraints + # Archive 1 row deleted before 2017-01-03 + # Because the instances table will be processed first, tables that + # refer to it (instance_actions and instance_action_events) will be + # visited and archived in the same transaction as the instance, to + # avoid orphaning the instance record (archive dependent records in one + # transaction) before_date = dateutil_parser.parse('2017-01-03', fuzzy=True) results = db.archive_deleted_rows(max_rows=1, before=before_date) - expected = dict(instance_actions_events=1) + expected = dict(instances=1, + instance_actions=1, + instance_actions_events=1) self._assertEqualObjects(expected, results[0]) - # Archive all other rows deleted before 2017-01-03. This should - # delete row in instance_actions, then row in instances due to FK - # constraints + # Try to archive all other rows deleted before 2017-01-03. 
This should + # not archive anything because the instances table and tables that + # refer to it (instance_actions and instance_action_events) were all + # archived in the last run. results = db.archive_deleted_rows(max_rows=100, before=before_date) - expected = dict(instances=1, instance_actions=1) + expected = {} self._assertEqualObjects(expected, results[0]) # Verify we have 4 left in main @@ -6440,18 +6446,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin): def _check_sqlite_version_less_than_3_7(self): # SQLite doesn't enforce foreign key constraints without a pragma. - dialect = self.engine.url.get_dialect() - if dialect == sqlite.dialect: - # We're seeing issues with foreign key support in SQLite 3.6.20 - # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 - # It works fine in SQLite 3.7. - # So return early to skip this test if running SQLite < 3.7 - import sqlite3 - tup = sqlite3.sqlite_version_info - if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7): - self.skipTest( - 'sqlite version too old for reliable SQLA foreign_keys') - self.conn.execute("PRAGMA foreign_keys = ON") + self.enforce_fk_constraints(engine=self.engine) def test_archive_deleted_rows_for_migrations(self): # migrations.instance_uuid depends on instances.uuid @@ -6465,19 +6460,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin): ins_stmt = self.migrations.insert().values(instance_uuid=instance_uuid, deleted=0) self.conn.execute(ins_stmt) - # The first try to archive instances should fail, due to FK. - num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata, - "instances", - max_rows=None, - before=None) - self.assertEqual(0, num[0]) - # Then archiving migrations should work. - num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata, - "migrations", - max_rows=None, - before=None) - self.assertEqual(1, num[0]) - # Then archiving instances should work. 
+ # Archiving instances should result in migrations related to the + # instances also being archived. num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata, "instances", max_rows=None, diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py index 3e6dc6417b..5d7c2069eb 100644 --- a/nova/tests/unit/network/test_neutron.py +++ b/nova/tests/unit/network/test_neutron.py @@ -253,6 +253,8 @@ class TestNeutronClient(test.NoDBTestCase): auth_token='token') cl = neutronapi.get_client(my_context) self.assertEqual(retries, cl.httpclient.connect_retries) + kcl = neutronapi._get_ksa_client(my_context) + self.assertEqual(retries, kcl.connect_retries) class TestAPIBase(test.TestCase): diff --git a/nova/tests/unit/test_utils.py b/nova/tests/unit/test_utils.py index 2b92cf9144..1b2872f35c 100644 --- a/nova/tests/unit/test_utils.py +++ b/nova/tests/unit/test_utils.py @@ -1326,3 +1326,101 @@ class TestOldComputeCheck(test.NoDBTestCase): 'not allowed to directly access the database. You should run this ' 'service without the [api_database]/connection config option. The ' 'service version check will only query the local cell.') + + +class RunOnceTests(test.NoDBTestCase): + + fake_logger = mock.MagicMock() + + @utils.run_once("already ran once", fake_logger) + def dummy_test_func(self, fail=False): + if fail: + raise ValueError() + return True + + def setUp(self): + super(RunOnceTests, self).setUp() + self.dummy_test_func.reset() + RunOnceTests.fake_logger.reset_mock() + + def test_wrapped_funtions_called_once(self): + self.assertFalse(self.dummy_test_func.called) + result = self.dummy_test_func() + self.assertTrue(result) + self.assertTrue(self.dummy_test_func.called) + + # assert that on second invocation no result + # is returned and that the logger is invoked. 
+ result = self.dummy_test_func() + RunOnceTests.fake_logger.assert_called_once() + self.assertIsNone(result) + + def test_wrapped_funtions_called_once_raises(self): + self.assertFalse(self.dummy_test_func.called) + self.assertRaises(ValueError, self.dummy_test_func, fail=True) + self.assertTrue(self.dummy_test_func.called) + + # assert that on second invocation no result + # is returned and that the logger is invoked. + result = self.dummy_test_func() + RunOnceTests.fake_logger.assert_called_once() + self.assertIsNone(result) + + def test_wrapped_funtions_can_be_reset(self): + # assert we start with a clean state + self.assertFalse(self.dummy_test_func.called) + result = self.dummy_test_func() + self.assertTrue(result) + + self.dummy_test_func.reset() + # assert we restored a clean state + self.assertFalse(self.dummy_test_func.called) + result = self.dummy_test_func() + self.assertTrue(result) + + # assert that we never called the logger + RunOnceTests.fake_logger.assert_not_called() + + def test_reset_calls_cleanup(self): + mock_clean = mock.Mock() + + @utils.run_once("already ran once", self.fake_logger, + cleanup=mock_clean) + def f(): + pass + + f() + self.assertTrue(f.called) + + f.reset() + self.assertFalse(f.called) + mock_clean.assert_called_once_with() + + def test_clean_is_not_called_at_reset_if_wrapped_not_called(self): + mock_clean = mock.Mock() + + @utils.run_once("already ran once", self.fake_logger, + cleanup=mock_clean) + def f(): + pass + + self.assertFalse(f.called) + + f.reset() + self.assertFalse(f.called) + self.assertFalse(mock_clean.called) + + def test_reset_works_even_if_cleanup_raises(self): + mock_clean = mock.Mock(side_effect=ValueError()) + + @utils.run_once("already ran once", self.fake_logger, + cleanup=mock_clean) + def f(): + pass + + f() + self.assertTrue(f.called) + + self.assertRaises(ValueError, f.reset) + self.assertFalse(f.called) + mock_clean.assert_called_once_with() diff --git a/nova/tests/unit/virt/libvirt/test_vif.py 
b/nova/tests/unit/virt/libvirt/test_vif.py index dcb8dba990..240f6143d5 100644 --- a/nova/tests/unit/virt/libvirt/test_vif.py +++ b/nova/tests/unit/virt/libvirt/test_vif.py @@ -380,6 +380,10 @@ class LibvirtVifTestCase(test.NoDBTestCase): uuid='f0000000-0000-0000-0000-000000000001', project_id=723) + flavor_1vcpu = objects.Flavor(vcpus=1, memory=512, root_gb=1) + + flavor_2vcpu = objects.Flavor(vcpus=2, memory=512, root_gb=1) + bandwidth = { 'quota:vif_inbound_peak': '200', 'quota:vif_outbound_peak': '20', @@ -1091,30 +1095,50 @@ class LibvirtVifTestCase(test.NoDBTestCase): @mock.patch('nova.privsep.linux_net.device_exists', return_value=True) @mock.patch('nova.privsep.linux_net.set_device_mtu') @mock.patch('nova.privsep.linux_net.create_tap_dev') - def test_plug_tap_kvm_virtio(self, mock_create_tap_dev, mock_set_mtu, - mock_device_exists): + def test_plug_tap_kvm_virtio( + self, mock_create_tap_dev, mock_set_mtu, mock_device_exists): d1 = vif.LibvirtGenericVIFDriver() ins = objects.Instance( id=1, uuid='f0000000-0000-0000-0000-000000000001', + flavor=self.flavor_2vcpu, project_id=723, system_metadata={} ) d1.plug(ins, self.vif_tap) - mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz', None, - multiqueue=False) + mock_create_tap_dev.assert_called_once_with( + 'tap-xxx-yyy-zzz', None, multiqueue=False) mock_create_tap_dev.reset_mock() d2 = vif.LibvirtGenericVIFDriver() mq_ins = objects.Instance( id=1, uuid='f0000000-0000-0000-0000-000000000001', + flavor=self.flavor_2vcpu, project_id=723, system_metadata={ 'image_hw_vif_multiqueue_enabled': 'True' } ) d2.plug(mq_ins, self.vif_tap) - mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz', None, - multiqueue=True) + mock_create_tap_dev.assert_called_once_with( + 'tap-xxx-yyy-zzz', None, multiqueue=True) + + @mock.patch('nova.privsep.linux_net.device_exists', return_value=True) + @mock.patch('nova.privsep.linux_net.set_device_mtu') + @mock.patch('nova.privsep.linux_net.create_tap_dev') + def 
test_plug_tap_mq_ignored_1vcpu( + self, mock_create_tap_dev, mock_set_mtu, mock_device_exists): + + d1 = vif.LibvirtGenericVIFDriver() + mq_ins = objects.Instance( + id=1, uuid='f0000000-0000-0000-0000-000000000001', + image_ref=uuids.image_ref, flavor=self.flavor_1vcpu, + project_id=723, system_metadata={ + 'image_hw_vif_multiqueue_enabled': 'True', + } + ) + d1.plug(mq_ins, self.vif_tap) + mock_create_tap_dev.assert_called_once_with( + 'tap-xxx-yyy-zzz', None, multiqueue=False) @mock.patch('nova.privsep.linux_net.device_exists', return_value=True) @mock.patch('nova.privsep.linux_net.set_device_mtu') @@ -1129,14 +1153,14 @@ class LibvirtVifTestCase(test.NoDBTestCase): d1 = vif.LibvirtGenericVIFDriver() ins = objects.Instance( id=1, uuid='f0000000-0000-0000-0000-000000000001', + flavor=self.flavor_2vcpu, project_id=723, system_metadata={ 'image_hw_vif_multiqueue_enabled': 'True' } ) d1.plug(ins, self.vif_tap) - mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz', - None, - multiqueue=False) + mock_create_tap_dev.assert_called_once_with( + 'tap-xxx-yyy-zzz', None, multiqueue=False) @mock.patch('nova.privsep.linux_net.device_exists', return_value=True) @mock.patch('nova.privsep.linux_net.set_device_mtu') @@ -1147,15 +1171,15 @@ class LibvirtVifTestCase(test.NoDBTestCase): d1 = vif.LibvirtGenericVIFDriver() ins = objects.Instance( id=1, uuid='f0000000-0000-0000-0000-000000000001', + flavor=self.flavor_2vcpu, project_id=723, system_metadata={ 'image_hw_vif_multiqueue_enabled': 'True', 'image_hw_vif_model': 'e1000', } ) d1.plug(ins, self.vif_tap) - mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz', - None, - multiqueue=False) + mock_create_tap_dev.assert_called_once_with( + 'tap-xxx-yyy-zzz', None, multiqueue=False) def test_unplug_tap(self): d = vif.LibvirtGenericVIFDriver() diff --git a/nova/utils.py b/nova/utils.py index 53b87f98be..56e3aa300a 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -1195,3 +1195,49 @@ def raise_if_old_compute(): 
scope=scope, min_service_level=current_service_version, oldest_supported_service=oldest_supported_service_level) + + +def run_once(message, logger, cleanup=None): + """This is a utility function decorator to ensure a function + is run once and only once in an interpreter instance. + + Note: this is copied from the placement repo (placement/util.py) + + The decorated function object can be reset by calling its + reset function. All exceptions raised by the wrapped function, + logger and cleanup function will be propagated to the caller. + """ + def outer_wrapper(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + if not wrapper.called: + # Note(sean-k-mooney): the called state is always + # updated even if the wrapped function completes + # by raising an exception. If the caller catches + # the exception it is their responsibility to call + # reset if they want to re-execute the wrapped function. + try: + return func(*args, **kwargs) + finally: + wrapper.called = True + else: + logger(message) + + wrapper.called = False + + def reset(wrapper, *args, **kwargs): + # Note(sean-k-mooney): we conditionally call the + # cleanup function if one is provided only when the + # wrapped function has been called previously. We catch + # and reraise any exception that may be raised and update + # the called state in a finally block to ensure its + # always updated if reset is called. + try: + if cleanup and wrapper.called: + return cleanup(*args, **kwargs) + finally: + wrapper.called = False + + wrapper.reset = functools.partial(reset, wrapper) + return wrapper + return outer_wrapper diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index cf8aff35a0..c40b23f0f8 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -722,7 +722,8 @@ class LibvirtGenericVIFDriver(object): vif_model = self.get_vif_model(image_meta=image_meta) # TODO(ganso): explore whether multiqueue works for other vif models # that go through this code path. 
- multiqueue = (self._requests_multiqueue(image_meta) and + multiqueue = (instance.get_flavor().vcpus > 1 and + self._requests_multiqueue(image_meta) and vif_model == network_model.VIF_MODEL_VIRTIO) nova.privsep.linux_net.create_tap_dev(dev, mac, multiqueue=multiqueue) network = vif.get('network') diff --git a/releasenotes/notes/bug-1939604-547c729b7741831b.yaml b/releasenotes/notes/bug-1939604-547c729b7741831b.yaml new file mode 100644 index 0000000000..e14327c285 --- /dev/null +++ b/releasenotes/notes/bug-1939604-547c729b7741831b.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Addressed an issue that prevented instances with 1 vcpu using multiqueue + feature from being created successfully when their vif_type is TAP. diff --git a/releasenotes/notes/console-proxy-reject-open-redirect-4ac0a7895acca7eb.yaml b/releasenotes/notes/console-proxy-reject-open-redirect-4ac0a7895acca7eb.yaml new file mode 100644 index 0000000000..ce05b9a867 --- /dev/null +++ b/releasenotes/notes/console-proxy-reject-open-redirect-4ac0a7895acca7eb.yaml @@ -0,0 +1,19 @@ +--- +security: + - | + A vulnerability in the console proxies (novnc, serial, spice) that allowed + open redirection has been `patched`_. The novnc, serial, and spice console + proxies are implemented as websockify servers and the request handler + inherits from the python standard SimpleHTTPRequestHandler. There is a + `known issue`_ in the SimpleHTTPRequestHandler which allows open redirects + by way of URLs in the following format:: + + http://vncproxy.my.domain.com//example.com/%2F.. + + which if visited, will redirect a user to example.com. + + The novnc, serial, and spice console proxies will now reject requests that + pass a redirection URL beginning with "//" with a 400 Bad Request. + + .. _patched: https://bugs.launchpad.net/nova/+bug/1927677 + .. 
_known issue: https://bugs.python.org/issue32084 diff --git a/tools/check-cherry-picks.sh b/tools/check-cherry-picks.sh index 5ca6ded203..5a449c520b 100755 --- a/tools/check-cherry-picks.sh +++ b/tools/check-cherry-picks.sh @@ -4,11 +4,6 @@ # to verify that they're all on either master or stable/ branches # -# Allow this script to be disabled by a simple env var -if [ ${DISABLE_CHERRY_PICK_CHECK:-0} -eq 1 ]; then - exit 0 -fi - commit_hash="" # Check if the patch is a merge patch by counting the number of parents. @@ -1,10 +1,14 @@ [tox] -minversion = 3.1.1 +minversion = 3.2.0 envlist = py37,functional,pep8 # Automatic envs (pyXX) will only use the python version appropriate to that # env and ignore basepython inherited from [testenv] if we set # ignore_basepython_conflict. ignore_basepython_conflict = True +# Pin the virtualenv and therefore the setuptools version used for the env +# creation. This results in a new tox being installed in .tox/.tox virtualenv +# and the tox on the host will delegate all the calls to the tox in that env. +requires = virtualenv<20.8 [testenv] basepython = python3 @@ -42,15 +46,12 @@ commands = description = Run style checks. envdir = {toxworkdir}/shared -passenv = - DISABLE_CHERRY_PICK_CHECK commands = bash tools/flake8wrap.sh {posargs} # Check that all JSON files don't have \r\n in line. bash -c "! find doc/ -type f -name *.json | xargs grep -U -n $'\r'" # Check that all included JSON files are valid JSON bash -c '! find doc/ -type f -name *.json | xargs -t -n1 python -m json.tool 2>&1 > /dev/null | grep -B1 -v ^python' - bash tools/check-cherry-picks.sh [testenv:fast8] description = @@ -59,6 +60,15 @@ envdir = {toxworkdir}/shared commands = bash tools/flake8wrap.sh -HEAD +[testenv:validate-backport] +description = + Determine whether a backport is ready to be merged by checking whether it has + already been merged to master or more recent stable branches. 
+deps = +skipsdist = true +commands = + bash tools/check-cherry-picks.sh + [testenv:functional] description = Run functional tests using python3. |