-rw-r--r--  .zuul.yaml                                              14
-rw-r--r--  nova/api/openstack/wsgi_app.py                          24
-rw-r--r--  nova/db/sqlalchemy/api.py                              221
-rw-r--r--  nova/network/neutron.py                                  1
-rw-r--r--  nova/test.py                                            23
-rw-r--r--  nova/tests/functional/db/test_archive.py                59
-rw-r--r--  nova/tests/functional/test_nova_manage.py                2
-rw-r--r--  nova/tests/unit/api/openstack/test_wsgi_app.py          85
-rw-r--r--  nova/tests/unit/db/test_db_api.py                       50
-rw-r--r--  nova/tests/unit/network/test_neutron.py                  2
-rw-r--r--  nova/tests/unit/test_utils.py                           98
-rw-r--r--  nova/tests/unit/virt/libvirt/test_vif.py                48
-rw-r--r--  nova/utils.py                                           46
-rw-r--r--  nova/virt/libvirt/vif.py                                 3
-rw-r--r--  releasenotes/notes/bug-1939604-547c729b7741831b.yaml     5
-rwxr-xr-x  tools/check-cherry-picks.sh                              5
-rw-r--r--  tox.ini                                                 12
17 files changed, 557 insertions, 141 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 2ec47a6f30..c5df8beb16 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -79,6 +79,17 @@
timeout: 3600
- job:
+ name: nova-tox-validate-backport
+ parent: openstack-tox
+ description: |
+ Determine whether a backport is ready to be merged by checking whether it
+ has already been merged to master or more recent stable branches.
+
+ Uses tox with the ``validate-backport`` environment.
+ vars:
+ tox_envlist: validate-backport
+
+- job:
name: nova-live-migration
parent: tempest-multinode-full-py3
description: |
@@ -400,6 +411,8 @@
- nova-multi-cell
- nova-next
- nova-tox-functional-py36
+ - nova-tox-validate-backport:
+ voting: false
- tempest-integrated-compute:
# NOTE(gmann): Policy changes do not need to run all the
# integration test jobs. Running only tempest and grenade
@@ -434,6 +447,7 @@
- nova-tox-functional-py36
- nova-multi-cell
- nova-next
+ - nova-tox-validate-backport
- tempest-integrated-compute:
irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
diff --git a/nova/api/openstack/wsgi_app.py b/nova/api/openstack/wsgi_app.py
index 481ce71834..aa9af49743 100644
--- a/nova/api/openstack/wsgi_app.py
+++ b/nova/api/openstack/wsgi_app.py
@@ -30,6 +30,8 @@ CONF = cfg.CONF
CONFIG_FILES = ['api-paste.ini', 'nova.conf']
+LOG = logging.getLogger(__name__)
+
objects.register_all()
@@ -78,8 +80,12 @@ def error_application(exc, name):
return application
-def init_application(name):
- conf_files = _get_config_files()
+@utils.run_once('Global data already initialized, not re-initializing.',
+ LOG.info)
+def init_global_data(conf_files):
+ # NOTE(melwitt): parse_args initializes logging and calls global rpc.init()
+ # and db_api.configure(). The db_api.configure() call does not initiate any
+ # connection to the database.
config.parse_args([], default_config_files=conf_files)
logging.setup(CONF, "nova")
@@ -94,11 +100,25 @@ def init_application(name):
logging.getLogger(__name__),
logging.DEBUG)
+
+def init_application(name):
+ conf_files = _get_config_files()
+
+ # NOTE(melwitt): The init_application method can be called multiple times
+ # within a single python interpreter instance if any exception is raised
+ # during it (example: DBConnectionError while setting up the service) and
+ # apache/mod_wsgi reloads the init_application script. So, we initialize
+ # global data separately and decorate the method to run only once in a
+ # python interpreter instance.
+ init_global_data(conf_files)
+
try:
_setup_service(CONF.host, name)
except exception.ServiceTooOld as exc:
return error_application(exc, name)
+ # This global init is safe because if we got here, we already successfully
+ # set up the service and setting up the profiler cannot fail.
service.setup_profiler(name, CONF.host)
conf = conf_files[0]
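
For illustration, a minimal sketch of the guarded startup sequence (assuming
only the run_once decorator added to nova/utils.py later in this change; the
logger and config file names are illustrative):

    import logging

    from nova import utils

    LOG = logging.getLogger(__name__)

    @utils.run_once('Global data already initialized, not re-initializing.',
                    LOG.info)
    def init_global_data(conf_files):
        LOG.info('initializing with %s', conf_files)

    init_global_data(['nova.conf'])  # first call: the body runs
    init_global_data(['nova.conf'])  # re-run after a failed startup: only
                                     # the message is logged, no re-init
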
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index af1c7c3d6a..1fa7f5a322 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -4080,64 +4080,132 @@ def task_log_end_task(context, task_name, period_beginning, period_ending,
##################
-def _archive_if_instance_deleted(table, shadow_table, instances, conn,
- max_rows, before):
- """Look for records that pertain to deleted instances, but may not be
- deleted themselves. This catches cases where we delete an instance,
- but leave some residue because of a failure in a cleanup path or
- similar.
-
- Logic is: if I have a column called instance_uuid, and that instance
- is deleted, then I can be deleted.
- """
-
- # NOTE(jake): handle instance_actions_events differently as it relies on
- # instance_actions.id not instances.uuid
- if table.name == "instance_actions_events":
- instance_actions = models.BASE.metadata.tables["instance_actions"]
- query_select = sql.select(
- [table],
- and_(instances.c.deleted != instances.c.deleted.default.arg,
- instances.c.uuid == instance_actions.c.instance_uuid,
- instance_actions.c.id == table.c.action_id))
+def _get_tables_with_fk_to_table(table):
+ """Get a list of tables that refer to the given table by foreign key (FK).
- else:
- query_select = sql.select(
- [table],
- and_(instances.c.deleted != instances.c.deleted.default.arg,
- instances.c.uuid == table.c.instance_uuid))
-
- if before:
- query_select = query_select.where(instances.c.deleted_at < before)
+ :param table: Table object (parent) for which to find references by FK
- query_select = query_select.order_by(table.c.id).limit(max_rows)
-
- query_insert = shadow_table.insert(inline=True).\
- from_select([c.name for c in table.c], query_select)
-
- delete_statement = DeleteFromSelect(table, query_select,
- table.c.id)
+ :returns: A list of Table objects that refer to the specified table by FK
+ """
+ tables = []
+ for t in models.BASE.metadata.tables.values():
+ for fk in t.foreign_keys:
+ if fk.references(table):
+ tables.append(t)
+ return tables
+
+
+def _get_fk_stmts(metadata, conn, table, column, records):
+ """Find records related to this table by foreign key (FK) and create and
+ return insert/delete statements for them.
+
+ Logic is: find the tables that reference the table passed to this method
+ and walk the tree of references by FK. As child records are found, prepend
+ them to deques to execute later in a single database transaction (to avoid
+ orphaning related records if any one insert/delete fails or the archive
+ process is otherwise interrupted).
+
+ :param metadata: Metadata object to use to construct a shadow Table object
+ :param conn: Connection object to use to select records related by FK
+ :param table: Table object (parent) for which to find references by FK
+ :param column: Column object (parent) to use to select records related by
+ FK
+ :param records: A list of records (column values) to use to select records
+ related by FK
+
+ :returns: tuple of (insert statements, delete statements) for records
+ related by FK to insert into shadow tables and delete from main tables
+ """
+ inserts = collections.deque()
+ deletes = collections.deque()
+ fk_tables = _get_tables_with_fk_to_table(table)
+ for fk_table in fk_tables:
+ # Create the shadow table for the referencing table.
+ fk_shadow_tablename = _SHADOW_TABLE_PREFIX + fk_table.name
+ try:
+ fk_shadow_table = Table(fk_shadow_tablename, metadata,
+ autoload=True)
+ except NoSuchTableError:
+ # No corresponding shadow table; skip it.
+ continue
- try:
- with conn.begin():
- conn.execute(query_insert)
- result_delete = conn.execute(delete_statement)
- return result_delete.rowcount
- except db_exc.DBReferenceError as ex:
- LOG.warning('Failed to archive %(table)s: %(error)s',
- {'table': table.name,
- 'error': six.text_type(ex)})
- return 0
+ # TODO(stephenfin): Drop this when we drop the table
+ if fk_table.name == "dns_domains":
+ # We have one table (dns_domains) where the key is called
+ # "domain" rather than "id"
+ fk_column = fk_table.c.domain
+ else:
+ fk_column = fk_table.c.id
+
+ for fk in fk_table.foreign_keys:
+ # We need to find the records in the referring (child) table that
+ # correspond to the records in our (parent) table so we can archive
+ # them.
+
+ # First, select the column in the parent referenced by the child
+ # table that corresponds to the parent table records that were
+ # passed in.
+ # Example: table = 'instances' and fk_table = 'instance_extra'
+ # fk.parent = instance_extra.instance_uuid
+ # fk.column = instances.uuid
+ # SELECT instances.uuid FROM instances, instance_extra
+ # WHERE instance_extra.instance_uuid = instances.uuid
+ # AND instances.id IN (<ids>)
+ # We need the instance uuids for the <ids> in order to
+ # look up the matching instance_extra records.
+ select = sql.select([fk.column]).where(
+ sql.and_(fk.parent == fk.column, column.in_(records)))
+ rows = conn.execute(select).fetchall()
+ p_records = [r[0] for r in rows]
+ # Then, select rows in the child table that correspond to the
+ # parent table records that were passed in.
+ # Example: table = 'instances' and fk_table = 'instance_extra'
+ # fk.parent = instance_extra.instance_uuid
+ # fk.column = instances.uuid
+ # SELECT instance_extra.id FROM instance_extra, instances
+ # WHERE instance_extra.instance_uuid = instances.uuid
+ # AND instances.uuid IN (<uuids>)
+ # We will get the instance_extra ids we need to archive
+ # them.
+ fk_select = sql.select([fk_column]).where(
+ sql.and_(fk.parent == fk.column, fk.column.in_(p_records)))
+ fk_rows = conn.execute(fk_select).fetchall()
+ fk_records = [r[0] for r in fk_rows]
+ if fk_records:
+ # If we found any records in the child table, create shadow
+ # table insert statements for them and prepend them to the
+ # deque.
+ fk_columns = [c.name for c in fk_table.c]
+ fk_insert = fk_shadow_table.insert(inline=True).\
+ from_select(fk_columns, sql.select([fk_table],
+ fk_column.in_(fk_records)))
+ inserts.appendleft(fk_insert)
+ # Create main table delete statements and prepend them to the
+ # deque.
+ fk_delete = fk_table.delete().where(fk_column.in_(fk_records))
+ deletes.appendleft(fk_delete)
+ # Repeat for any possible nested child tables.
+ i, d = _get_fk_stmts(metadata, conn, fk_table, fk_column, fk_records)
+ inserts.extendleft(i)
+ deletes.extendleft(d)
+
+ return inserts, deletes
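
A rough standalone sketch (not nova code; the statements are placeholder
strings) of why prepending to the deques yields a safe execution order, with
the deepest children deleted first:

    import collections

    deletes = collections.deque()
    # Walking instances -> instance_actions -> instance_actions_events:
    deletes.appendleft('DELETE ... instance_actions')           # direct child
    deletes.extendleft(['DELETE ... instance_actions_events'])  # nested child
    # Iteration order: events first, then actions; the parent 'instances'
    # delete runs last, so no child row is ever orphaned mid-transaction.
    print(list(deletes))
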
def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
"""Move up to max_rows rows from one tables to the corresponding
shadow table.
- :returns: 2-item tuple:
+ Will also follow FK constraints and archive all referring rows.
+ Example: archiving a record from the 'instances' table will also archive
+ the 'instance_extra' record before archiving the 'instances' record.
+
+ :returns: 3-item tuple:
- number of rows archived
- list of UUIDs of instances that were archived
+ - dict of {tablename: rows_archived} for the extra rows archived
+ due to FK constraints
"""
conn = metadata.bind.connect()
# NOTE(tdurakov): table metadata should be received
@@ -4153,7 +4221,7 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
shadow_table = Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError:
# No corresponding shadow table; skip it.
- return rows_archived, deleted_instance_uuids
+ return rows_archived, deleted_instance_uuids, {}
# TODO(stephenfin): Drop this when we drop the table
if tablename == "dns_domains":
@@ -4176,10 +4244,29 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
rows = conn.execute(select).fetchall()
records = [r[0] for r in rows]
+ # We will archive deleted rows for this table and also generate insert and
+ # delete statements for extra rows we may archive by following FK
+ # relationships. Because we are iterating over the sorted_tables (list of
+ # Table objects sorted in order of foreign key dependency), new inserts and
+ # deletes ("leaves") will be added to the fronts of the deques created in
+ # _get_fk_stmts. This way, we make sure we delete child table records
+ # before we delete their parent table records.
+
+ # Map each extra tablename to the number of rows we archive from it by
+ # following FK relationships.
+ # {tablename: extra_rows_archived}
+ extras = collections.defaultdict(int)
if records:
insert = shadow_table.insert(inline=True).\
from_select(columns, sql.select([table], column.in_(records)))
delete = table.delete().where(column.in_(records))
+ # Walk FK relationships and add insert/delete statements for rows that
+ # refer to this table via FK constraints. fk_inserts and fk_deletes
+ # will be prepended to by _get_fk_stmts if referring rows are found by
+ # FK constraints.
+ fk_inserts, fk_deletes = _get_fk_stmts(
+ metadata, conn, table, column, records)
+
# NOTE(tssurya): In order to facilitate the deletion of records from
# instance_mappings, request_specs and instance_group_member tables in
# the nova_api DB, the rows of deleted instances from the instances
@@ -4193,9 +4280,14 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
try:
# Group the insert and delete in a transaction.
with conn.begin():
+ for fk_insert in fk_inserts:
+ conn.execute(fk_insert)
+ for fk_delete in fk_deletes:
+ result_fk_delete = conn.execute(fk_delete)
+ extras[fk_delete.table.name] += result_fk_delete.rowcount
conn.execute(insert)
result_delete = conn.execute(delete)
- rows_archived = result_delete.rowcount
+ rows_archived += result_delete.rowcount
except db_exc.DBReferenceError as ex:
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
@@ -4204,22 +4296,7 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
"%(tablename)s: %(error)s",
{'tablename': tablename, 'error': six.text_type(ex)})
- # NOTE(jake): instance_actions_events doesn't have a instance_uuid column
- # but still needs to be archived as it is a FK constraint
- if ((max_rows is None or rows_archived < max_rows) and
- # NOTE(melwitt): The pci_devices table uses the 'instance_uuid'
- # column to track the allocated association of a PCI device and its
- # records are not tied to the lifecycles of instance records.
- (tablename != 'pci_devices' and
- 'instance_uuid' in columns or
- tablename == 'instance_actions_events')):
- instances = models.BASE.metadata.tables['instances']
- limit = max_rows - rows_archived if max_rows is not None else None
- extra = _archive_if_instance_deleted(table, shadow_table, instances,
- conn, limit, before)
- rows_archived += extra
-
- return rows_archived, deleted_instance_uuids
+ return rows_archived, deleted_instance_uuids, extras
def archive_deleted_rows(context=None, max_rows=None, before=None):
@@ -4243,13 +4320,18 @@ def archive_deleted_rows(context=None, max_rows=None, before=None):
- list of UUIDs of instances that were archived
- total number of rows that were archived
"""
- table_to_rows_archived = {}
+ table_to_rows_archived = collections.defaultdict(int)
deleted_instance_uuids = []
total_rows_archived = 0
meta = MetaData(get_engine(use_slave=True, context=context))
meta.reflect()
- # Reverse sort the tables so we get the leaf nodes first for processing.
- for table in reversed(meta.sorted_tables):
+ # Get the sorted list of tables in order of foreign key dependency.
+ # Process the parent tables and find their dependent records in order to
+ # archive the related records in a single database transaction. The goal
+ # is to avoid a situation where, for example, an 'instances' table record
+ # is missing its corresponding 'instance_extra' record due to running the
+ # archive_deleted_rows command with max_rows.
+ for table in meta.sorted_tables:
tablename = table.name
rows_archived = 0
# skip the special sqlalchemy-migrate migrate_version table and any
@@ -4257,7 +4339,7 @@ def archive_deleted_rows(context=None, max_rows=None, before=None):
if (tablename == 'migrate_version' or
tablename.startswith(_SHADOW_TABLE_PREFIX)):
continue
- rows_archived, _deleted_instance_uuids = (
+ rows_archived, _deleted_instance_uuids, extras = (
_archive_deleted_rows_for_table(
meta, tablename,
max_rows=max_rows - total_rows_archived,
@@ -4268,6 +4350,9 @@ def archive_deleted_rows(context=None, max_rows=None, before=None):
# Only report results for tables that had updates.
if rows_archived:
table_to_rows_archived[tablename] = rows_archived
+ for tablename, extra_rows_archived in extras.items():
+ table_to_rows_archived[tablename] += extra_rows_archived
+ total_rows_archived += extra_rows_archived
if total_rows_archived >= max_rows:
break
return table_to_rows_archived, deleted_instance_uuids, total_rows_archived
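
A small self-contained sketch (SQLAlchemy only; the two tables are stand-ins)
of the sorted_tables ordering the loop now relies on, where referenced tables
sort before the tables that reference them:

    from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table

    metadata = MetaData()
    Table('instances', metadata, Column('id', Integer, primary_key=True))
    Table('instance_extra', metadata,
          Column('id', Integer, primary_key=True),
          Column('instance_id', Integer, ForeignKey('instances.id')))

    # Parents sort first, so the archive loop visits 'instances' before
    # 'instance_extra' and pulls dependent rows along via _get_fk_stmts.
    print([t.name for t in metadata.sorted_tables])
    # ['instances', 'instance_extra']
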
diff --git a/nova/network/neutron.py b/nova/network/neutron.py
index 66194cb696..f3d763044e 100644
--- a/nova/network/neutron.py
+++ b/nova/network/neutron.py
@@ -273,6 +273,7 @@ def _get_ksa_client(context, admin=False):
client = utils.get_ksa_adapter(
'network', ksa_auth=auth_plugin, ksa_session=session)
client.additional_headers = {'accept': 'application/json'}
+ client.connect_retries = CONF.neutron.http_retries
return client
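
connect_retries is a standard keystoneauth1 Adapter attribute; a minimal
equivalent construction looks like this (session/auth setup is elided and the
retry count is illustrative):

    from keystoneauth1 import adapter, session

    sess = session.Session()  # nova builds this from its own auth config
    client = adapter.Adapter(session=sess, service_type='network',
                             connect_retries=3)  # retried on connect errors
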
diff --git a/nova/test.py b/nova/test.py
index 6f5a8eebe3..0aaf5db80c 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -50,10 +50,13 @@ from oslotest import base
from oslotest import mock_fixture
import six
from six.moves import builtins
+from sqlalchemy.dialects import sqlite
import testtools
+from nova.api.openstack import wsgi_app
from nova.compute import rpcapi as compute_rpcapi
from nova import context
+from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova import objects
from nova.objects import base as objects_base
@@ -285,6 +288,10 @@ class TestCase(base.BaseTestCase):
quota.UID_QFD_POPULATED_CACHE_BY_PROJECT = set()
quota.UID_QFD_POPULATED_CACHE_ALL = False
+ # Make sure that the wsgi app is fully initialized for all test cases
+ # instead of being initialized only once per test worker.
+ wsgi_app.init_global_data.reset()
+
def _setup_cells(self):
"""Setup a normal cellsv2 environment.
@@ -378,6 +385,22 @@ class TestCase(base.BaseTestCase):
for k, v in kw.items():
CONF.set_override(k, v, group)
+ def enforce_fk_constraints(self, engine=None):
+ if engine is None:
+ engine = sqlalchemy_api.get_engine()
+ dialect = engine.url.get_dialect()
+ if dialect == sqlite.dialect:
+ # We're seeing issues with foreign key support in SQLite 3.6.20
+ # SQLAlchemy doesn't support it at all with SQLite < 3.6.19
+ # It works fine in SQLite 3.7.
+ # So return early to skip this test if running SQLite < 3.7
+ import sqlite3
+ tup = sqlite3.sqlite_version_info
+ if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
+ self.skipTest(
+ 'sqlite version too old for reliable SQLA foreign_keys')
+ engine.connect().execute("PRAGMA foreign_keys = ON")
+
def start_service(self, name, host=None, cell_name=None, **kwargs):
# Disallow starting multiple scheduler services
if name == 'scheduler' and self._service_fixture_count[name]:
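
The pragma's effect can be reproduced with the stdlib sqlite3 module alone (a
sketch; the tables are made up):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE parent (id INTEGER PRIMARY KEY)')
    conn.execute('CREATE TABLE child (id INTEGER PRIMARY KEY, '
                 'parent_id INTEGER REFERENCES parent (id))')
    # Off by default: SQLite silently accepts an orphaned child row.
    conn.execute('INSERT INTO child VALUES (1, 999)')
    conn.execute('DELETE FROM child')
    conn.execute('PRAGMA foreign_keys = ON')
    try:
        conn.execute('INSERT INTO child VALUES (1, 999)')
    except sqlite3.IntegrityError as exc:
        print(exc)  # FOREIGN KEY constraint failed
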
diff --git a/nova/tests/functional/db/test_archive.py b/nova/tests/functional/db/test_archive.py
index 8a6211cdcf..7a5e55963d 100644
--- a/nova/tests/functional/db/test_archive.py
+++ b/nova/tests/functional/db/test_archive.py
@@ -17,7 +17,6 @@ import re
from dateutil import parser as dateutil_parser
from oslo_utils import timeutils
-from sqlalchemy.dialects import sqlite
from sqlalchemy import func
from sqlalchemy import MetaData
from sqlalchemy import select
@@ -33,22 +32,7 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
def setUp(self):
super(TestDatabaseArchive, self).setUp()
- # TODO(mriedem): pull this out so we can re-use it in
- # test_archive_deleted_rows_fk_constraint
- # SQLite doesn't enforce foreign key constraints without a pragma.
- engine = sqlalchemy_api.get_engine()
- dialect = engine.url.get_dialect()
- if dialect == sqlite.dialect:
- # We're seeing issues with foreign key support in SQLite 3.6.20
- # SQLAlchemy doesn't support it at all with < SQLite 3.6.19
- # It works fine in SQLite 3.7.
- # So return early to skip this test if running SQLite < 3.7
- import sqlite3
- tup = sqlite3.sqlite_version_info
- if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
- self.skipTest(
- 'sqlite version too old for reliable SQLA foreign_keys')
- engine.connect().execute("PRAGMA foreign_keys = ON")
+ self.enforce_fk_constraints()
def test_archive_deleted_rows(self):
# Boots a server, deletes it, and then tries to archive it.
@@ -144,6 +128,47 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
# Verify that the pci_devices record has not been dropped
self.assertNotIn('pci_devices', results)
+ def test_archive_deleted_rows_incomplete(self):
+ """This tests a scenario where archive_deleted_rows is run with
+ --max_rows and does not run to completion.
+
+ That is, the archive is stopped before all archivable records have been
+ archived. Specifically, the problematic state is when a single instance
+ becomes partially archived (example: 'instance_extra' record for one
+ instance has been archived while its 'instances' record remains). Any
+ access of the instance (example: listing deleted instances) that
+ triggers the retrieval of a dependent record that has been archived
+ away results in undefined behavior that may raise an error.
+
+ We will force the system into a state where a single deleted instance
+ is partially archived. We want to verify that we can, for example,
+ successfully do a GET /servers/detail at any point between partial
+ archive_deleted_rows runs without errors.
+ """
+ # Boots a server, deletes it, and then tries to archive it.
+ server = self._create_server()
+ server_id = server['id']
+ # Assert that there are instance_actions. instance_actions are
+ # interesting since we don't soft delete them but they have a foreign
+ # key back to the instances table.
+ actions = self.api.get_instance_actions(server_id)
+ self.assertTrue(len(actions),
+ 'No instance actions for server: %s' % server_id)
+ self._delete_server(server)
+ # Archive deleted records iteratively, 1 row at a time, and try to do a
+ # GET /servers/detail between each run. All should succeed.
+ exceptions = []
+ while True:
+ _, _, archived = db.archive_deleted_rows(max_rows=1)
+ try:
+ # Need to use the admin API to list deleted servers.
+ self.admin_api.get_servers(search_opts={'deleted': True})
+ except Exception as ex:
+ exceptions.append(ex)
+ if archived == 0:
+ break
+ self.assertFalse(exceptions)
+
def _get_table_counts(self):
engine = sqlalchemy_api.get_engine()
conn = engine.connect()
diff --git a/nova/tests/functional/test_nova_manage.py b/nova/tests/functional/test_nova_manage.py
index 811224e96e..65abb45241 100644
--- a/nova/tests/functional/test_nova_manage.py
+++ b/nova/tests/functional/test_nova_manage.py
@@ -1657,6 +1657,7 @@ class TestDBArchiveDeletedRows(integrated_helpers._IntegratedTestBase):
def setUp(self):
super(TestDBArchiveDeletedRows, self).setUp()
+ self.enforce_fk_constraints()
self.cli = manage.DbCommands()
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
@@ -1702,6 +1703,7 @@ class TestDBArchiveDeletedRowsMultiCell(integrated_helpers.InstanceHelperMixin,
def setUp(self):
super(TestDBArchiveDeletedRowsMultiCell, self).setUp()
+ self.enforce_fk_constraints()
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
diff --git a/nova/tests/unit/api/openstack/test_wsgi_app.py b/nova/tests/unit/api/openstack/test_wsgi_app.py
new file mode 100644
index 0000000000..4cb7459c98
--- /dev/null
+++ b/nova/tests/unit/api/openstack/test_wsgi_app.py
@@ -0,0 +1,85 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import tempfile
+
+import fixtures
+import mock
+from oslo_config import fixture as config_fixture
+from oslotest import base
+
+from nova.api.openstack import wsgi_app
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+
+
+class WSGIAppTest(base.BaseTestCase):
+
+ _paste_config = """
+[app:nova-api]
+use = egg:Paste#static
+document_root = /tmp
+ """
+
+ def setUp(self):
+ # Ensure BaseTestCase's ConfigureLogging fixture is disabled since
+ # we're using our own (StandardLogging).
+ with fixtures.EnvironmentVariable('OS_LOG_CAPTURE', '0'):
+ super(WSGIAppTest, self).setUp()
+ self.stdlog = self.useFixture(nova_fixtures.StandardLogging())
+ self.conf = tempfile.NamedTemporaryFile(mode='w+t')
+ self.conf.write(self._paste_config.lstrip())
+ self.conf.seek(0)
+ self.conf.flush()
+ self.addCleanup(self.conf.close)
+ # Use of this fixture takes care of cleaning up config settings for
+ # subsequent tests.
+ self.useFixture(config_fixture.Config())
+
+ @mock.patch('sys.argv', return_value=mock.sentinel.argv)
+ @mock.patch('nova.db.sqlalchemy.api.configure')
+ @mock.patch('nova.api.openstack.wsgi_app._setup_service')
+ @mock.patch('nova.api.openstack.wsgi_app._get_config_files')
+ def test_init_application_called_twice(self, mock_get_files, mock_setup,
+ mock_db_configure, mock_argv):
+ """Test that init_application can tolerate being called twice in a
+ single python interpreter instance.
+
+ When nova-api is run via mod_wsgi, if any exception is raised during
+ init_application, mod_wsgi will re-run the WSGI script without
+ restarting the daemon process even when configured for Daemon Mode.
+
+ We access the database as part of init_application, so if nova-api
+ starts up before the database is up, we'll get, for example, a
+ DBConnectionError raised during init_application and our WSGI script
+ will get reloaded/re-run by mod_wsgi.
+ """
+ mock_get_files.return_value = [self.conf.name]
+ mock_setup.side_effect = [test.TestingException, None]
+ # We need to mock the global database configure() method, else we will
+ # be affected by global database state altered by other tests that ran
+ # before this test, causing this test to fail with
+ # oslo_db.sqlalchemy.enginefacade.AlreadyStartedError. We can instead
+ # mock the method to raise an exception if it's called a second time in
+ # this test to simulate the fact that the database does not tolerate
+ # re-init [after a database query has been made].
+ mock_db_configure.side_effect = [None, test.TestingException]
+ # Run init_application the first time, simulating an exception being
+ # raised during it.
+ self.assertRaises(test.TestingException, wsgi_app.init_application,
+ 'nova-api')
+ # Now run init_application a second time, it should succeed since no
+ # exception is being raised (the init of global data should not be
+ # re-attempted).
+ wsgi_app.init_application('nova-api')
+ self.assertIn('Global data already initialized, not re-initializing.',
+ self.stdlog.logger.output)
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
index a9943f1c37..6b80f21661 100644
--- a/nova/tests/unit/db/test_db_api.py
+++ b/nova/tests/unit/db/test_db_api.py
@@ -39,7 +39,6 @@ from oslo_utils import uuidutils
import six
from six.moves import range
from sqlalchemy import Column
-from sqlalchemy.dialects import sqlite
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import inspect
@@ -6296,17 +6295,24 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
instance_actions_events=1)
self._assertEqualObjects(expected, results[0])
- # Archive 1 row deleted before 2017-01-03. instance_action_events
- # should be the table with row deleted due to FK contraints
+ # Archive 1 row deleted before 2017-01-03
+ # Because the instances table will be processed first, tables that
+ # refer to it (instance_actions and instance_action_events) will be
+ # visited and archived in the same transaction as the instance, to
+ # avoid orphaning the instance record (archive dependent records in one
+ # transaction)
before_date = dateutil_parser.parse('2017-01-03', fuzzy=True)
results = db.archive_deleted_rows(max_rows=1, before=before_date)
- expected = dict(instance_actions_events=1)
+ expected = dict(instances=1,
+ instance_actions=1,
+ instance_actions_events=1)
self._assertEqualObjects(expected, results[0])
- # Archive all other rows deleted before 2017-01-03. This should
- # delete row in instance_actions, then row in instances due to FK
- # constraints
+ # Try to archive all other rows deleted before 2017-01-03. This should
+ # not archive anything because the instances table and tables that
+ # refer to it (instance_actions and instance_action_events) were all
+ # archived in the last run.
results = db.archive_deleted_rows(max_rows=100, before=before_date)
- expected = dict(instances=1, instance_actions=1)
+ expected = {}
self._assertEqualObjects(expected, results[0])
# Verify we have 4 left in main
@@ -6440,18 +6446,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
def _check_sqlite_version_less_than_3_7(self):
# SQLite doesn't enforce foreign key constraints without a pragma.
- dialect = self.engine.url.get_dialect()
- if dialect == sqlite.dialect:
- # We're seeing issues with foreign key support in SQLite 3.6.20
- # SQLAlchemy doesn't support it at all with < SQLite 3.6.19
- # It works fine in SQLite 3.7.
- # So return early to skip this test if running SQLite < 3.7
- import sqlite3
- tup = sqlite3.sqlite_version_info
- if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
- self.skipTest(
- 'sqlite version too old for reliable SQLA foreign_keys')
- self.conn.execute("PRAGMA foreign_keys = ON")
+ self.enforce_fk_constraints(engine=self.engine)
def test_archive_deleted_rows_for_migrations(self):
# migrations.instance_uuid depends on instances.uuid
@@ -6465,19 +6460,8 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
ins_stmt = self.migrations.insert().values(instance_uuid=instance_uuid,
deleted=0)
self.conn.execute(ins_stmt)
- # The first try to archive instances should fail, due to FK.
- num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
- "instances",
- max_rows=None,
- before=None)
- self.assertEqual(0, num[0])
- # Then archiving migrations should work.
- num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
- "migrations",
- max_rows=None,
- before=None)
- self.assertEqual(1, num[0])
- # Then archiving instances should work.
+ # Archiving instances should result in migrations related to the
+ # instances also being archived.
num = sqlalchemy_api._archive_deleted_rows_for_table(self.metadata,
"instances",
max_rows=None,
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index 3e6dc6417b..5d7c2069eb 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -253,6 +253,8 @@ class TestNeutronClient(test.NoDBTestCase):
auth_token='token')
cl = neutronapi.get_client(my_context)
self.assertEqual(retries, cl.httpclient.connect_retries)
+ kcl = neutronapi._get_ksa_client(my_context)
+ self.assertEqual(retries, kcl.connect_retries)
class TestAPIBase(test.TestCase):
diff --git a/nova/tests/unit/test_utils.py b/nova/tests/unit/test_utils.py
index 2b92cf9144..1b2872f35c 100644
--- a/nova/tests/unit/test_utils.py
+++ b/nova/tests/unit/test_utils.py
@@ -1326,3 +1326,101 @@ class TestOldComputeCheck(test.NoDBTestCase):
'not allowed to directly access the database. You should run this '
'service without the [api_database]/connection config option. The '
'service version check will only query the local cell.')
+
+
+class RunOnceTests(test.NoDBTestCase):
+
+ fake_logger = mock.MagicMock()
+
+ @utils.run_once("already ran once", fake_logger)
+ def dummy_test_func(self, fail=False):
+ if fail:
+ raise ValueError()
+ return True
+
+ def setUp(self):
+ super(RunOnceTests, self).setUp()
+ self.dummy_test_func.reset()
+ RunOnceTests.fake_logger.reset_mock()
+
+ def test_wrapped_functions_called_once(self):
+ self.assertFalse(self.dummy_test_func.called)
+ result = self.dummy_test_func()
+ self.assertTrue(result)
+ self.assertTrue(self.dummy_test_func.called)
+
+ # assert that on second invocation no result
+ # is returned and that the logger is invoked.
+ result = self.dummy_test_func()
+ RunOnceTests.fake_logger.assert_called_once()
+ self.assertIsNone(result)
+
+ def test_wrapped_functions_called_once_raises(self):
+ self.assertFalse(self.dummy_test_func.called)
+ self.assertRaises(ValueError, self.dummy_test_func, fail=True)
+ self.assertTrue(self.dummy_test_func.called)
+
+ # assert that on second invocation no result
+ # is returned and that the logger is invoked.
+ result = self.dummy_test_func()
+ RunOnceTests.fake_logger.assert_called_once()
+ self.assertIsNone(result)
+
+ def test_wrapped_functions_can_be_reset(self):
+ # assert we start with a clean state
+ self.assertFalse(self.dummy_test_func.called)
+ result = self.dummy_test_func()
+ self.assertTrue(result)
+
+ self.dummy_test_func.reset()
+ # assert we restored a clean state
+ self.assertFalse(self.dummy_test_func.called)
+ result = self.dummy_test_func()
+ self.assertTrue(result)
+
+ # assert that we never called the logger
+ RunOnceTests.fake_logger.assert_not_called()
+
+ def test_reset_calls_cleanup(self):
+ mock_clean = mock.Mock()
+
+ @utils.run_once("already ran once", self.fake_logger,
+ cleanup=mock_clean)
+ def f():
+ pass
+
+ f()
+ self.assertTrue(f.called)
+
+ f.reset()
+ self.assertFalse(f.called)
+ mock_clean.assert_called_once_with()
+
+ def test_clean_is_not_called_at_reset_if_wrapped_not_called(self):
+ mock_clean = mock.Mock()
+
+ @utils.run_once("already ran once", self.fake_logger,
+ cleanup=mock_clean)
+ def f():
+ pass
+
+ self.assertFalse(f.called)
+
+ f.reset()
+ self.assertFalse(f.called)
+ self.assertFalse(mock_clean.called)
+
+ def test_reset_works_even_if_cleanup_raises(self):
+ mock_clean = mock.Mock(side_effect=ValueError())
+
+ @utils.run_once("already ran once", self.fake_logger,
+ cleanup=mock_clean)
+ def f():
+ pass
+
+ f()
+ self.assertTrue(f.called)
+
+ self.assertRaises(ValueError, f.reset)
+ self.assertFalse(f.called)
+ mock_clean.assert_called_once_with()
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
index dcb8dba990..240f6143d5 100644
--- a/nova/tests/unit/virt/libvirt/test_vif.py
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -380,6 +380,10 @@ class LibvirtVifTestCase(test.NoDBTestCase):
uuid='f0000000-0000-0000-0000-000000000001',
project_id=723)
+ flavor_1vcpu = objects.Flavor(vcpus=1, memory=512, root_gb=1)
+
+ flavor_2vcpu = objects.Flavor(vcpus=2, memory=512, root_gb=1)
+
bandwidth = {
'quota:vif_inbound_peak': '200',
'quota:vif_outbound_peak': '20',
@@ -1091,30 +1095,50 @@ class LibvirtVifTestCase(test.NoDBTestCase):
@mock.patch('nova.privsep.linux_net.device_exists', return_value=True)
@mock.patch('nova.privsep.linux_net.set_device_mtu')
@mock.patch('nova.privsep.linux_net.create_tap_dev')
- def test_plug_tap_kvm_virtio(self, mock_create_tap_dev, mock_set_mtu,
- mock_device_exists):
+ def test_plug_tap_kvm_virtio(
+ self, mock_create_tap_dev, mock_set_mtu, mock_device_exists):
d1 = vif.LibvirtGenericVIFDriver()
ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
+ flavor=self.flavor_2vcpu,
project_id=723, system_metadata={}
)
d1.plug(ins, self.vif_tap)
- mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz', None,
- multiqueue=False)
+ mock_create_tap_dev.assert_called_once_with(
+ 'tap-xxx-yyy-zzz', None, multiqueue=False)
mock_create_tap_dev.reset_mock()
d2 = vif.LibvirtGenericVIFDriver()
mq_ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
+ flavor=self.flavor_2vcpu,
project_id=723, system_metadata={
'image_hw_vif_multiqueue_enabled': 'True'
}
)
d2.plug(mq_ins, self.vif_tap)
- mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz', None,
- multiqueue=True)
+ mock_create_tap_dev.assert_called_once_with(
+ 'tap-xxx-yyy-zzz', None, multiqueue=True)
+
+ @mock.patch('nova.privsep.linux_net.device_exists', return_value=True)
+ @mock.patch('nova.privsep.linux_net.set_device_mtu')
+ @mock.patch('nova.privsep.linux_net.create_tap_dev')
+ def test_plug_tap_mq_ignored_1vcpu(
+ self, mock_create_tap_dev, mock_set_mtu, mock_device_exists):
+
+ d1 = vif.LibvirtGenericVIFDriver()
+ mq_ins = objects.Instance(
+ id=1, uuid='f0000000-0000-0000-0000-000000000001',
+ image_ref=uuids.image_ref, flavor=self.flavor_1vcpu,
+ project_id=723, system_metadata={
+ 'image_hw_vif_multiqueue_enabled': 'True',
+ }
+ )
+ d1.plug(mq_ins, self.vif_tap)
+ mock_create_tap_dev.assert_called_once_with(
+ 'tap-xxx-yyy-zzz', None, multiqueue=False)
@mock.patch('nova.privsep.linux_net.device_exists', return_value=True)
@mock.patch('nova.privsep.linux_net.set_device_mtu')
@@ -1129,14 +1153,14 @@ class LibvirtVifTestCase(test.NoDBTestCase):
d1 = vif.LibvirtGenericVIFDriver()
ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
+ flavor=self.flavor_2vcpu,
project_id=723, system_metadata={
'image_hw_vif_multiqueue_enabled': 'True'
}
)
d1.plug(ins, self.vif_tap)
- mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz',
- None,
- multiqueue=False)
+ mock_create_tap_dev.assert_called_once_with(
+ 'tap-xxx-yyy-zzz', None, multiqueue=False)
@mock.patch('nova.privsep.linux_net.device_exists', return_value=True)
@mock.patch('nova.privsep.linux_net.set_device_mtu')
@@ -1147,15 +1171,15 @@ class LibvirtVifTestCase(test.NoDBTestCase):
d1 = vif.LibvirtGenericVIFDriver()
ins = objects.Instance(
id=1, uuid='f0000000-0000-0000-0000-000000000001',
+ flavor=self.flavor_2vcpu,
project_id=723, system_metadata={
'image_hw_vif_multiqueue_enabled': 'True',
'image_hw_vif_model': 'e1000',
}
)
d1.plug(ins, self.vif_tap)
- mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz',
- None,
- multiqueue=False)
+ mock_create_tap_dev.assert_called_once_with(
+ 'tap-xxx-yyy-zzz', None, multiqueue=False)
def test_unplug_tap(self):
d = vif.LibvirtGenericVIFDriver()
diff --git a/nova/utils.py b/nova/utils.py
index 53b87f98be..56e3aa300a 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -1195,3 +1195,49 @@ def raise_if_old_compute():
scope=scope,
min_service_level=current_service_version,
oldest_supported_service=oldest_supported_service_level)
+
+
+def run_once(message, logger, cleanup=None):
+ """This is a utility function decorator to ensure a function
+ is run once and only once in an interpreter instance.
+
+ Note: this is copied from the placement repo (placement/util.py)
+
+ The decorated function object can be reset by calling its
+ reset function. All exceptions raised by the wrapped function,
+ logger and cleanup function will be propagated to the caller.
+ """
+ def outer_wrapper(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ if not wrapper.called:
+ # Note(sean-k-mooney): the called state is always
+ # updated even if the wrapped function completes
+ # by raising an exception. If the caller catches
+ # the exception it is their responsibility to call
+ # reset if they want to re-execute the wrapped function.
+ try:
+ return func(*args, **kwargs)
+ finally:
+ wrapper.called = True
+ else:
+ logger(message)
+
+ wrapper.called = False
+
+ def reset(wrapper, *args, **kwargs):
+ # Note(sean-k-mooney): we conditionally call the
+ # cleanup function if one is provided only when the
+ # wrapped function has been called previously. We catch
+ # and reraise any exception that may be raised and update
+ # the called state in a finally block to ensure it's
+ # always updated if reset is called.
+ try:
+ if cleanup and wrapper.called:
+ return cleanup(*args, **kwargs)
+ finally:
+ wrapper.called = False
+
+ wrapper.reset = functools.partial(reset, wrapper)
+ return wrapper
+ return outer_wrapper
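
A short usage sketch of the decorator (the function, message, and cleanup are
illustrative):

    import logging

    LOG = logging.getLogger(__name__)

    @run_once('setup already done, skipping', LOG.info,
              cleanup=lambda: LOG.info('tearing down'))
    def setup():
        return 'initialized'

    setup()        # returns 'initialized' and marks the function as called
    setup()        # logs the message and returns None
    setup.reset()  # invokes the cleanup callable, clears the called flag
    setup()        # runs the body again
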
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index cf8aff35a0..c40b23f0f8 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -722,7 +722,8 @@ class LibvirtGenericVIFDriver(object):
vif_model = self.get_vif_model(image_meta=image_meta)
# TODO(ganso): explore whether multiqueue works for other vif models
# that go through this code path.
- multiqueue = (self._requests_multiqueue(image_meta) and
+ multiqueue = (instance.get_flavor().vcpus > 1 and
+ self._requests_multiqueue(image_meta) and
vif_model == network_model.VIF_MODEL_VIRTIO)
nova.privsep.linux_net.create_tap_dev(dev, mac, multiqueue=multiqueue)
network = vif.get('network')
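
The new guard, distilled into a standalone predicate (a sketch; the real code
reads the flavor and image properties as shown above):

    def wants_multiqueue(vcpus, image_requests_mq, vif_model):
        # A multiqueue tap gets one queue per guest vCPU, so a single-vCPU
        # instance cannot use it; the image property is ignored in that case.
        return vcpus > 1 and image_requests_mq and vif_model == 'virtio'

    assert wants_multiqueue(2, True, 'virtio')
    assert not wants_multiqueue(1, True, 'virtio')  # the bug 1939604 case
    assert not wants_multiqueue(2, True, 'e1000')
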
diff --git a/releasenotes/notes/bug-1939604-547c729b7741831b.yaml b/releasenotes/notes/bug-1939604-547c729b7741831b.yaml
new file mode 100644
index 0000000000..e14327c285
--- /dev/null
+++ b/releasenotes/notes/bug-1939604-547c729b7741831b.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Addressed an issue that prevented instances with 1 vcpu using the
+ multiqueue feature from being created successfully when their vif_type
+ is TAP.
diff --git a/tools/check-cherry-picks.sh b/tools/check-cherry-picks.sh
index 5ca6ded203..5a449c520b 100755
--- a/tools/check-cherry-picks.sh
+++ b/tools/check-cherry-picks.sh
@@ -4,11 +4,6 @@
# to verify that they're all on either master or stable/ branches
#
-# Allow this script to be disabled by a simple env var
-if [ ${DISABLE_CHERRY_PICK_CHECK:-0} -eq 1 ]; then
- exit 0
-fi
-
commit_hash=""
# Check if the patch is a merge patch by counting the number of parents.
diff --git a/tox.ini b/tox.ini
index cad163ee3f..e5d0e45446 100644
--- a/tox.ini
+++ b/tox.ini
@@ -42,15 +42,12 @@ commands =
description =
Run style checks.
envdir = {toxworkdir}/shared
-passenv =
- DISABLE_CHERRY_PICK_CHECK
commands =
bash tools/flake8wrap.sh {posargs}
# Check that no JSON files contain \r\n line endings.
bash -c "! find doc/ -type f -name *.json | xargs grep -U -n $'\r'"
# Check that all included JSON files are valid JSON
bash -c '! find doc/ -type f -name *.json | xargs -t -n1 python -m json.tool 2>&1 > /dev/null | grep -B1 -v ^python'
- bash tools/check-cherry-picks.sh
[testenv:fast8]
description =
@@ -59,6 +56,15 @@ envdir = {toxworkdir}/shared
commands =
bash tools/flake8wrap.sh -HEAD
+[testenv:validate-backport]
+description =
+ Determine whether a backport is ready to be merged by checking whether it has
+ already been merged to master or more recent stable branches.
+deps =
+skipsdist = true
+commands =
+ bash tools/check-cherry-picks.sh
+
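Reviewers can run the same check locally with tox -e validate-backport, the
command the new Zuul job effectively wraps.
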
[testenv:functional]
description =
Run functional tests using python3.