 devstack/lib/ironic                                                          |  11
 doc/source/admin/drivers/irmc.rst                                            |   7
 doc/source/contributor/dev-quickstart.rst                                    |   7
 doc/source/contributor/ironic-boot-from-volume.rst                           |   3
 doc/source/install/include/common-prerequisites.inc                          |  10
 ironic/cmd/status.py                                                         |  41
 ironic/conductor/base_manager.py                                             |  17
 ironic/conductor/manager.py                                                  |   7
 ironic/conf/irmc.py                                                          |  15
 ironic/db/sqlalchemy/__init__.py                                             |   4
 ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py      |  16
 ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py   |   8
 ironic/db/sqlalchemy/alembic/versions/48d6c242bb9b_add_node_tags.py          |   4
 ironic/db/sqlalchemy/alembic/versions/5ea1b0d310e_added_port_group_table_and_altered_ports.py |   4
 ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py      |   4
 ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py |   4
 ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py |   4
 ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py  |   5
 ironic/db/sqlalchemy/api.py                                                  | 549
 ironic/db/sqlalchemy/models.py                                               |  40
 ironic/drivers/modules/boot_mode_utils.py                                    |   2
 ironic/drivers/modules/irmc/common.py                                        |   7
 ironic/drivers/modules/irmc/inspect.py                                       |   9
 ironic/drivers/modules/irmc/power.py                                         |   7
 ironic/tests/unit/cmd/test_status.py                                         |  82
 ironic/tests/unit/common/test_release_mappings.py                            |   8
 ironic/tests/unit/conductor/mgr_utils.py                                     |  16
 ironic/tests/unit/conductor/test_allocations.py                              |   2
 ironic/tests/unit/conductor/test_base_manager.py                             |   6
 ironic/tests/unit/conductor/test_manager.py                                  |   2
 ironic/tests/unit/db/sqlalchemy/test_migrations.py                           | 313
 ironic/tests/unit/db/test_conductor.py                                       |   4
 ironic/tests/unit/db/test_nodes.py                                           |   7
 releasenotes/notes/add-allocations-table-check-38f1c9eef189b411.yaml         |   8
 releasenotes/notes/allocations-charset-5384d1ea00964bdd.yaml                 |  23
 releasenotes/notes/irmc-add-snmp-auth-protocols-3ff7597cea7ef9dd.yaml        |   5
 releasenotes/notes/prepare-for-sqlalchemy-20-e817f340f261b1a2.yaml           |   7
 releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po                 | 202
 releasenotes/source/yoga.rst                                                 |   6
 releasenotes/source/zed.rst                                                  |   6
 requirements.txt                                                             |   2
 test-requirements.txt                                                        |   2
 tox.ini                                                                      |   2
 43 files changed, 1095 insertions(+), 393 deletions(-)
diff --git a/devstack/lib/ironic b/devstack/lib/ironic
index 08cccce7a..aae8ca5cf 100644
--- a/devstack/lib/ironic
+++ b/devstack/lib/ironic
@@ -1649,15 +1649,9 @@ function configure_client_for {
# NOTE(TheJulia): Below are services which we know, as of late 2021, which support
# explicit scope based ops *and* have knobs.
- # Needed: Neutron, swift, nova ?service_catalog?
- # Neutron - https://review.opendev.org/c/openstack/devstack/+/797450
if [[ "$service_config_section" == "inspector" ]] && [[ "$IRONIC_INSPECTOR_ENFORCE_SCOPE" == "True" ]]; then
use_system_scope="True"
- elif [[ "$service_config_section" == "cinder" ]] && [[ "${CINDER_ENFORCE_SCOPE:-False}" == "True" ]]; then
- use_system_scope="True"
- elif [[ "$service_config_section" == "glance" ]] && [[ "${GLANCE_ENFORCE_SCOPE:-False}" == "True" ]]; then
- use_system_scope="True"
fi
if [[ "$use_system_scope" == "True" ]]; then
@@ -1924,6 +1918,11 @@ function init_ironic {
# NOTE(rloo): We're not upgrading but want to make sure this command works,
# even though we're not parsing the output of this command.
$IRONIC_BIN_DIR/ironic-status upgrade check
+
+ $IRONIC_BIN_DIR/ironic-status upgrade check && ret_val=$? || ret_val=$?
+ if [ $ret_val -gt 1 ] ; then
+ die $LINENO "The 'ironic-status upgrade check' command returned an error. Cannot proceed."
+ fi
}
# _ironic_bm_vm_names() - Generates list of names for baremetal VMs.
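
The ``-gt 1`` guard added above relies on the exit-code convention of
oslo.upgradecheck: the ``Code`` enum doubles as the process exit code of
``ironic-status upgrade check``, so warnings (exit code 1) let the devstack
run proceed while genuine failures abort it. A minimal sketch of that
mapping, using only the documented oslo_upgradecheck enum:

    # Sketch: why "ret_val -gt 1" tolerates warnings from the new checks.
    from oslo_upgradecheck import upgradecheck

    assert upgradecheck.Code.SUCCESS == 0  # all checks passed
    assert upgradecheck.Code.WARNING == 1  # non-fatal; devstack proceeds
    assert upgradecheck.Code.FAILURE == 2  # fatal; devstack dies
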
diff --git a/doc/source/admin/drivers/irmc.rst b/doc/source/admin/drivers/irmc.rst
index 17b8d8644..521d464cb 100644
--- a/doc/source/admin/drivers/irmc.rst
+++ b/doc/source/admin/drivers/irmc.rst
@@ -229,9 +229,10 @@ Configuration via ``ironic.conf``
and ``v2c``. The default value is ``public``. Optional.
- ``snmp_security``: SNMP security name required for version ``v3``.
Optional.
- - ``snmp_auth_proto``: The SNMPv3 auth protocol. The valid value and the
- default value are both ``sha``. We will add more supported valid values
- in the future. Optional.
+ - ``snmp_auth_proto``: The SNMPv3 auth protocol. If using iRMC S4 or S5, the
+ valid value of this option is only ``sha``. If using iRMC S6, the valid
+ values are ``sha256``, ``sha384`` and ``sha512``. The default value is
+ ``sha``. Optional.
- ``snmp_priv_proto``: The SNMPv3 privacy protocol. The valid value and
the default value are both ``aes``. We will add more supported valid values
in the future. Optional.
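
Since the documentation above describes configuration knobs, a short sketch
of how the widened choice list behaves may help; the registration call is
spelled out here only for the sketch (ironic normally registers its options
at import time), and the chosen value is illustrative:

    # Sketch: the new SNMPv3 auth protocol choices validate via oslo.config.
    from oslo_config import cfg

    from ironic.conf import irmc

    CONF = cfg.CONF
    CONF.register_opts(irmc.opts, group='irmc')
    CONF.set_override('snmp_auth_proto', 'sha256', group='irmc')  # iRMC S6
    assert CONF.irmc.snmp_auth_proto == 'sha256'
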
diff --git a/doc/source/contributor/dev-quickstart.rst b/doc/source/contributor/dev-quickstart.rst
index 3fe03f02b..6f63104f9 100644
--- a/doc/source/contributor/dev-quickstart.rst
+++ b/doc/source/contributor/dev-quickstart.rst
@@ -131,6 +131,13 @@ The unit tests need a local database setup, you can use
``tools/test-setup.sh`` to set up the database the same way as setup
in the OpenStack test systems.
+.. note::
+ If you encounter issues executing unit tests, specifically where errors
+ may indicate that a field is too long, check your database's default
+ character encoding. Debian specifically sets MariaDB to ``utf8mb4``,
+ which uses 4-byte encoded unicode characters by default and is
+ incompatible with Ironic's database schema.
+
Additional Tox Targets
----------------------
diff --git a/doc/source/contributor/ironic-boot-from-volume.rst b/doc/source/contributor/ironic-boot-from-volume.rst
index fc3fd1c2b..673a189be 100644
--- a/doc/source/contributor/ironic-boot-from-volume.rst
+++ b/doc/source/contributor/ironic-boot-from-volume.rst
@@ -125,7 +125,8 @@ You can also run an integration test that an instance is booted from a remote
volume with tempest in the environment::
cd /opt/stack/tempest
- tox -e all-plugin -- ironic_tempest_plugin.tests.scenario.test_baremetal_boot_from_volume
+ tox -e venv-tempest -- pip install (path to the ironic-tempest-plugin directory)
+ tox -e all -- ironic_tempest_plugin.tests.scenario.test_baremetal_boot_from_volume
Please note that the storage interface will only indicate errors based upon
the state of the node and the configuration present. As such a node does not
diff --git a/doc/source/install/include/common-prerequisites.inc b/doc/source/install/include/common-prerequisites.inc
index edaca46d0..718e80c9d 100644
--- a/doc/source/install/include/common-prerequisites.inc
+++ b/doc/source/install/include/common-prerequisites.inc
@@ -22,8 +22,16 @@ MySQL database that is used by other OpenStack services.
.. code-block:: console
# mysql -u root -p
- mysql> CREATE DATABASE ironic CHARACTER SET utf8;
+ mysql> CREATE DATABASE ironic CHARACTER SET utf8mb3;
mysql> GRANT ALL PRIVILEGES ON ironic.* TO 'ironic'@'localhost' \
IDENTIFIED BY 'IRONIC_DBPASSWORD';
mysql> GRANT ALL PRIVILEGES ON ironic.* TO 'ironic'@'%' \
IDENTIFIED BY 'IRONIC_DBPASSWORD';
+
+.. note::
+ When creating the database to house Ironic, specifically on MySQL/MariaDB,
+ the character set *cannot* be a 4-byte Unicode encoding. This is due to
+ an internal structural constraint. UTF8, on these database platforms,
+ has traditionally meant ``utf8mb3``, short for "UTF-8, 3 byte encoding";
+ however, the platforms are expected to move to ``utf8mb4``, which is
+ incompatible with Ironic.
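
For an existing deployment, the database encoding can be confirmed before
upgrading; a hedged sketch with SQLAlchemy, where the DSN (user, password,
host) is illustrative:

    # Sketch: verify the ironic database is not using 4-byte unicode.
    import sqlalchemy as sa

    engine = sa.create_engine(
        'mysql+pymysql://ironic:IRONIC_DBPASSWORD@localhost/ironic')
    with engine.connect() as conn:
        charset = conn.execute(
            sa.text('SELECT @@character_set_database')).scalar()
    # utf8/utf8mb3 are compatible; utf8mb4 is the problematic default.
    assert charset in ('utf8', 'utf8mb3'), charset
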
diff --git a/ironic/cmd/status.py b/ironic/cmd/status.py
index 10c8a5bfd..d395b985b 100644
--- a/ironic/cmd/status.py
+++ b/ironic/cmd/status.py
@@ -19,7 +19,7 @@ from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import utils
from oslo_upgradecheck import common_checks
from oslo_upgradecheck import upgradecheck
-from sqlalchemy import exc as sa_exc
+import sqlalchemy
from ironic.cmd import dbsync
from ironic.common.i18n import _
@@ -50,7 +50,7 @@ class Checks(upgradecheck.UpgradeCommands):
# when a table is missing, so let's catch it, since it is fatal.
msg = dbsync.DBCommand().check_obj_versions(
ignore_missing_tables=True)
- except sa_exc.NoSuchTableError as e:
+ except sqlalchemy.exc.NoSuchTableError as e:
msg = ('Database table missing. Please ensure you have '
'updated the database schema. Not Found: %s' % e)
return upgradecheck.Result(upgradecheck.Code.FAILURE, details=msg)
@@ -94,6 +94,41 @@ class Checks(upgradecheck.UpgradeCommands):
else:
return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+ def _check_allocations_table(self):
+ msg = None
+ engine = enginefacade.reader.get_engine()
+ if 'mysql' not in str(engine.url):
+ # This test only applies to mysql and database schema
+ # selection.
+ return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+ res = engine.execute("show create table allocations")
+ results = str(res.all()).lower()
+ if 'utf8' not in results:
+ msg = ('The Allocations table is not using UTF8 encoding. '
+ 'This is corrected in later versions of Ironic, where '
+ 'the table character set schema is automatically '
+ 'migrated. Continued use of a non-UTF8 character '
+ 'set may produce unexpected results.')
+
+ if 'innodb' not in results:
+ warning = ('The engine used by MySQL for the allocations '
+ 'table is not the engine intended for the Ironic '
+ 'database tables. This may be the result of an '
+ 'error in the table creation schema, and may '
+ 'require Database Administrator intervention and '
+ 'downtime to dump the table, change its engine to '
+ 'InnoDB, and reload it.')
+ if msg:
+ msg = msg + ' Additionally: ' + warning
+ else:
+ msg = warning
+
+ if msg:
+ return upgradecheck.Result(upgradecheck.Code.WARNING, details=msg)
+ else:
+ return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+
# A tuple of check tuples of (<name of check>, <check function>).
# The name of the check will be used in the output of this command.
# The check function takes no arguments and returns an
@@ -105,6 +140,8 @@ class Checks(upgradecheck.UpgradeCommands):
_upgrade_checks = (
(_('Object versions'), _check_obj_versions),
(_('Database Index Status'), _check_db_indexes),
+ (_('Allocations Name Field Length Check'),
+ _check_allocations_table),
# Victoria -> Wallaby migration
(_('Policy File JSON to YAML Migration'),
(common_checks.check_policy_json, {'conf': CONF})),
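
Note that the new check is substring-based: it lower-cases the ``show create
table`` output and looks for ``utf8`` and ``innodb``. Because ``utf8mb3`` and
``utf8mb4`` both contain the substring ``utf8``, the charset branch gates on
the encoding family rather than byte width; the explicit ``UTF8MB3`` pinning
happens in the migrations below. A standalone sketch of the classification
rule, with illustrative DDL strings:

    # Sketch of the substring rule used by _check_allocations_table.
    def classify(show_create_output: str) -> str:
        results = show_create_output.lower()
        warn = 'utf8' not in results or 'innodb' not in results
        return 'WARNING' if warn else 'SUCCESS'

    assert classify('CREATE TABLE allocations (...) '
                    'ENGINE=InnoDB DEFAULT CHARSET=utf8mb3') == 'SUCCESS'
    assert classify('CREATE TABLE allocations (...) '
                    'ENGINE=MyISAM DEFAULT CHARSET=latin1') == 'WARNING'
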
diff --git a/ironic/conductor/base_manager.py b/ironic/conductor/base_manager.py
index aa684408f..22ebd57f5 100644
--- a/ironic/conductor/base_manager.py
+++ b/ironic/conductor/base_manager.py
@@ -88,10 +88,14 @@ class BaseConductorManager(object):
# clear all locks held by this conductor before registering
self.dbapi.clear_node_reservations_for_conductor(self.host)
- def init_host(self, admin_context=None):
+ def init_host(self, admin_context=None, start_consoles=True,
+ start_allocations=True):
"""Initialize the conductor host.
:param admin_context: the admin context to pass to periodic tasks.
+ :param start_consoles: Whether consoles should be started during
+ initialization.
+ :param start_allocations: Whether allocations should be resumed
+ during initialization.
:raises: RuntimeError when conductor is already running.
:raises: NoDriversLoaded when no drivers are enabled on the conductor.
:raises: DriverNotFound if a driver is enabled that does not exist.
@@ -189,8 +193,9 @@ class BaseConductorManager(object):
# Start consoles if it set enabled in a greenthread.
try:
- self._spawn_worker(self._start_consoles,
- ironic_context.get_admin_context())
+ if start_consoles:
+ self._spawn_worker(self._start_consoles,
+ ironic_context.get_admin_context())
except exception.NoFreeConductorWorker:
LOG.warning('Failed to start worker for restarting consoles.')
@@ -207,8 +212,9 @@ class BaseConductorManager(object):
# Resume allocations that started before the restart.
try:
- self._spawn_worker(self._resume_allocations,
- ironic_context.get_admin_context())
+ if start_allocations:
+ self._spawn_worker(self._resume_allocations,
+ ironic_context.get_admin_context())
except exception.NoFreeConductorWorker:
LOG.warning('Failed to start worker for resuming allocations.')
@@ -539,6 +545,7 @@ class BaseConductorManager(object):
try:
with task_manager.acquire(context, node_uuid, shared=False,
purpose='start console') as task:
+
notify_utils.emit_console_notification(
task, 'console_restore',
obj_fields.NotificationStatus.START)
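
The new keyword arguments let callers, primarily the test helpers updated in
this change, bring a conductor up without spawning the console-restore and
allocation-resume workers. A sketch of the call shape (a real invocation
still needs a configured database and enabled drivers behind it; the host
and topic values are invented):

    # Sketch: start a conductor without resuming consoles or allocations.
    from ironic.common import context as ironic_context
    from ironic.conductor import manager

    service = manager.ConductorManager('fake-host', 'fake-topic')
    service.init_host(admin_context=ironic_context.get_admin_context(),
                      start_consoles=False,
                      start_allocations=False)
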
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index 7e98459ff..cf4988958 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -2203,18 +2203,16 @@ class ConductorManager(base_manager.BaseConductorManager):
"""
LOG.debug('RPC set_console_mode called for node %(node)s with '
'enabled %(enabled)s', {'node': node_id, 'enabled': enabled})
-
- with task_manager.acquire(context, node_id, shared=False,
+ with task_manager.acquire(context, node_id, shared=True,
purpose='setting console mode') as task:
node = task.node
-
task.driver.console.validate(task)
-
if enabled == node.console_enabled:
op = 'enabled' if enabled else 'disabled'
LOG.info("No console action was triggered because the "
"console is already %s", op)
else:
+ task.upgrade_lock()
node.last_error = None
node.save()
task.spawn_after(self._spawn_worker,
@@ -3469,7 +3467,6 @@ class ConductorManager(base_manager.BaseConductorManager):
self.conductor.id):
# Another conductor has taken over, skipping
continue
-
LOG.debug('Taking over allocation %s', allocation.uuid)
allocations.do_allocate(context, allocation)
except Exception:
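
The console change above is a lock-scope optimization: validation and the
already-enabled short-circuit now run under a shared lock, and the lock is
upgraded to exclusive only when state actually changes. The general shape of
the pattern, sketched as a standalone function:

    # Sketch: validate under a shared lock, upgrade only when writing.
    from ironic.conductor import task_manager

    def set_console(context, node_id, enabled):
        with task_manager.acquire(context, node_id, shared=True,
                                  purpose='setting console mode') as task:
            task.driver.console.validate(task)
            if enabled != task.node.console_enabled:
                task.upgrade_lock()  # exclusive from here on
                task.node.last_error = None
                task.node.save()
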
diff --git a/ironic/conf/irmc.py b/ironic/conf/irmc.py
index 7c319e2d8..68ee43b3a 100644
--- a/ironic/conf/irmc.py
+++ b/ironic/conf/irmc.py
@@ -81,9 +81,20 @@ opts = [
help='SNMP polling interval in seconds'),
cfg.StrOpt('snmp_auth_proto',
default='sha',
- choices=[('sha', _('Secure Hash Algorithm 1'))],
+ choices=[('sha', _('Secure Hash Algorithm 1, supported in iRMC '
+ 'S4 and S5.')),
+ ('sha256', ('Secure Hash Algorithm 2 with 256 bits '
+ 'digest, only supported in iRMC S6.')),
+ ('sha384', ('Secure Hash Algorithm 2 with 384 bits '
+ 'digest, only supported in iRMC S6.')),
+ ('sha512', ('Secure Hash Algorithm 2 with 512 bits '
+ 'digest, only supported in iRMC S6.'))],
help=_("SNMPv3 message authentication protocol ID. "
- "Required for version 'v3'. 'sha' is supported.")),
+ "Required for version 'v3'. The valid options are "
+ "'sha', 'sha256', 'sha384' and 'sha512', while 'sha' is "
+ "the only supported protocol in iRMC S4 and S5, and "
+ "from iRMC S6, 'sha256', 'sha384' and 'sha512' are "
+ "supported, but 'sha' is not supported any more.")),
cfg.StrOpt('snmp_priv_proto',
default='aes',
choices=[('aes', _('Advanced Encryption Standard'))],
diff --git a/ironic/db/sqlalchemy/__init__.py b/ironic/db/sqlalchemy/__init__.py
index 88ac079d0..0f792361a 100644
--- a/ironic/db/sqlalchemy/__init__.py
+++ b/ironic/db/sqlalchemy/__init__.py
@@ -13,6 +13,4 @@
from oslo_db.sqlalchemy import enginefacade
# NOTE(dtantsur): we want sqlite as close to a real database as possible.
-# FIXME(stephenfin): we need to remove reliance on autocommit semantics ASAP
-# since it's not compatible with SQLAlchemy 2.0
-enginefacade.configure(sqlite_fk=True, __autocommit=True)
+enginefacade.configure(sqlite_fk=True)
diff --git a/ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py b/ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py
index d47a3d131..1587dc94a 100644
--- a/ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py
+++ b/ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py
@@ -38,8 +38,8 @@ def upgrade():
sa.Column('drivers', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('hostname', name='uniq_conductors0hostname'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_charset='UTF8MB3',
+ mysql_engine='InnoDB',
)
op.create_table(
'chassis',
@@ -51,8 +51,8 @@ def upgrade():
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_chassis0uuid'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
op.create_table(
'nodes',
@@ -77,8 +77,8 @@ def upgrade():
sa.ForeignKeyConstraint(['chassis_id'], ['chassis.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_nodes0uuid'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
op.create_index('node_instance_uuid', 'nodes', ['instance_uuid'],
unique=False)
@@ -95,7 +95,7 @@ def upgrade():
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('address', name='uniq_ports0address'),
sa.UniqueConstraint('uuid', name='uniq_ports0uuid'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
# end Alembic commands
diff --git a/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py b/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py
index 0b5e8ff10..0cdc38fb2 100644
--- a/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py
+++ b/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py
@@ -39,8 +39,8 @@ def upgrade():
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_deploytemplates0uuid'),
sa.UniqueConstraint('name', name='uniq_deploytemplates0name'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
op.create_table(
@@ -62,6 +62,6 @@ def upgrade():
sa.Index('deploy_template_id', 'deploy_template_id'),
sa.Index('deploy_template_steps_interface_idx', 'interface'),
sa.Index('deploy_template_steps_step_idx', 'step'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
diff --git a/ironic/db/sqlalchemy/alembic/versions/48d6c242bb9b_add_node_tags.py b/ironic/db/sqlalchemy/alembic/versions/48d6c242bb9b_add_node_tags.py
index 641419f09..b0e12e56b 100644
--- a/ironic/db/sqlalchemy/alembic/versions/48d6c242bb9b_add_node_tags.py
+++ b/ironic/db/sqlalchemy/alembic/versions/48d6c242bb9b_add_node_tags.py
@@ -36,7 +36,7 @@ def upgrade():
sa.Column('tag', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.PrimaryKeyConstraint('node_id', 'tag'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
op.create_index('node_tags_idx', 'node_tags', ['tag'], unique=False)
diff --git a/ironic/db/sqlalchemy/alembic/versions/5ea1b0d310e_added_port_group_table_and_altered_ports.py b/ironic/db/sqlalchemy/alembic/versions/5ea1b0d310e_added_port_group_table_and_altered_ports.py
index a799c1b1d..7b1eacbe0 100644
--- a/ironic/db/sqlalchemy/alembic/versions/5ea1b0d310e_added_port_group_table_and_altered_ports.py
+++ b/ironic/db/sqlalchemy/alembic/versions/5ea1b0d310e_added_port_group_table_and_altered_ports.py
@@ -42,8 +42,8 @@ def upgrade():
sa.UniqueConstraint('address',
name='uniq_portgroups0address'),
sa.UniqueConstraint('name', name='uniq_portgroups0name'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8')
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3')
op.add_column(u'ports', sa.Column('local_link_connection', sa.Text(),
nullable=True))
op.add_column(u'ports', sa.Column('portgroup_id', sa.Integer(),
diff --git a/ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py b/ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py
index 0d93bed30..33c141caa 100644
--- a/ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py
+++ b/ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py
@@ -37,6 +37,6 @@ def upgrade():
sa.Column('version', sa.String(length=15), nullable=True),
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.PrimaryKeyConstraint('node_id', 'name'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
diff --git a/ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py b/ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py
index 9f5b855ed..748d281e2 100644
--- a/ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py
+++ b/ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py
@@ -48,5 +48,5 @@ def upgrade():
sa.Index('history_node_id_idx', 'node_id'),
sa.Index('history_uuid_idx', 'uuid'),
sa.Index('history_conductor_idx', 'conductor'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8')
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3')
diff --git a/ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py b/ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py
index 8cf30a2d9..66216b722 100644
--- a/ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py
+++ b/ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py
@@ -37,7 +37,7 @@ def upgrade():
sa.Column('trait', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.PrimaryKeyConstraint('node_id', 'trait'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
op.create_index('node_traits_idx', 'node_traits', ['trait'], unique=False)
diff --git a/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py b/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py
index 55560dc68..74ab297a5 100644
--- a/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py
+++ b/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py
@@ -48,7 +48,10 @@ def upgrade():
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', name='uniq_allocations0name'),
- sa.UniqueConstraint('uuid', name='uniq_allocations0uuid')
+ sa.UniqueConstraint('uuid', name='uniq_allocations0uuid'),
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
+
)
op.add_column('nodes', sa.Column('allocation_id', sa.Integer(),
nullable=True))
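
This migration is where the charset problem described in the release notes
originated: unlike the other tables, the allocations table was created
without the ``mysql_engine``/``mysql_charset`` kwargs, so servers whose
default is ``utf8mb4`` (for example Debian's MariaDB) produced a table
incompatible with the rest of the schema. A purely hypothetical sketch of
the kind of repair migration later versions apply (revision boilerplate
omitted; older servers spell the charset ``utf8`` instead of ``utf8mb3``):

    # Hypothetical repair sketch, not the actual Ironic migration.
    from alembic import op

    def upgrade():
        op.execute('ALTER TABLE allocations '
                   'CONVERT TO CHARACTER SET utf8mb3')
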
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index c14719af8..b05af3637 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -19,9 +19,11 @@ import datetime
import json
import threading
+from oslo_concurrency import lockutils
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
+from oslo_db.sqlalchemy import orm as sa_orm
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log
from oslo_utils import netutils
@@ -31,7 +33,7 @@ from oslo_utils import uuidutils
from osprofiler import sqlalchemy as osp_sqlalchemy
import sqlalchemy as sa
from sqlalchemy import or_
-from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
+from sqlalchemy.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Load
from sqlalchemy.orm import selectinload
@@ -53,6 +55,10 @@ LOG = log.getLogger(__name__)
_CONTEXT = threading.local()
+
+RESERVATION_SEMAPHORE = "reserve_node_db_lock"
+synchronized = lockutils.synchronized_with_prefix('ironic-')
+
# NOTE(mgoddard): We limit the number of traits per node to 50 as this is the
# maximum number of traits per resource provider allowed in placement.
MAX_TRAITS_PER_NODE = 50
@@ -83,6 +89,11 @@ def _wrap_session(session):
def _get_node_query_with_all_for_single_node():
"""Return a query object for the Node joined with all relevant fields.
+ Deprecated: This method returns a "Legacy Query" object which,
+ while useful, is considered a legacy construct from SQLAlchemy
+ that may be removed at some point. SQLAlchemy encourages all users
+ to move to the unified ORM/Core Select interface.
+
This method utilizes a joined load query which creates a result set
where corresponding traits, and tags, are joined together in the result
set.
@@ -109,9 +120,10 @@ def _get_node_query_with_all_for_single_node():
Where joins are super inefficient for Ironic is where nodes are being
enumerated, as the above result set pattern is not just for one node, but
- potentially thousands of nodes. In that case, we should use the
- _get_node_query_with_all_for_list helper to return a more appropriate
- query object which will be more efficient for the end user.
+ potentially thousands of nodes. Please consider using _get_node_select
+ which results in a primary query for the nodes, and then performs
+ additional targeted queries for the joined tables, as opposed to
+ performing client side de-duplication.
:returns: a query object.
"""
@@ -127,49 +139,47 @@ def _get_node_query_with_all_for_single_node():
# 2.43 seconds to obtain all nodes from SQLAlchemy (10k nodes)
# 5.15 seconds to obtain all nodes *and* have node objects (10k nodes)
return (model_query(models.Node)
- .options(joinedload('tags'))
- .options(joinedload('traits')))
+ .options(joinedload(models.Node.tags))
+ .options(joinedload(models.Node.traits)))
-def _get_node_query_with_all_for_list():
- """Return a query object for the Node with queried extra fields.
+def _get_node_select():
+ """Returns a SQLAlchemy Select Object for Nodes.
- This method returns a query object joining tags and traits in a pattern
- where the result set is first built, and then the resulting associations
- are queried separately and the objects are reconciled by SQLAlchemy to
- build the composite objects based upon the associations.
+ This method returns a pre-formatted select object which models
+ the entire Node object, allowing callers to operate on a node like
+ they would have with an SQLAlchemy ORM Query Object.
- This results in the following query pattern when the query is executed:
+ This object *also* performs two additional select queries, in the form
+ of a selectin operation, to achieve the same result as a join query
+ but without the join itself and the client-side load.
- select $fields from nodes where x;
- # SQLAlchemy creates a list of associated node IDs.
- select $fields from tags where node_id in ('1', '3', '37268');
- select $fields from traits where node_id in ('1', '3', '37268');
+ This method is best utilized when retrieving lists of nodes.
- SQLAlchemy then returns a result set where the tags and traits are
- composited together efficently as opposed to having to deduplicate
- the result set. This shifts additional load to the database which
- was previously a high overhead operation with-in the conductor...
- which results in a slower conductor.
+ Select objects in this fashion were added as a result of SQLAlchemy 1.4
+ in preparation for SQLAlchemy 2.0's release to provide a unified
+ select interface.
- :returns: a query object.
+ :returns: a select object
"""
- # NOTE(TheJulia): When comparing CI rubs *with* this being the default
- # for all general list operations, at 10k nodes, this pattern appears
- # to be on-par with a 5% variability between the two example benchmark
- # tests. That being said, the test *does* not include tags or traits
- # in it's test data set so client side deduplication is not measured.
- # NOTE(TheJulia): Basic benchmark difference
- # tests data creation: 67.117 seconds
- # 2.32 seconds to obtain all nodes from SQLAlchemy (10k nodes)
- # 4.99 seconds to obtain all nodes *and* have node objects (10k nodes)
- # If this holds true, the required record deduplication with joinedload
- # may be basically the same amount of overhead as requesting the tags
- # and traits separately.
- return (model_query(models.Node)
- .options(selectinload('tags'))
- .options(selectinload('traits')))
+ # NOTE(TheJulia): This returns a query in the SQLAlchemy 1.4->2.0
+ # migration style as query model loading is deprecated.
+
+ # This must use selectinload to avoid a later need to invoke deduplication.
+ return (sa.select(models.Node)
+ .options(selectinload(models.Node.tags),
+ selectinload(models.Node.traits)))
+
+
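
Callers do not invoke ``.all()`` on these select objects; they execute them
through a session, as the rest of this patch does. A short usage sketch,
with names as defined in this module:

    # Sketch: consuming the unified select interface.
    with _session_for_read() as session:
        # scalars() unwraps the Row tuples; unique() guards against
        # duplicated rows from eager loads.
        nodes = session.scalars(_get_node_select().limit(10)).unique().all()
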
+def _get_deploy_template_select_with_steps():
+ """Return a select object for the DeployTemplate joined with steps.
+
+ :returns: a select object.
+ """
+ return sa.select(
+ models.DeployTemplate
+ ).options(selectinload(models.DeployTemplate.steps))
def _get_deploy_template_query_with_steps():
@@ -177,7 +187,8 @@ def _get_deploy_template_query_with_steps():
:returns: a query object.
"""
- return model_query(models.DeployTemplate).options(joinedload('steps'))
+ return model_query(models.DeployTemplate).options(
+ selectinload(models.DeployTemplate.steps))
def model_query(model, *args, **kwargs):
@@ -209,6 +220,26 @@ def add_identity_filter(query, value):
raise exception.InvalidIdentity(identity=value)
+def add_identity_where(op, model, value):
+ """Adds an identity filter to operation for where method.
+
+ Filters results by ID, if supplied value is a valid integer.
+ Otherwise attempts to filter results by UUID.
+
+ :param op: Initial operation to add filter to.
+ i.e. a update or delete statement.
+ :param model: The SQLAlchemy model to apply.
+ :param value: Value for filtering results by.
+ :return: Modified query.
+ """
+ if strutils.is_int_like(value):
+ return op.where(model.id == value)
+ elif uuidutils.is_uuid_like(value):
+ return op.where(model.uuid == value)
+ else:
+ raise exception.InvalidIdentity(identity=value)
+
+
def add_port_filter(query, value):
"""Adds a port-specific filter to a query.
@@ -281,7 +312,7 @@ def add_portgroup_filter(query, value):
if netutils.is_valid_mac(value):
return query.filter_by(address=value)
else:
- return add_identity_filter(query, value)
+ return add_identity_where(query, models.Portgroup, value)
def add_portgroup_filter_by_node(query, value):
@@ -332,8 +363,10 @@ def add_allocation_filter_by_conductor(query, value):
def _paginate_query(model, limit=None, marker=None, sort_key=None,
- sort_dir=None, query=None):
- if not query:
+ sort_dir=None, query=None, return_base_tuple=False):
+ # NOTE(TheJulia): We can't rely on the truthiness of the query object
+ # to tell whether it was supplied, so compare against None explicitly.
+ if query is None:
query = model_query(model)
sort_keys = ['id']
if sort_key and sort_key not in sort_keys:
@@ -345,7 +378,28 @@ def _paginate_query(model, limit=None, marker=None, sort_key=None,
raise exception.InvalidParameterValue(
_('The sort_key value "%(key)s" is an invalid field for sorting')
% {'key': sort_key})
- return query.all()
+ with _session_for_read() as session:
+ # NOTE(TheJulia): SQLAlchemy 2.0 no longer returns pre-uniqued result
+ # sets in ORM mode, so we need to explicitly ask for it to be unique
+ # before returning it to the caller.
+ if isinstance(query, sa_orm.Query):
+ # The classic "Legacy" ORM query object result set which is
+ # deprecated in advance of SQLAlchemy 2.0.
+ return query.all()
+ else:
+ # In this case, we have a sqlalchemy.sql.selectable.Select
+ # (most likely) which utilizes the unified select interface.
+ res = session.execute(query).fetchall()
+ if len(res) == 0:
+ # Return an empty list instead of a class with no objects.
+ return []
+ if return_base_tuple:
+ # The caller expects a tuple, lets just give it to them.
+ return res
+ # Everything is a tuple in a resultset from the unified interface
+ # but for objects, our model expects just object access,
+ # so we extract and return them.
+ return [r[0] for r in res]
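
The ``return_base_tuple`` knob exists because the unified interface always
returns ``Row`` tuples: column-level selects (used for the nodeinfo listing
below) want the raw tuples back, while model-level selects want the ORM
object unwrapped from position 0. In miniature, with names as defined in
this module:

    # Sketch: the two result shapes _paginate_query now reconciles.
    import sqlalchemy as sa

    with _session_for_read() as session:
        rows = session.execute(sa.select(models.Node)).fetchall()
        nodes = [row[0] for row in rows]  # ORM objects, unwrapped
        cols = session.execute(
            sa.select(models.Node.uuid, models.Node.name)).fetchall()
        # cols stays as plain tuples: what return_base_tuple=True expects.
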
def _filter_active_conductors(query, interval=None):
@@ -514,15 +568,16 @@ class Connection(api.Connection):
else:
columns = [getattr(models.Node, c) for c in columns]
- query = model_query(*columns, base_model=models.Node)
+ query = sa.select(*columns)
query = self._add_nodes_filters(query, filters)
return _paginate_query(models.Node, limit, marker,
- sort_key, sort_dir, query)
+ sort_key, sort_dir, query,
+ return_base_tuple=True)
def get_node_list(self, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None, fields=None):
if not fields:
- query = _get_node_query_with_all_for_list()
+ query = _get_node_select()
query = self._add_nodes_filters(query, filters)
return _paginate_query(models.Node, limit, marker,
sort_key, sort_dir, query)
@@ -559,24 +614,25 @@ class Connection(api.Connection):
# with SQLAlchemy.
traits_found = True
use_columns.remove('traits')
-
# Generate the column object list so SQLAlchemy only fulfills
# the requested columns.
use_columns = [getattr(models.Node, c) for c in use_columns]
-
# In essence, traits (and anything else needed to generate the
# composite objects) need to be reconciled without using a join
# as multiple rows can be generated in the result set being returned
# from the database server. In this case, with traits, we use
# a selectinload pattern.
if traits_found:
- query = model_query(models.Node).options(
- Load(models.Node).load_only(*use_columns),
- selectinload(models.Node.traits))
+ query = sa.select(models.Node).options(
+ selectinload(models.Node.traits),
+ Load(models.Node).load_only(*use_columns)
+ )
else:
- query = model_query(models.Node).options(
- Load(models.Node).load_only(*use_columns))
-
+ # Note for others, if you ask for a whole model, it is
+ # modeled, i.e. you can access it as an object.
+ query = sa.select(models.NodeBase).options(
+ Load(models.Node).load_only(*use_columns)
+ )
query = self._add_nodes_filters(query, filters)
return _paginate_query(models.Node, limit, marker,
sort_key, sort_dir, query)
@@ -618,40 +674,85 @@ class Connection(api.Connection):
return mapping
+ @synchronized(RESERVATION_SEMAPHORE, fair=True)
+ def _reserve_node_place_lock(self, tag, node_id, node):
+ try:
+ # NOTE(TheJulia): We explicitly do *not* synch the session
+ # so the other actions in the conductor do not become aware
+ # that the lock is in place and believe they hold the lock.
+ # This necessitates an overall lock in the code side, so
+ # we avoid conditions where two separate threads can believe
+ # they hold locks at the same time.
+ with _session_for_write() as session:
+ res = session.execute(
+ sa.update(models.Node).
+ where(models.Node.id == node.id).
+ where(models.Node.reservation == None). # noqa
+ values(reservation=tag).
+ execution_options(synchronize_session=False))
+ session.flush()
+ node = self._get_node_by_id_no_joins(node.id)
+ # NOTE(TheJulia): In SQLAlchemy 2.0 style, we don't
+ # magically get a changed node as they moved from the
+ # many ways to do things to singular ways to do things.
+ if res.rowcount != 1:
+ # Nothing updated and node exists. Must already be
+ # locked.
+ raise exception.NodeLocked(node=node.uuid,
+ host=node.reservation)
+ except NoResultFound:
+ # In the event that someone has deleted the node on
+ # another thread.
+ raise exception.NodeNotFound(node=node_id)
+
@oslo_db_api.retry_on_deadlock
def reserve_node(self, tag, node_id):
- with _session_for_write():
- query = _get_node_query_with_all_for_single_node()
- query = add_identity_filter(query, node_id)
- count = query.filter_by(reservation=None).update(
- {'reservation': tag}, synchronize_session=False)
+ with _session_for_read():
try:
+ # TODO(TheJulia): Figure out a good way to query
+ # this so that we do it as light as possible without
+ # the full object invocation, which will speed lock
+ # activities. Granted, this is all at the DB level
+ # so maybe that is okay in the grand scheme of things.
+ query = model_query(models.Node)
+ query = add_identity_filter(query, node_id)
node = query.one()
- if count != 1:
- # Nothing updated and node exists. Must already be
- # locked.
- raise exception.NodeLocked(node=node.uuid,
- host=node['reservation'])
- return node
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
+ if node.reservation:
+ # Fail fast, instead of attempt the update.
+ raise exception.NodeLocked(node=node.uuid,
+ host=node.reservation)
+ self._reserve_node_place_lock(tag, node_id, node)
+ # Return a node object as that is the contract for this method.
+ return self.get_node_by_id(node.id)
@oslo_db_api.retry_on_deadlock
def release_node(self, tag, node_id):
- with _session_for_write():
- query = model_query(models.Node)
- query = add_identity_filter(query, node_id)
- # be optimistic and assume we usually release a reservation
- count = query.filter_by(reservation=tag).update(
- {'reservation': None}, synchronize_session=False)
+ with _session_for_read():
try:
- if count != 1:
- node = query.one()
- if node['reservation'] is None:
+ query = model_query(models.Node)
+ query = add_identity_filter(query, node_id)
+ node = query.one()
+ except NoResultFound:
+ raise exception.NodeNotFound(node=node_id)
+ with _session_for_write() as session:
+ try:
+ res = session.execute(
+ sa.update(models.Node).
+ where(models.Node.id == node.id).
+ where(models.Node.reservation == tag).
+ values(reservation=None).
+ execution_options(synchronize_session=False)
+ )
+ node = self.get_node_by_id(node.id)
+ if res.rowcount != 1:
+ if node.reservation is None:
raise exception.NodeNotLocked(node=node.uuid)
else:
raise exception.NodeLocked(node=node.uuid,
host=node['reservation'])
+ session.flush()
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
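
Both reserve and release now hinge on an atomic compare-and-swap ``UPDATE``
whose ``rowcount`` reveals whether this thread won the race, rather than the
old read-then-update with session synchronization. The core of the pattern,
as a sketch:

    # Sketch: optimistic reservation as a compare-and-swap update.
    import sqlalchemy as sa

    def try_reserve(session, node_id, tag):
        res = session.execute(
            sa.update(models.Node)
            .where(models.Node.id == node_id)
            .where(models.Node.reservation.is_(None))
            .values(reservation=tag)
            .execution_options(synchronize_session=False))
        return res.rowcount == 1  # False: someone else holds the lock
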
@@ -677,47 +778,68 @@ class Connection(api.Connection):
node = models.Node()
node.update(values)
- with _session_for_write() as session:
- try:
+ try:
+ with _session_for_write() as session:
session.add(node)
+ # Set tags & traits to [] for new created node
+ # NOTE(mgoddard): We need to set the tags and traits fields in
+ # the session context, otherwise SQLAlchemy will try and fail
+ # to lazy load the attributes, resulting in an exception being
+ # raised.
+ node['tags'] = []
+ node['traits'] = []
session.flush()
- except db_exc.DBDuplicateEntry as exc:
- if 'name' in exc.columns:
- raise exception.DuplicateName(name=values['name'])
- elif 'instance_uuid' in exc.columns:
- raise exception.InstanceAssociated(
- instance_uuid=values['instance_uuid'],
- node=values['uuid'])
- raise exception.NodeAlreadyExists(uuid=values['uuid'])
- # Set tags & traits to [] for new created node
- # NOTE(mgoddard): We need to set the tags and traits fields in the
- # session context, otherwise SQLAlchemy will try and fail to lazy
- # load the attributes, resulting in an exception being raised.
- node['tags'] = []
- node['traits'] = []
+ except db_exc.DBDuplicateEntry as exc:
+ if 'name' in exc.columns:
+ raise exception.DuplicateName(name=values['name'])
+ elif 'instance_uuid' in exc.columns:
+ raise exception.InstanceAssociated(
+ instance_uuid=values['instance_uuid'],
+ node=values['uuid'])
+ raise exception.NodeAlreadyExists(uuid=values['uuid'])
return node
+ def _get_node_by_id_no_joins(self, node_id):
+ # TODO(TheJulia): Maybe replace this with a minimal
+ # "get these three fields" thing.
+ try:
+ with _session_for_read() as session:
+ # Explicitly load NodeBase as the invocation of the
+ # primary model object results in the join query
+ # triggering.
+ return session.execute(
+ sa.select(models.NodeBase).filter_by(id=node_id).limit(1)
+ ).scalars().first()
+ except NoResultFound:
+ raise exception.NodeNotFound(node=node_id)
+
def get_node_by_id(self, node_id):
- query = _get_node_query_with_all_for_single_node()
- query = query.filter_by(id=node_id)
try:
- return query.one()
+ query = _get_node_select()
+ with _session_for_read() as session:
+ return session.scalars(
+ query.filter_by(id=node_id).limit(1)
+ ).unique().one()
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
def get_node_by_uuid(self, node_uuid):
- query = _get_node_query_with_all_for_single_node()
- query = query.filter_by(uuid=node_uuid)
try:
- return query.one()
+ query = _get_node_select()
+ with _session_for_read() as session:
+ return session.scalars(
+ query.filter_by(uuid=node_uuid).limit(1)
+ ).unique().one()
except NoResultFound:
raise exception.NodeNotFound(node=node_uuid)
def get_node_by_name(self, node_name):
- query = _get_node_query_with_all_for_single_node()
- query = query.filter_by(name=node_name)
try:
- return query.one()
+ query = _get_node_select()
+ with _session_for_read() as session:
+ return session.scalars(
+ query.filter_by(name=node_name).limit(1)
+ ).unique().one()
except NoResultFound:
raise exception.NodeNotFound(node=node_name)
@@ -725,15 +847,14 @@ class Connection(api.Connection):
if not uuidutils.is_uuid_like(instance):
raise exception.InvalidUUID(uuid=instance)
- query = _get_node_query_with_all_for_single_node()
- query = query.filter_by(instance_uuid=instance)
-
try:
- result = query.one()
+ query = _get_node_select()
+ with _session_for_read() as session:
+ return session.scalars(
+ query.filter_by(instance_uuid=instance).limit(1)
+ ).unique().one()
except NoResultFound:
- raise exception.InstanceNotFound(instance=instance)
-
- return result
+ raise exception.InstanceNotFound(instance_uuid=instance)
@oslo_db_api.retry_on_deadlock
def destroy_node(self, node_id):
@@ -846,10 +967,14 @@ class Connection(api.Connection):
ref.update(values)
- # Return the updated node model joined with all relevant fields.
- query = _get_node_query_with_all_for_single_node()
- query = add_identity_filter(query, node_id)
- return query.one()
+ # Return the updated node model joined with all relevant fields.
+ query = _get_node_select()
+ query = add_identity_filter(query, node_id)
+ # FIXME(TheJulia): This entire method needs to be re-written to
+ # use the proper execution format for SQLAlchemy 2.0. Likely
+ # A query, independent update, and a re-query on the transaction.
+ with _session_for_read() as session:
+ return session.execute(query).one()[0]
def get_port_by_id(self, port_id):
query = model_query(models.Port).filter_by(id=port_id)
@@ -886,7 +1011,7 @@ class Connection(api.Connection):
def get_port_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None, owner=None,
project=None):
- query = model_query(models.Port)
+ query = sa.select(models.Port)
if owner:
query = add_port_filter_by_node_owner(query, owner)
elif project:
@@ -897,8 +1022,7 @@ class Connection(api.Connection):
def get_ports_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None, owner=None,
project=None):
- query = model_query(models.Port)
- query = query.filter_by(node_id=node_id)
+ query = sa.select(models.Port).where(models.Port.node_id == node_id)
if owner:
query = add_port_filter_by_node_owner(query, owner)
elif project:
@@ -909,8 +1033,10 @@ class Connection(api.Connection):
def get_ports_by_portgroup_id(self, portgroup_id, limit=None, marker=None,
sort_key=None, sort_dir=None, owner=None,
project=None):
- query = model_query(models.Port)
- query = query.filter_by(portgroup_id=portgroup_id)
+ query = sa.select(models.Port).where(
+ models.Port.portgroup_id == portgroup_id
+ )
+
if owner:
query = add_port_filter_by_node_owner(query, owner)
elif project:
@@ -925,15 +1051,15 @@ class Connection(api.Connection):
port = models.Port()
port.update(values)
- with _session_for_write() as session:
- try:
+ try:
+ with _session_for_write() as session:
session.add(port)
session.flush()
- except db_exc.DBDuplicateEntry as exc:
- if 'address' in exc.columns:
- raise exception.MACAlreadyExists(mac=values['address'])
- raise exception.PortAlreadyExists(uuid=values['uuid'])
- return port
+ except db_exc.DBDuplicateEntry as exc:
+ if 'address' in exc.columns:
+ raise exception.MACAlreadyExists(mac=values['address'])
+ raise exception.PortAlreadyExists(uuid=values['uuid'])
+ return port
@oslo_db_api.retry_on_deadlock
def update_port(self, port_id, values):
@@ -941,7 +1067,6 @@ class Connection(api.Connection):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Port.")
raise exception.InvalidParameterValue(err=msg)
-
try:
with _session_for_write() as session:
query = model_query(models.Port)
@@ -1010,7 +1135,7 @@ class Connection(api.Connection):
def get_portgroups_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None, project=None):
query = model_query(models.Portgroup)
- query = query.filter_by(node_id=node_id)
+ query = query.where(models.Portgroup.node_id == node_id)
if project:
query = add_portgroup_filter_by_node_project(query, project)
return _paginate_query(models.Portgroup, limit, marker,
@@ -1067,34 +1192,40 @@ class Connection(api.Connection):
def destroy_portgroup(self, portgroup_id):
def portgroup_not_empty(session):
"""Checks whether the portgroup does not have ports."""
-
- query = model_query(models.Port)
- query = add_port_filter_by_portgroup(query, portgroup_id)
-
- return query.count() != 0
+ with _session_for_read() as session:
+ return session.scalar(
+ sa.select(
+ sa.func.count(models.Port.id)
+ ).where(models.Port.portgroup_id == portgroup_id)) != 0
with _session_for_write() as session:
if portgroup_not_empty(session):
raise exception.PortgroupNotEmpty(portgroup=portgroup_id)
- query = model_query(models.Portgroup, session=session)
- query = add_identity_filter(query, portgroup_id)
+ query = sa.delete(models.Portgroup)
+ query = add_identity_where(query, models.Portgroup, portgroup_id)
- count = query.delete()
+ count = session.execute(query).rowcount
if count == 0:
raise exception.PortgroupNotFound(portgroup=portgroup_id)
def get_chassis_by_id(self, chassis_id):
- query = model_query(models.Chassis).filter_by(id=chassis_id)
+ query = sa.select(models.Chassis).where(
+ models.Chassis.id == chassis_id)
+
try:
- return query.one()
+ with _session_for_read() as session:
+ return session.execute(query).one()[0]
except NoResultFound:
raise exception.ChassisNotFound(chassis=chassis_id)
def get_chassis_by_uuid(self, chassis_uuid):
- query = model_query(models.Chassis).filter_by(uuid=chassis_uuid)
+ query = sa.select(models.Chassis).where(
+ models.Chassis.uuid == chassis_uuid)
+
try:
- return query.one()
+ with _session_for_read() as session:
+ return session.execute(query).one()[0]
except NoResultFound:
raise exception.ChassisNotFound(chassis=chassis_uuid)
@@ -1110,13 +1241,13 @@ class Connection(api.Connection):
chassis = models.Chassis()
chassis.update(values)
- with _session_for_write() as session:
- try:
+ try:
+ with _session_for_write() as session:
session.add(chassis)
session.flush()
- except db_exc.DBDuplicateEntry:
- raise exception.ChassisAlreadyExists(uuid=values['uuid'])
- return chassis
+ except db_exc.DBDuplicateEntry:
+ raise exception.ChassisAlreadyExists(uuid=values['uuid'])
+ return chassis
@oslo_db_api.retry_on_deadlock
def update_chassis(self, chassis_id, values):
@@ -1127,7 +1258,7 @@ class Connection(api.Connection):
with _session_for_write():
query = model_query(models.Chassis)
- query = add_identity_filter(query, chassis_id)
+ query = add_identity_where(query, models.Chassis, chassis_id)
count = query.update(values)
if count != 1:
@@ -1183,27 +1314,32 @@ class Connection(api.Connection):
def get_conductor(self, hostname, online=True):
try:
- query = model_query(models.Conductor).filter_by(hostname=hostname)
+ query = sa.select(models.Conductor).where(
+ models.Conductor.hostname == hostname)
if online is not None:
- query = query.filter_by(online=online)
- return query.one()
+ query = query.where(models.Conductor.online == online)
+ with _session_for_read() as session:
+ res = session.execute(query).one()[0]
+ return res
except NoResultFound:
raise exception.ConductorNotFound(conductor=hostname)
@oslo_db_api.retry_on_deadlock
def unregister_conductor(self, hostname):
- with _session_for_write():
- query = (model_query(models.Conductor)
- .filter_by(hostname=hostname, online=True))
- count = query.update({'online': False})
+ with _session_for_write() as session:
+ query = sa.update(models.Conductor).where(
+ models.Conductor.hostname == hostname,
+ models.Conductor.online == True).values( # noqa
+ online=False)
+ count = session.execute(query).rowcount
if count == 0:
raise exception.ConductorNotFound(conductor=hostname)
@oslo_db_api.retry_on_deadlock
def touch_conductor(self, hostname):
with _session_for_write():
- query = (model_query(models.Conductor)
- .filter_by(hostname=hostname))
+ query = model_query(models.Conductor)
+ query = query.where(models.Conductor.hostname == hostname)
# since we're not changing any other field, manually set updated_at
# and since we're heartbeating, make sure that online=True
count = query.update({'updated_at': timeutils.utcnow(),
@@ -1278,7 +1414,7 @@ class Connection(api.Connection):
def list_conductor_hardware_interfaces(self, conductor_id):
query = (model_query(models.ConductorHardwareInterfaces)
- .filter_by(conductor_id=conductor_id))
+ .where(models.ConductorHardwareInterfaces.conductor_id == conductor_id)) # noqa
return query.all()
def list_hardware_type_interfaces(self, hardware_types):
@@ -1293,6 +1429,13 @@ class Connection(api.Connection):
def register_conductor_hardware_interfaces(self, conductor_id, interfaces):
with _session_for_write() as session:
try:
+ try:
+ session.begin()
+ except sa.exc.InvalidRequestError:
+ # When running unit tests, the transaction reports as
+ # already started, whereas in service startup this is
+ # the first write op.
+ pass
for iface in interfaces:
conductor_hw_iface = models.ConductorHardwareInterfaces()
conductor_hw_iface['conductor_id'] = conductor_id
@@ -1322,7 +1465,8 @@ class Connection(api.Connection):
raise exception.NodeNotFound(node=node_id)
def _check_node_exists(self, node_id):
- if not model_query(models.Node).filter_by(id=node_id).scalar():
+ if not model_query(models.Node).where(
+ models.Node.id == node_id).scalar():
raise exception.NodeNotFound(node=node_id)
@oslo_db_api.retry_on_deadlock
@@ -1383,12 +1527,17 @@ class Connection(api.Connection):
return model_query(q.exists()).scalar()
def get_node_by_port_addresses(self, addresses):
- q = _get_node_query_with_all_for_single_node()
+ q = _get_node_select()
q = q.distinct().join(models.Port)
q = q.filter(models.Port.address.in_(addresses))
try:
- return q.one()
+ # FIXME(TheJulia): This needs to be updated to be
+ # an explicit query to identify the node for SQLAlchemy.
+ with _session_for_read() as session:
+ # Always return the first element, since we always
+ # get a tuple from sqlalchemy.
+ return session.execute(q).one()[0]
except NoResultFound:
raise exception.NodeNotFound(
_('Node with port addresses %s was not found')
@@ -1424,7 +1573,8 @@ class Connection(api.Connection):
def get_volume_connectors_by_node_id(self, node_id, limit=None,
marker=None, sort_key=None,
sort_dir=None, project=None):
- query = model_query(models.VolumeConnector).filter_by(node_id=node_id)
+ query = model_query(models.VolumeConnector).where(
+ models.VolumeConnector.node_id == node_id)
if project:
add_volume_conn_filter_by_node_project(query, project)
return _paginate_query(models.VolumeConnector, limit, marker,
@@ -1492,7 +1642,8 @@ class Connection(api.Connection):
sort_key, sort_dir, query)
def get_volume_target_by_id(self, db_id):
- query = model_query(models.VolumeTarget).filter_by(id=db_id)
+ query = model_query(models.VolumeTarget).where(
+ models.VolumeTarget.id == db_id)
try:
return query.one()
except NoResultFound:
@@ -1517,7 +1668,8 @@ class Connection(api.Connection):
def get_volume_targets_by_volume_id(self, volume_id, limit=None,
marker=None, sort_key=None,
sort_dir=None, project=None):
- query = model_query(models.VolumeTarget).filter_by(volume_id=volume_id)
+ query = model_query(models.VolumeTarget).where(
+ models.VolumeTarget.volume_id == volume_id)
if project:
query = add_volume_target_filter_by_node_project(query, project)
return _paginate_query(models.VolumeTarget, limit, marker, sort_key,
@@ -1586,6 +1738,8 @@ class Connection(api.Connection):
if not versions:
return []
+ if model_name == 'Node':
+ model_name = 'NodeBase'
model = models.get_class(model_name)
# NOTE(rloo): .notin_ does not handle null:
@@ -1614,7 +1768,11 @@ class Connection(api.Connection):
"""
object_versions = release_mappings.get_object_versions()
table_missing_ok = False
- for model in models.Base.__subclasses__():
+ models_to_check = models.Base.__subclasses__()
+ # We need to append Node to the list as it is a subclass of
+ # NodeBase, which is intentional to delineate excess queries.
+ models_to_check.append(models.Node)
+ for model in models_to_check:
if model.__name__ not in object_versions:
continue
@@ -1688,8 +1846,9 @@ class Connection(api.Connection):
mapping = release_mappings.RELEASE_MAPPING['master']['objects']
total_to_migrate = 0
total_migrated = 0
-
- sql_models = [model for model in models.Base.__subclasses__()
+ all_models = models.Base.__subclasses__()
+ all_models.append(models.Node)
+ sql_models = [model for model in all_models
if model.__name__ in mapping]
for model in sql_models:
version = mapping[model.__name__][0]
@@ -2221,29 +2380,29 @@ class Connection(api.Connection):
# this does not work with PostgreSQL.
query = model_query(models.DeployTemplate)
query = add_identity_filter(query, template_id)
- try:
- ref = query.with_for_update().one()
- except NoResultFound:
- raise exception.DeployTemplateNotFound(
- template=template_id)
-
+ ref = query.with_for_update().one()
# First, update non-step columns.
steps = values.pop('steps', None)
ref.update(values)
-
# If necessary, update steps.
if steps is not None:
self._update_deploy_template_steps(session, ref.id, steps)
+ session.flush()
+ with _session_for_read() as session:
# Return the updated template joined with all relevant fields.
- query = _get_deploy_template_query_with_steps()
+ query = _get_deploy_template_select_with_steps()
query = add_identity_filter(query, template_id)
- return query.one()
+ return session.execute(query).one()[0]
except db_exc.DBDuplicateEntry as e:
if 'name' in e.columns:
raise exception.DeployTemplateDuplicateName(
name=values['name'])
raise
+ except NoResultFound:
+ # TODO(TheJulia): What would unified core raise?!?
+ raise exception.DeployTemplateNotFound(
+ template=template_id)
@oslo_db_api.retry_on_deadlock
def destroy_deploy_template(self, template_id):
@@ -2257,21 +2416,26 @@ class Connection(api.Connection):
def _get_deploy_template(self, field, value):
"""Helper method for retrieving a deploy template."""
- query = (_get_deploy_template_query_with_steps()
- .filter_by(**{field: value}))
+ query = (_get_deploy_template_select_with_steps()
+ .where(field == value))
try:
- return query.one()
+ # FIXME(TheJulia): This needs to be fixed for SQLAlchemy 2.0
+ with _session_for_read() as session:
+ return session.execute(query).one()[0]
except NoResultFound:
raise exception.DeployTemplateNotFound(template=value)
def get_deploy_template_by_id(self, template_id):
- return self._get_deploy_template('id', template_id)
+ return self._get_deploy_template(models.DeployTemplate.id,
+ template_id)
def get_deploy_template_by_uuid(self, template_uuid):
- return self._get_deploy_template('uuid', template_uuid)
+ return self._get_deploy_template(models.DeployTemplate.uuid,
+ template_uuid)
def get_deploy_template_by_name(self, template_name):
- return self._get_deploy_template('name', template_name)
+ return self._get_deploy_template(models.DeployTemplate.name,
+ template_name)
def get_deploy_template_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
@@ -2280,9 +2444,14 @@ class Connection(api.Connection):
sort_key, sort_dir, query)
def get_deploy_template_list_by_names(self, names):
- query = (_get_deploy_template_query_with_steps()
- .filter(models.DeployTemplate.name.in_(names)))
- return query.all()
+ query = _get_deploy_template_select_with_steps()
+ with _session_for_read() as session:
+ res = session.execute(
+ query.where(
+ models.DeployTemplate.name.in_(names)
+ )
+ ).all()
+ return [r[0] for r in res]
@oslo_db_api.retry_on_deadlock
def create_node_history(self, values):
@@ -2329,7 +2498,7 @@ class Connection(api.Connection):
def get_node_history_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.NodeHistory)
- query = query.filter_by(node_id=node_id)
+ query = query.where(models.NodeHistory.node_id == node_id)
return _paginate_query(models.NodeHistory, limit, marker,
sort_key, sort_dir, query)
@@ -2396,6 +2565,9 @@ class Connection(api.Connection):
# Uses input entry list, selects entries matching those ids
# then deletes them and does not synchronize the session so
# sqlalchemy doesn't do extra un-necessary work.
+ # NOTE(TheJulia): This is "legacy" syntax, but it is still
+ # valid and under the hood SQLAlchemy rewrites the form into
+ # a delete syntax.
session.query(
models.NodeHistory
).filter(
@@ -2414,13 +2586,12 @@ class Connection(api.Connection):
# literally have the DB do *all* of the world, so no
# client side ops occur. The column is also indexed,
# which means this will be an index based response.
- # TODO(TheJulia): This might need to be revised for
- # SQLAlchemy 2.0 as it should be a scaler select and count
- # instead.
- return session.query(
- models.Node.provision_state
- ).filter(
- or_(
- models.Node.provision_state == v for v in state
+ return session.scalar(
+ sa.select(
+ sa.func.count(models.Node.id)
+ ).filter(
+ or_(
+ models.Node.provision_state == v for v in state
+ )
)
- ).count()
+ )
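As a rough sketch of the scalar-count form above (assuming a session and the Node model; ``in_()`` is shown as an equivalent to the ``or_`` expansion):

    import sqlalchemy as sa

    stmt = sa.select(sa.func.count(models.Node.id)).where(
        models.Node.provision_state.in_(['deploying', 'cleaning']))
    count = session.scalar(stmt)  # returns a plain integer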
diff --git a/ironic/db/sqlalchemy/models.py b/ironic/db/sqlalchemy/models.py
index 8f3f6a564..fad29f095 100644
--- a/ironic/db/sqlalchemy/models.py
+++ b/ironic/db/sqlalchemy/models.py
@@ -19,6 +19,7 @@ SQLAlchemy models for baremetal data.
"""
from os import path
+from typing import List
from urllib import parse as urlparse
from oslo_db import options as db_options
@@ -27,8 +28,8 @@ from oslo_db.sqlalchemy import types as db_types
from sqlalchemy import Boolean, Column, DateTime, false, Index
from sqlalchemy import ForeignKey, Integer
from sqlalchemy import schema, String, Text
-from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
+from sqlalchemy.orm import declarative_base
from ironic.common import exception
from ironic.common.i18n import _
@@ -116,8 +117,8 @@ class ConductorHardwareInterfaces(Base):
default = Column(Boolean, default=False, nullable=False)
-class Node(Base):
- """Represents a bare metal node."""
+class NodeBase(Base):
+ """Represents a base bare metal node."""
__tablename__ = 'nodes'
__table_args__ = (
@@ -213,6 +214,32 @@ class Node(Base):
secure_boot = Column(Boolean, nullable=True)
+class Node(NodeBase):
+ """Represents a bare metal node."""
+
+ # NOTE(TheJulia): The purpose of the delineation between NodeBase and Node
+ # is to draw a hard line for queries where we do not need to needlessly
+ # populate additional information which would normally be populated upon
+ # access of the property. In this case, Traits and Tags.
+ # The other reason we do this is that, because these are generally "joined"
+ # data structures, we cannot de-duplicate node objects containing unhashable
+ # dict data structures.
+
+ # NOTE(TheJulia): The choice of selectin lazy population is intentional
+ # as it causes a separate batched SELECT to occur, skipping the need for
+ # deduplication in general. This puts a slightly higher query load on the
+ # DB server, but means *far* less gets shipped over the wire in the end.
+ traits: orm.Mapped[List['NodeTrait']] = orm.relationship( # noqa
+ "NodeTrait",
+ back_populates="node",
+ lazy="selectin")
+
+ tags: orm.Mapped[List['NodeTag']] = orm.relationship( # noqa
+ "NodeTag",
+ back_populates="node",
+ lazy="selectin")
+
+
class Port(Base):
"""Represents a network port of a bare metal node."""
@@ -270,7 +297,6 @@ class NodeTag(Base):
node = orm.relationship(
"Node",
- backref='tags',
primaryjoin='and_(NodeTag.node_id == Node.id)',
foreign_keys=node_id
)
@@ -327,7 +353,6 @@ class NodeTrait(Base):
trait = Column(String(255), primary_key=True, nullable=False)
node = orm.relationship(
"Node",
- backref='traits',
primaryjoin='and_(NodeTrait.node_id == Node.id)',
foreign_keys=node_id
)
@@ -389,6 +414,10 @@ class DeployTemplate(Base):
uuid = Column(String(36))
name = Column(String(255), nullable=False)
extra = Column(db_types.JsonEncodedDict)
+ steps: orm.Mapped[List['DeployTemplateStep']] = orm.relationship( # noqa
+ "DeployTemplateStep",
+ back_populates="deploy_template",
+ lazy="selectin")
class DeployTemplateStep(Base):
@@ -409,7 +438,6 @@ class DeployTemplateStep(Base):
priority = Column(Integer, nullable=False)
deploy_template = orm.relationship(
"DeployTemplate",
- backref='steps',
primaryjoin=(
'and_(DeployTemplateStep.deploy_template_id == '
'DeployTemplate.id)'),
diff --git a/ironic/drivers/modules/boot_mode_utils.py b/ironic/drivers/modules/boot_mode_utils.py
index bb3ccc3a4..e48cea32d 100644
--- a/ironic/drivers/modules/boot_mode_utils.py
+++ b/ironic/drivers/modules/boot_mode_utils.py
@@ -137,7 +137,7 @@ def sync_boot_mode(task):
elif ironic_boot_mode != bm_boot_mode:
msg = (_("Boot mode %(node_boot_mode)s currently configured "
"on node %(uuid)s does not match the boot mode "
- "%(ironic_boot_mode)s requested for provisioning."
+ "%(ironic_boot_mode)s requested for provisioning. "
"Attempting to set node boot mode to %(ironic_boot_mode)s.") %
{'uuid': node.uuid, 'node_boot_mode': bm_boot_mode,
'ironic_boot_mode': ironic_boot_mode})
diff --git a/ironic/drivers/modules/irmc/common.py b/ironic/drivers/modules/irmc/common.py
index 7a8fc0f1d..2df85eeb6 100644
--- a/ironic/drivers/modules/irmc/common.py
+++ b/ironic/drivers/modules/irmc/common.py
@@ -83,7 +83,9 @@ SNMP_V3_REQUIRED_PROPERTIES = {
SNMP_V3_OPTIONAL_PROPERTIES = {
'irmc_snmp_auth_proto': _("SNMPv3 message authentication protocol ID. "
"Required for version 'v3'. "
- "'sha' is supported."),
+ "If using iRMC S4/S5, only 'sha' is supported."
+ "If using iRMC S6, the valid options are "
+ "'sha256', 'sha384', 'sha512'."),
'irmc_snmp_priv_proto': _("SNMPv3 message privacy (encryption) protocol "
"ID. Required for version 'v3'. "
"'aes' is supported."),
@@ -243,7 +245,8 @@ def _parse_snmp_driver_info(node, info):
def _parse_snmp_v3_info(node, info):
snmp_info = {}
missing_info = []
- valid_values = {'irmc_snmp_auth_proto': ['sha'],
+ valid_values = {'irmc_snmp_auth_proto': ['sha', 'sha256', 'sha384',
+ 'sha512'],
'irmc_snmp_priv_proto': ['aes']}
valid_protocols = {'irmc_snmp_auth_proto': snmp.snmp_auth_protocols,
'irmc_snmp_priv_proto': snmp.snmp_priv_protocols}
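As a hypothetical configuration sketch for the new values (the ``irmc_snmp_user`` key name is assumed from the required-properties set; the rest follow the property descriptions above):

    driver_info = {
        'irmc_snmp_version': 'v3',
        'irmc_snmp_user': 'snmpuser',      # assumed required property name
        'irmc_snmp_auth_proto': 'sha256',  # iRMC S6: sha256/sha384/sha512
        'irmc_snmp_priv_proto': 'aes',
    }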
diff --git a/ironic/drivers/modules/irmc/inspect.py b/ironic/drivers/modules/irmc/inspect.py
index 9b6bff5bc..4b250cdfd 100644
--- a/ironic/drivers/modules/irmc/inspect.py
+++ b/ironic/drivers/modules/irmc/inspect.py
@@ -191,9 +191,14 @@ def _inspect_hardware(node, existing_traits=None, **kwargs):
except (scci.SCCIInvalidInputError,
scci.SCCIClientError,
exception.SNMPFailure) as e:
+ advice = ""
+ if ("SNMP operation" in str(e)):
+ advice = ("The SNMP related parameters' value may be different "
+ "with the server, please check if you have set them "
+ "correctly.")
error = (_("Inspection failed for node %(node_id)s "
- "with the following error: %(error)s") %
- {'node_id': node.uuid, 'error': e})
+ "with the following error: %(error)s. (advice)s") %
+ {'node_id': node.uuid, 'error': e, 'advice': advice})
raise exception.HardwareInspectionFailure(error=error)
return props, macs, new_traits
diff --git a/ironic/drivers/modules/irmc/power.py b/ironic/drivers/modules/irmc/power.py
index 28041d835..7cde9cdac 100644
--- a/ironic/drivers/modules/irmc/power.py
+++ b/ironic/drivers/modules/irmc/power.py
@@ -203,9 +203,12 @@ def _set_power_state(task, target_state, timeout=None):
_wait_power_state(task, states.SOFT_REBOOT, timeout=timeout)
except exception.SNMPFailure as snmp_exception:
+ advice = ("The SNMP related parameters' value may be different with "
+ "the server, please check if you have set them correctly.")
LOG.error("iRMC failed to acknowledge the target state "
- "for node %(node_id)s. Error: %(error)s",
- {'node_id': node.uuid, 'error': snmp_exception})
+ "for node %(node_id)s. Error: %(error)s. %(advice)s",
+ {'node_id': node.uuid, 'error': snmp_exception,
+ 'advice': advice})
raise exception.IRMCOperationError(operation=target_state,
error=snmp_exception)
diff --git a/ironic/tests/unit/cmd/test_status.py b/ironic/tests/unit/cmd/test_status.py
index f776e2d51..2d044cc13 100644
--- a/ironic/tests/unit/cmd/test_status.py
+++ b/ironic/tests/unit/cmd/test_status.py
@@ -14,6 +14,7 @@
from unittest import mock
+from oslo_db import sqlalchemy
from oslo_upgradecheck.upgradecheck import Code
from ironic.cmd import dbsync
@@ -38,3 +39,84 @@ class TestUpgradeChecks(db_base.DbTestCase):
check_result = self.cmd._check_obj_versions()
self.assertEqual(Code.FAILURE, check_result.code)
self.assertEqual(msg, check_result.details)
+
+ def test__check_allocations_table_ok(self):
+ check_result = self.cmd._check_allocations_table()
+ self.assertEqual(Code.SUCCESS,
+ check_result.code)
+
+ @mock.patch.object(sqlalchemy.enginefacade.reader,
+ 'get_engine', autospec=True)
+ def test__check_allocations_table_latin1(self, mock_reader):
+ mock_engine = mock.Mock()
+ mock_res = mock.Mock()
+ mock_res.all.return_value = (
+ '... ENGINE=InnoDB DEFAULT CHARSET=latin1',
+ )
+ mock_engine.url = '..mysql..'
+ mock_engine.execute.return_value = mock_res
+ mock_reader.return_value = mock_engine
+ check_result = self.cmd._check_allocations_table()
+ self.assertEqual(Code.WARNING,
+ check_result.code)
+ expected_msg = ('The Allocations table is not using UTF8 '
+ 'encoding. This is corrected in later versions '
+ 'of Ironic, where the table character set schema '
+ 'is automatically migrated. Continued use of a '
+ 'non-UTF8 character set may produce unexpected '
+ 'results.')
+ self.assertEqual(expected_msg, check_result.details)
+
+ @mock.patch.object(sqlalchemy.enginefacade.reader,
+ 'get_engine', autospec=True)
+ def test__check_allocations_table_myisam(self, mock_reader):
+ mock_engine = mock.Mock()
+ mock_res = mock.Mock()
+ mock_engine.url = '..mysql..'
+ mock_res.all.return_value = (
+ '... ENGINE=MyISAM DEFAULT CHARSET=utf8',
+ )
+ mock_engine.execute.return_value = mock_res
+ mock_reader.return_value = mock_engine
+ check_result = self.cmd._check_allocations_table()
+ self.assertEqual(Code.WARNING,
+ check_result.code)
+ expected_msg = ('The engine used by MySQL for the allocations '
+ 'table is not the intended engine for the Ironic '
+ 'database tables to use. This may have been a '
+ 'result of an error with the table creation schema. '
+ 'This may require Database Administrator '
+ 'intervention and downtime to dump, modify the '
+ 'table engine to utilize InnoDB, and reload the '
+ 'allocations table to utilize the InnoDB engine.')
+ self.assertEqual(expected_msg, check_result.details)
+
+ @mock.patch.object(sqlalchemy.enginefacade.reader,
+ 'get_engine', autospec=True)
+ def test__check_allocations_table_myisam_both(self, mock_reader):
+ mock_engine = mock.Mock()
+ mock_res = mock.Mock()
+ mock_engine.url = '..mysql..'
+ mock_res.all.return_value = (
+ '... ENGINE=MyISAM DEFAULT CHARSET=latin1',
+ )
+ mock_engine.execute.return_value = mock_res
+ mock_reader.return_value = mock_engine
+ check_result = self.cmd._check_allocations_table()
+ self.assertEqual(Code.WARNING,
+ check_result.code)
+ expected_msg = ('The Allocations table is not using UTF8 '
+ 'encoding. This is corrected in later versions '
+ 'of Ironic, where the table character set schema '
+ 'is automatically migrated. Continued use of a '
+ 'non-UTF8 character set may produce unexpected '
+ 'results. Additionally: '
+ 'The engine used by MySQL for the allocations '
+ 'table is not the intended engine for the Ironic '
+ 'database tables to use. This may have been a '
+ 'result of an error with the table creation schema. '
+ 'This may require Database Administrator '
+ 'intervention and downtime to dump, modify the '
+ 'table engine to utilize InnoDB, and reload the '
+ 'allocations table to utilize the InnoDB engine.')
+ self.assertEqual(expected_msg, check_result.details)
diff --git a/ironic/tests/unit/common/test_release_mappings.py b/ironic/tests/unit/common/test_release_mappings.py
index 96dbdfa22..dad536257 100644
--- a/ironic/tests/unit/common/test_release_mappings.py
+++ b/ironic/tests/unit/common/test_release_mappings.py
@@ -91,13 +91,17 @@ class ReleaseMappingsTestCase(base.TestCase):
def test_contains_all_db_objects(self):
self.assertIn('master', release_mappings.RELEASE_MAPPING)
- model_names = set((s.__name__ for s in models.Base.__subclasses__()))
+ use_models = models.Base.__subclasses__()
+ use_models.append(models.Node)
+ model_names = set((s.__name__ for s in use_models))
# NOTE(xek): As a rule, all models which can be changed between
# releases or are sent through RPC should have their counterpart
# versioned objects. Do not add an exception for such objects,
# initialize them with the version 1.0 instead.
+ # NodeBase is also excluded as it is covered by Node.
exceptions = set(['NodeTag', 'ConductorHardwareInterfaces',
- 'NodeTrait', 'DeployTemplateStep'])
+ 'NodeTrait', 'DeployTemplateStep',
+ 'NodeBase'])
model_names -= exceptions
# NodeTrait maps to two objects
model_names |= set(['Trait', 'TraitList'])
diff --git a/ironic/tests/unit/conductor/mgr_utils.py b/ironic/tests/unit/conductor/mgr_utils.py
index 4451d7a15..8ee1fd1f9 100644
--- a/ironic/tests/unit/conductor/mgr_utils.py
+++ b/ironic/tests/unit/conductor/mgr_utils.py
@@ -127,7 +127,12 @@ class ServiceSetUpMixin(object):
def setUp(self):
super(ServiceSetUpMixin, self).setUp()
self.hostname = 'test-host'
- self.config(node_locked_retry_attempts=1, group='conductor')
+ # Relies upon the default number of "NodeLocked" retries since,
+ # in unit testing, SQLite is not operated in a transactional
+ # way and utilizes asynchronous IO. Locking, in particular, can
+ # detect this, and it can cause some false or delayed impressions
+ # of lock status, causing lock failures.
+ self.config(node_locked_retry_attempts=3, group='conductor')
self.config(node_locked_retry_interval=0, group='conductor')
self.service = manager.ConductorManager(self.hostname, 'test-topic')
@@ -139,15 +144,18 @@ class ServiceSetUpMixin(object):
return
self.service.del_host()
- def _start_service(self, start_periodic_tasks=False):
+ def _start_service(self, start_periodic_tasks=False, start_consoles=True,
+ start_allocations=True):
if start_periodic_tasks:
- self.service.init_host()
+ self.service.init_host(start_consoles=start_consoles,
+ start_allocations=start_allocations)
else:
with mock.patch.object(periodics, 'PeriodicWorker', autospec=True):
with mock.patch.object(pxe_utils, 'place_common_config',
autospec=True):
self.service.prepare_host()
- self.service.init_host()
+ self.service.init_host(start_consoles=start_consoles,
+ start_allocations=start_allocations)
self.addCleanup(self._stop_service)
diff --git a/ironic/tests/unit/conductor/test_allocations.py b/ironic/tests/unit/conductor/test_allocations.py
index d063cd13a..6d77bd65b 100644
--- a/ironic/tests/unit/conductor/test_allocations.py
+++ b/ironic/tests/unit/conductor/test_allocations.py
@@ -209,7 +209,7 @@ class AllocationTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
state='allocating',
conductor_affinity=dead_conductor.id)
- self._start_service()
+ self._start_service(start_allocations=False)
with mock.patch.object(self.dbapi, 'get_offline_conductors',
autospec=True) as mock_conds:
mock_conds.return_value = [dead_conductor.id]
diff --git a/ironic/tests/unit/conductor/test_base_manager.py b/ironic/tests/unit/conductor/test_base_manager.py
index f92c6e58c..e69003123 100644
--- a/ironic/tests/unit/conductor/test_base_manager.py
+++ b/ironic/tests/unit/conductor/test_base_manager.py
@@ -494,9 +494,11 @@ class StartConsolesTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
obj_utils.create_test_node(
self.context,
uuid=uuidutils.generate_uuid(),
- driver='fake-hardware'
+ driver='fake-hardware',
)
- self._start_service()
+ # Enable consoles *after* service has started, otherwise it races
+ # as the service startup also launches consoles.
+ self._start_service(start_consoles=False)
self.service._start_consoles(self.context)
self.assertEqual(2, mock_start_console.call_count)
mock_notify.assert_has_calls(
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index 5d84dbbef..fd206e36d 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -7256,7 +7256,7 @@ class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_take_over,
mock_start_console,
mock_notify):
- self._start_service()
+ self._start_service(start_consoles=False)
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
di_info = node.driver_internal_info
diff --git a/ironic/tests/unit/db/sqlalchemy/test_migrations.py b/ironic/tests/unit/db/sqlalchemy/test_migrations.py
index d2af35ceb..90669dbd9 100644
--- a/ironic/tests/unit/db/sqlalchemy/test_migrations.py
+++ b/ironic/tests/unit/db/sqlalchemy/test_migrations.py
@@ -36,6 +36,7 @@ For postgres on Ubuntu this can be done with the following commands:
import collections
import contextlib
+import json
from unittest import mock
from alembic import script
@@ -114,6 +115,7 @@ class WalkVersionsMixin(object):
check = getattr(self, "_check_%s" % version, None)
if check:
check(engine, data)
+
except Exception:
LOG.error("Failed to migrate to version %(version)s on engine "
"%(engine)s",
@@ -248,17 +250,24 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_conductor = conductors.insert().values(data_conductor)
connection.execute(insert_conductor)
- conductor_stmt = conductors.select(
- conductors.c.hostname == data_conductor['hostname'])
+ conductor_stmt = sqlalchemy.select(
+ models.Conductor.id
+ ).where(
+ models.Conductor.hostname == 'test_host'
+ )
conductor = connection.execute(conductor_stmt).first()
-
data_node = {'uuid': uuidutils.generate_uuid(),
- 'conductor_affinity': conductor['id']}
+ 'conductor_affinity': conductor.id}
insert_node = nodes.insert().values(data_node)
+
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == data_node['uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.conductor_affinity
+ ).where(
+ models.Node.uuid == data_node['uuid']
+ )
node = connection.execute(node_stmt).first()
- self.assertEqual(conductor['id'], node['conductor_affinity'])
+ self.assertEqual(conductor.id, node.conductor_affinity)
def _check_242cc6a923b3(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
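A note on the access-style change in the hunk above: in SQLAlchemy 1.4, ``connection.execute(select(...))`` returns ``Row`` objects supporting attribute access, which the rewritten checks rely on. A small sketch, reusing the names from the test:

    import sqlalchemy

    stmt = sqlalchemy.select(models.Conductor.id).where(
        models.Conductor.hostname == 'test_host')
    row = connection.execute(stmt).first()
    assert row.id == row[0]  # Row supports attribute and positional access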
@@ -285,21 +294,26 @@ class MigrationCheckersMixin(object):
return data
def _check_5674c57409b9(self, engine, data):
- nodes = db_utils.get_table(engine, 'nodes')
- result = engine.execute(nodes.select())
-
- def _get_state(uuid):
- for row in data:
- if row['uuid'] == uuid:
- return row['provision_state']
-
- for row in result:
- old = _get_state(row['uuid'])
- new = row['provision_state']
- if old is None:
- self.assertEqual('available', new)
- else:
- self.assertEqual(old, new)
+ with engine.begin() as connection:
+ result = connection.execute(
+ sqlalchemy.select(
+ models.Node.uuid,
+ models.Node.provision_state
+ )
+ )
+
+ def _get_state(uuid):
+ for row in data:
+ if row['uuid'] == uuid:
+ return row['provision_state']
+
+ for row in result:
+ old = _get_state(row.uuid)
+ new = row['provision_state']
+ if old is None:
+ self.assertEqual('available', new)
+ else:
+ self.assertEqual(old, new)
def _check_bb59b63f55a(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -360,9 +374,13 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_node = nodes.insert().values(data)
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == uuid)
+ node_stmt = sqlalchemy.select(
+ models.Node.name
+ ).where(
+ models.Node.uuid == uuid
+ )
node = connection.execute(node_stmt).first()
- self.assertEqual(bigstring, node['name'])
+ self.assertEqual(bigstring, node.name)
def _check_516faf1bb9b1(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -372,9 +390,13 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_node = nodes.insert().values(data)
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == uuid)
+ node_stmt = sqlalchemy.select(
+ models.Node.driver
+ ).where(
+ models.Node.uuid == uuid
+ )
node = connection.execute(node_stmt).first()
- self.assertEqual(bigstring, node['driver'])
+ self.assertEqual(bigstring, node.driver)
def _check_48d6c242bb9b(self, engine, data):
node_tags = db_utils.get_table(engine, 'node_tags')
@@ -390,9 +412,13 @@ class MigrationCheckersMixin(object):
data = {'node_id': '123', 'tag': 'tag1'}
insert_node_tag = node_tags.insert().values(data)
connection.execute(insert_node_tag)
- tag_stmt = node_tags.select(node_tags.c.node_id == '123')
+ tag_stmt = sqlalchemy.select(
+ models.NodeTag.tag
+ ).where(
+ models.NodeTag.node_id == '123'
+ )
tag = connection.execute(tag_stmt).first()
- self.assertEqual('tag1', tag['tag'])
+ self.assertEqual('tag1', tag.tag)
def _check_5ea1b0d310e(self, engine, data):
portgroup = db_utils.get_table(engine, 'portgroups')
@@ -441,17 +467,22 @@ class MigrationCheckersMixin(object):
return data
def _check_f6fdb920c182(self, engine, data):
- ports = db_utils.get_table(engine, 'ports')
- result = engine.execute(ports.select())
- def _was_inserted(uuid):
- for row in data:
- if row['uuid'] == uuid:
- return True
+ with engine.begin() as connection:
+ port_stmt = sqlalchemy.select(
+ models.Port.uuid,
+ models.Port.pxe_enabled
+ )
+ result = connection.execute(port_stmt)
- for row in result:
- if _was_inserted(row['uuid']):
- self.assertTrue(row['pxe_enabled'])
+ def _was_inserted(uuid):
+ for row in data:
+ if row['uuid'] == uuid:
+ return True
+
+ for row in result:
+ if _was_inserted(row['uuid']):
+ self.assertTrue(row['pxe_enabled'])
def _check_e294876e8028(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -494,18 +525,21 @@ class MigrationCheckersMixin(object):
return data
def _check_c14cef6dfedf(self, engine, data):
- nodes = db_utils.get_table(engine, 'nodes')
- result = engine.execute(nodes.select())
counts = collections.defaultdict(int)
+ with engine.begin() as connection:
+ result = connection.execute(
+ sqlalchemy.select(
+ models.Node.uuid,
+ models.Node.network_interface))
- def _was_inserted(uuid):
- for row in data:
- if row['uuid'] == uuid:
- return True
+ def _was_inserted(uuid):
+ for row in data:
+ if row['uuid'] == uuid:
+ return True
- for row in result:
- if _was_inserted(row['uuid']):
- counts[row['network_interface']] += 1
+ for row in result:
+ if _was_inserted(row['uuid']):
+ counts[row['network_interface']] += 1
# using default config values, we should have 2 flat and one neutron
self.assertEqual(2, counts['flat'])
@@ -602,8 +636,10 @@ class MigrationCheckersMixin(object):
self.assertIn('mode', col_names)
self.assertIsInstance(portgroups.c.mode.type,
sqlalchemy.types.String)
-
- result = engine.execute(portgroups.select())
+ with engine.begin() as connection:
+ result = connection.execute(
+ sqlalchemy.select(models.Portgroup.mode)
+ )
for row in result:
self.assertEqual(CONF.default_portgroup_mode, row['mode'])
@@ -675,9 +711,13 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_node = nodes.insert().values(data)
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == data['uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.id
+ ).where(
+ models.Node.uuid == data['uuid']
+ )
node = connection.execute(node_stmt).first()
- data['id'] = node['id']
+ data['id'] = node.id
return data
def _check_b4130a7fc904(self, engine, data):
@@ -694,10 +734,13 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_trait = node_traits.insert().values(trait)
connection.execute(insert_trait)
- trait_stmt = node_traits.select(
- node_traits.c.node_id == data['id'])
+ trait_stmt = sqlalchemy.select(
+ models.NodeTrait.trait
+ ).where(
+ models.NodeTrait.node_id == data['id']
+ )
trait = connection.execute(trait_stmt).first()
- self.assertEqual('trait1', trait['trait'])
+ self.assertEqual('trait1', trait.trait)
def _pre_upgrade_82c315d60161(self, engine):
# Create a node to which bios setting can be added.
@@ -706,9 +749,11 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_node = nodes.insert().values(data)
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == data['uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.id
+ ).where(models.Node.uuid == data['uuid'])
node = connection.execute(node_stmt).first()
- data['id'] = node['id']
+ data['id'] = node.id
return data
def _check_82c315d60161(self, engine, data):
@@ -736,10 +781,12 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_bios_settings = bios_settings.insert().values(setting)
connection.execute(insert_bios_settings)
- setting_stmt = bios_settings.select(
- sqlalchemy.sql.and_(
- bios_settings.c.node_id == data['id'],
- bios_settings.c.name == setting['name']))
+ setting_stmt = sqlalchemy.select(
+ models.BIOSSetting.value
+ ).where(
+ models.BIOSSetting.node_id == data['id'],
+ models.BIOSSetting.name == setting['name']
+ )
setting = connection.execute(setting_stmt).first()
self.assertEqual('on', setting['value'])
@@ -826,15 +873,21 @@ class MigrationCheckersMixin(object):
self.assertIsInstance(tbl.c.conductor_group.type,
sqlalchemy.types.String)
with engine.begin() as connection:
- node_stmt = nodes_tbl.select(
- nodes_tbl.c.uuid == data['node_uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.uuid,
+ models.Node.conductor_group,
+ ).where(
+ models.Node.uuid == data['node_uuid'])
node = connection.execute(node_stmt).first()
- self.assertEqual(node['conductor_group'], "")
+ self.assertEqual(node.conductor_group, "")
- conductor_stmt = conductors_tbl.select(
- conductors_tbl.c.id == data['conductor_id'])
+ conductor_stmt = sqlalchemy.select(
+ models.Conductor.conductor_group,
+ ).where(
+ models.Conductor.id == data['conductor_id'],
+ )
conductor = connection.execute(conductor_stmt).first()
- self.assertEqual(conductor['conductor_group'], "")
+ self.assertEqual(conductor.conductor_group, "")
def _check_d2b036ae9378(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -859,11 +912,15 @@ class MigrationCheckersMixin(object):
self.assertIn('protected_reason', col_names)
with engine.begin() as connection:
- node_stmt = nodes.select(
- nodes.c.uuid == data['node_uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.uuid,
+ models.Node.protected,
+ models.Node.protected_reason
+ ).where(
+ models.Node.uuid == data['node_uuid'])
node = connection.execute(node_stmt).first()
- self.assertFalse(node['protected'])
- self.assertIsNone(node['protected_reason'])
+ self.assertFalse(node.protected)
+ self.assertIsNone(node.protected_reason)
def _check_f190f9d00a11(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -887,10 +944,13 @@ class MigrationCheckersMixin(object):
self.assertIn('allocation_id', col_names)
with engine.begin() as connection:
- node_stmt = nodes.select(
- nodes.c.uuid == data['node_uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.allocation_id
+ ).where(
+ models.Node.uuid == data['node_uuid']
+ )
node = connection.execute(node_stmt).first()
- self.assertIsNone(node['allocation_id'])
+ self.assertIsNone(node.allocation_id)
allocations = db_utils.get_table(engine, 'allocations')
col_names = [column.name for column in allocations.c]
@@ -996,22 +1056,33 @@ class MigrationCheckersMixin(object):
insert_dpt = deploy_templates.insert().values(template)
connection.execute(insert_dpt)
# Query by UUID.
- dpt_uuid_stmt = deploy_templates.select(
- deploy_templates.c.uuid == uuid)
+ dpt_uuid_stmt = sqlalchemy.select(
+ models.DeployTemplate.id,
+ models.DeployTemplate.name,
+ ).where(
+ models.DeployTemplate.uuid == uuid
+ )
result = connection.execute(dpt_uuid_stmt).first()
- template_id = result['id']
- self.assertEqual(name, result['name'])
+ template_id = result.id
+ self.assertEqual(name, result.name)
# Query by name.
- dpt_name_stmt = deploy_templates.select(
- deploy_templates.c.name == name)
+ dpt_name_stmt = sqlalchemy.select(
+ models.DeployTemplate.id
+ ).where(
+ models.DeployTemplate.name == name
+ )
result = connection.execute(dpt_name_stmt).first()
- self.assertEqual(template_id, result['id'])
+ self.assertEqual(template_id, result.id)
# Query by ID.
- dpt_id_stmt = deploy_templates.select(
- deploy_templates.c.id == template_id)
+ dpt_id_stmt = sqlalchemy.select(
+ models.DeployTemplate.uuid,
+ models.DeployTemplate.name
+ ).where(
+ models.DeployTemplate.id == template_id
+ )
result = connection.execute(dpt_id_stmt).first()
- self.assertEqual(uuid, result['uuid'])
- self.assertEqual(name, result['name'])
+ self.assertEqual(uuid, result.uuid)
+ self.assertEqual(name, result.name)
savepoint_uuid = connection.begin_nested()
# UUID is unique.
template = {'name': 'CUSTOM_DT2', 'uuid': uuid}
@@ -1030,6 +1101,7 @@ class MigrationCheckersMixin(object):
# Insert a deploy template step.
interface = 'raid'
step_name = 'create_configuration'
+ # The line below is JSON.
args = '{"logical_disks": []}'
priority = 10
step = {'deploy_template_id': template_id, 'interface': interface,
@@ -1037,15 +1109,30 @@ class MigrationCheckersMixin(object):
insert_dpts = deploy_template_steps.insert().values(step)
connection.execute(insert_dpts)
# Query by deploy template ID.
- query_id_stmt = deploy_template_steps.select(
- deploy_template_steps.c.deploy_template_id
- == template_id)
+ query_id_stmt = sqlalchemy.select(
+ models.DeployTemplateStep.deploy_template_id,
+ models.DeployTemplateStep.interface,
+ models.DeployTemplateStep.step,
+ models.DeployTemplateStep.args,
+ models.DeployTemplateStep.priority,
+ ).where(
+ models.DeployTemplateStep.deploy_template_id == template_id
+ )
result = connection.execute(query_id_stmt).first()
- self.assertEqual(template_id, result['deploy_template_id'])
- self.assertEqual(interface, result['interface'])
- self.assertEqual(step_name, result['step'])
- self.assertEqual(args, result['args'])
- self.assertEqual(priority, result['priority'])
+ self.assertEqual(template_id, result.deploy_template_id)
+ self.assertEqual(interface, result.interface)
+ self.assertEqual(step_name, result.step)
+ if isinstance(result.args, dict):
+ # Postgres testing returns a dict at this level; calling
+ # str() on it yields dict-repr quoting, so comparing
+ # string to string fails.
+ result_args = json.dumps(result.args)
+ else:
+ # MySQL/MariaDB appears to actually hand us a string
+ # back, so we should be able to compare it directly.
+ result_args = result.args
+ self.assertEqual(args, result_args)
+ self.assertEqual(priority, result.priority)
# Insert another step for the same template.
insert_step = deploy_template_steps.insert().values(step)
connection.execute(insert_step)
@@ -1103,11 +1190,15 @@ class MigrationCheckersMixin(object):
self.assertIn('retired_reason', col_names)
with engine.begin() as connection:
- node_stmt = nodes.select(
- nodes.c.uuid == data['node_uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.retired,
+ models.Node.retired_reason,
+ ).where(
+ models.Node.uuid == data['node_uuid']
+ )
node = connection.execute(node_stmt).first()
- self.assertFalse(node['retired'])
- self.assertIsNone(node['retired_reason'])
+ self.assertFalse(node.retired)
+ self.assertIsNone(node.retired_reason)
def _check_b2ad35726bb0(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -1186,12 +1277,16 @@ class TestMigrationsMySQL(MigrationCheckersMixin,
# this should always fail pre-upgrade
mediumtext = 'a' * (pow(2, 16) + 1)
+ json_text = str({'key': mediumtext})
uuid = uuidutils.generate_uuid()
- expected_to_fail_data = {'uuid': uuid, 'instance_info': mediumtext}
+ expected_to_fail_data = {'uuid': uuid, 'instance_info': json_text}
# this should always work pre-upgrade
- text = 'a' * (pow(2, 16) - 1)
+ text = 'a' * (pow(2, 16) - 13)
+ # The field needs to contain JSON for the decoder to work against
+ # the field.
+ json_text = str({'key': text})
uuid2 = uuidutils.generate_uuid()
- valid_pre_upgrade_data = {'uuid': uuid2, 'instance_info': text}
+ valid_pre_upgrade_data = {'uuid': uuid2, 'instance_info': json_text}
with engine.begin() as connection:
self.assertRaises(db_exc.DBError, connection.execute,
nodes.insert(), expected_to_fail_data)
@@ -1207,21 +1302,29 @@ class TestMigrationsMySQL(MigrationCheckersMixin,
with engine.begin() as connection:
# check that the data for the successful pre-upgrade
# entry didn't change
- node_stmt = nodes.select(nodes.c.uuid == data['uuid'])
- node = connection.execute(node_stmt).first()
- self.assertIsNotNone(node)
- self.assertEqual(data['instance_info'], node['instance_info'])
+ # NOTE(TheJulia): Directly select the field to bypass
+ # field decoding
+ i_info = connection.execute(
+ sqlalchemy.text(
+ "SELECT instance_info from nodes WHERE uuid = "
+ "'%s'" % data['uuid'])).one()
+ self.assertIsNotNone(i_info[0])
+ self.assertEqual(data['instance_info'], i_info[0])
# now this should pass post-upgrade
test = 'b' * (pow(2, 16) + 1)
+ test_text = str({'a': test})
uuid = uuidutils.generate_uuid()
- data = {'uuid': uuid, 'instance_info': test}
+ data = {'uuid': uuid, 'instance_info': test_text}
insert_node = nodes.insert().values(data)
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == uuid)
- node = connection.execute(node_stmt).first()
- self.assertEqual(test, node['instance_info'])
+ # Re-uses the same query to fetch current results
+ i_info = connection.execute(
+ sqlalchemy.text(
+ "SELECT instance_info from nodes WHERE uuid = "
+ "'%s'" % data['uuid'])).one()
+ self.assertEqual(test_text, i_info[0])
class TestMigrationsPostgreSQL(MigrationCheckersMixin,
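The raw-SQL reads above interpolate the UUID directly, which is workable for a fixed test value; a bound-parameter form would be the more idiomatic sketch (same assumed ``connection`` and ``data``):

    import sqlalchemy

    stmt = sqlalchemy.text(
        'SELECT instance_info FROM nodes WHERE uuid = :uuid')
    i_info = connection.execute(stmt, {'uuid': data['uuid']}).one()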
diff --git a/ironic/tests/unit/db/test_conductor.py b/ironic/tests/unit/db/test_conductor.py
index fe4e93ed9..0187ebca0 100644
--- a/ironic/tests/unit/db/test_conductor.py
+++ b/ironic/tests/unit/db/test_conductor.py
@@ -166,7 +166,9 @@ class DbConductorTestCase(base.DbTestCase):
c = self._create_test_cdr()
self.dbapi.touch_conductor(c.hostname)
self.assertEqual(2, mock_update.call_count)
- self.assertEqual(2, mock_sleep.call_count)
+ # Count that it was called, but not the number of times
+ # as this is *actually* time.sleep via import from oslo_db.api
+ self.assertTrue(mock_sleep.called)
def test_touch_conductor_not_found(self):
# A conductor's heartbeat will not create a new record,
diff --git a/ironic/tests/unit/db/test_nodes.py b/ironic/tests/unit/db/test_nodes.py
index b4d70b2dd..bb030a80c 100644
--- a/ironic/tests/unit/db/test_nodes.py
+++ b/ironic/tests/unit/db/test_nodes.py
@@ -367,10 +367,10 @@ class DbNodeTestCase(base.DbTestCase):
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([node1.id], [r.id for r in res])
- res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
+ res = self.dbapi.get_node_list(filters={'fault': 'boom'})
self.assertEqual([node2.id], [r.id for r in res])
- res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
+ res = self.dbapi.get_node_list(filters={'fault': 'moob'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'resource_class': 'foo'})
@@ -558,6 +558,9 @@ class DbNodeTestCase(base.DbTestCase):
'cat': 'meow'},
internal_info={'corgi': 'rocks'},
deploy_interface='purring_machine')
+ utils.create_test_node_traits(node_id=node.id,
+ traits=['atrait'])
+
uuids.append(str(node['uuid']))
req_fields = ['uuid',
'provision_state',
diff --git a/releasenotes/notes/add-allocations-table-check-38f1c9eef189b411.yaml b/releasenotes/notes/add-allocations-table-check-38f1c9eef189b411.yaml
new file mode 100644
index 000000000..46046bd2a
--- /dev/null
+++ b/releasenotes/notes/add-allocations-table-check-38f1c9eef189b411.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ Adds an upgrade status check for the Allocation table engine and
+ character set encoding on MySQL. This is a result of a missing
+ encoding definition on the table schema when originally created.
+ This issue will be remedied, in part, in a later version of Ironic,
+ but the upgrade status check will provide advance operator visibility.
diff --git a/releasenotes/notes/allocations-charset-5384d1ea00964bdd.yaml b/releasenotes/notes/allocations-charset-5384d1ea00964bdd.yaml
new file mode 100644
index 000000000..3db4da086
--- /dev/null
+++ b/releasenotes/notes/allocations-charset-5384d1ea00964bdd.yaml
@@ -0,0 +1,23 @@
+---
+fixes:
+  - |
+    Fixes a missing MySQL/MariaDB character set configuration and default
+    table type encoding for the ``allocations`` database table. Previously,
+    if an attempt was made to populate Ironic's database on a machine which
+    was using 4 byte character encoding as the default, such as
+    MySQL/MariaDB on Debian based systems, then the database schema
+    creation would fail.
+upgrade:
+  - This upgrade updates the default character set utilized in the
+    database tables when using MySQL/MariaDB. Previously, the default
+    for Ironic was ``UTF8``; we now explicitly set ``UTF8MB3``,
+    which is short for "3 byte UTF8" encoding. The exception to this
+    was the ``allocations`` table, which would just rely upon the database
+    default. This was done because Ironic's database schema is incompatible
+    with MySQL/MariaDB's ``UTF8MB4``, or "4 byte UTF8", character encoding
+    and its storage constraints.
+  - Upgrading will change the default character encoding of all tables.
+    For most tables, this should be an effective no-op, but it may result
+    in transitory table locks. The ``allocations`` table will need to
+    be rewritten, during which the database engine will have locked the
+    table from being used. Operators are advised to perform test upgrades
+    and set expectations and upgrade plans accordingly.
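Operators who want to check the current state before upgrading might verify the table definition along these lines (a sketch; the connection URL is assumed):

    import sqlalchemy as sa

    engine = sa.create_engine('mysql+pymysql://user:secret@db/ironic')
    with engine.connect() as conn:
        ddl = conn.execute(sa.text('SHOW CREATE TABLE allocations')).one()[1]
        print(ddl)  # look for ENGINE=InnoDB and DEFAULT CHARSET=utf8/utf8mb3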
diff --git a/releasenotes/notes/irmc-add-snmp-auth-protocols-3ff7597cea7ef9dd.yaml b/releasenotes/notes/irmc-add-snmp-auth-protocols-3ff7597cea7ef9dd.yaml
new file mode 100644
index 000000000..4d0c6bff2
--- /dev/null
+++ b/releasenotes/notes/irmc-add-snmp-auth-protocols-3ff7597cea7ef9dd.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Adds ``sha256``, ``sha384`` and ``sha512`` as supported SNMPv3
+ authentication protocols to the iRMC driver.
diff --git a/releasenotes/notes/prepare-for-sqlalchemy-20-e817f340f261b1a2.yaml b/releasenotes/notes/prepare-for-sqlalchemy-20-e817f340f261b1a2.yaml
new file mode 100644
index 000000000..5174f09e4
--- /dev/null
+++ b/releasenotes/notes/prepare-for-sqlalchemy-20-e817f340f261b1a2.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Ironic has started the process of upgrading the code base to support
+ SQLAlchemy 2.0 in anticipation of its release. This results in the
+ minimum version of SQLAlchemy becoming 1.4.0, as that version contains
+ migration features for the move to SQLAlchemy 2.0.
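Related to this, SQLAlchemy 1.4 can surface 2.0-removal warnings during test runs, which mirrors the ``SQLALCHEMY_WARN_20`` setting added to ``tox.ini`` in this change. A minimal sketch of opting in programmatically:

    import os

    # Must be set before sqlalchemy is imported anywhere in the process.
    os.environ['SQLALCHEMY_WARN_20'] = 'true'

    import warnings

    import sqlalchemy.exc

    # Escalate 2.0 deprecation warnings to errors so tests fail loudly.
    warnings.simplefilter('error', sqlalchemy.exc.RemovedIn20Warning)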
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index d4d148d41..ae046159e 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -7,11 +7,11 @@ msgid ""
msgstr ""
"Project-Id-Version: Ironic Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-09-06 22:51+0000\n"
+"POT-Creation-Date: 2022-10-17 23:38+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2022-09-05 10:29+0000\n"
+"PO-Revision-Date: 2022-10-16 10:44+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -211,8 +211,8 @@ msgstr "13.0.6"
msgid "13.0.7"
msgstr "13.0.7"
-msgid "13.0.7-25"
-msgstr "13.0.7-25"
+msgid "13.0.7-28"
+msgstr "13.0.7-28"
msgid "14.0.0"
msgstr "14.0.0"
@@ -226,8 +226,8 @@ msgstr "15.0.1"
msgid "15.0.2"
msgstr "15.0.2"
-msgid "15.0.2-17"
-msgstr "15.0.2-17"
+msgid "15.0.2-21"
+msgstr "15.0.2-21"
msgid "15.1.0"
msgstr "15.1.0"
@@ -253,6 +253,9 @@ msgstr "16.0.4"
msgid "16.0.5"
msgstr "16.0.5"
+msgid "16.0.5-7"
+msgstr "16.0.5-7"
+
msgid "16.1.0"
msgstr "16.1.0"
@@ -271,8 +274,8 @@ msgstr "17.0.3"
msgid "17.0.4"
msgstr "17.0.4"
-msgid "17.0.4-34"
-msgstr "17.0.4-34"
+msgid "17.0.4-39"
+msgstr "17.0.4-39"
msgid "18.0.0"
msgstr "18.0.0"
@@ -286,8 +289,8 @@ msgstr "18.2.0"
msgid "18.2.1"
msgstr "18.2.1"
-msgid "18.2.1-27"
-msgstr "18.2.1-27"
+msgid "18.2.1-31"
+msgstr "18.2.1-31"
msgid "19.0.0"
msgstr "19.0.0"
@@ -298,8 +301,8 @@ msgstr "20.0.0"
msgid "20.1.0"
msgstr "20.1.0"
-msgid "20.1.0-24"
-msgstr "20.1.0-24"
+msgid "20.1.0-29"
+msgstr "20.1.0-29"
msgid "20.2.0"
msgstr "20.2.0"
@@ -307,9 +310,15 @@ msgstr "20.2.0"
msgid "21.0.0"
msgstr "21.0.0"
+msgid "21.1.0"
+msgstr "21.1.0"
+
msgid "4.0.0 First semver release"
msgstr "4.0.0 First semver release"
+msgid "4.1.0"
+msgstr "4.1.0"
+
msgid "4.2.2"
msgstr "4.2.2"
@@ -514,6 +523,15 @@ msgstr ""
"masked for this request."
msgid ""
+"A driver that handles booting itself (for example, a driver that implements "
+"booting from virtual media) should use the following to make calls to the "
+"boot interface a no-op::"
+msgstr ""
+"A driver that handles booting itself (for example, a driver that implements "
+"booting from virtual media) should use the following to make calls to the "
+"boot interface a no-op::"
+
+msgid ""
"A few major changes are worth mentioning. This is not an exhaustive list, "
"and mostly includes changes from 9.0.0:"
msgstr ""
@@ -717,6 +735,15 @@ msgstr ""
"for the node, merely recording the returned state instead."
msgid ""
+"A new option ``[agent]api_ca_file`` allows passing a CA file to the ramdisk "
+"when ``redfish-virtual-media`` boot is used. Requires ironic-python-agent "
+"from the Wallaby cycle."
+msgstr ""
+"A new option ``[agent]api_ca_file`` allows passing a CA file to the ramdisk "
+"when ``redfish-virtual-media`` boot is used. Requires ironic-python-agent "
+"from the Wallaby cycle."
+
+msgid ""
"A node in the ``active`` provision state can be rescued via the ``GET /v1/"
"nodes/{node_ident}/states/provision`` API, by specifying ``rescue`` as the "
"``target`` value, and a ``rescue_password`` value. When the node has been "
@@ -773,6 +800,21 @@ msgstr ""
msgid ""
"A permission setting has been added for ``redfish-virtual-media`` boot "
"interface, which allows for explicit file permission setting when the driver "
+"is being used. The default for the new ``[redfish]file_permission setting is "
+"``0u644``, or 644 if manually changed using ``chmod`` on the command line. "
+"Operators MAY need to adjust this if they were running the conductor with a "
+"specific ``umask`` to work around the permission setting defect."
+msgstr ""
+"A permission setting has been added for ``redfish-virtual-media`` boot "
+"interface, which allows for explicit file permission setting when the driver "
+"is being used. The default for the new ``[redfish]file_permission setting is "
+"``0u644``, or 644 if manually changed using ``chmod`` on the command line. "
+"Operators MAY need to adjust this if they were running the conductor with a "
+"specific ``umask`` to work around the permission setting defect."
+
+msgid ""
+"A permission setting has been added for ``redfish-virtual-media`` boot "
+"interface, which allows for explicit file permission setting when the driver "
"is used. The default for the new ``[redfish]file_permission setting is "
"``0u644``, or 644 if manually changed using ``chmod`` on the command line. "
"Operators may need to check ``/httpboot/redfish`` folder permissions if "
@@ -850,6 +892,27 @@ msgstr ""
"API version 1.58 allows backfilling allocations for existing deployed nodes "
"by providing ``node`` to ``POST /v1/allocations``."
+msgid ""
+"Ability to create an allocation has been restricted by a new policy rule "
+"``baremetal::allocation::create_pre_rbac`` which prevents creation of "
+"allocations by any project administrator when operating with the new Role "
+"Based Access Control model. The use and enforcement of this rule is disabled "
+"when ``[oslo_policy]enforce_new_defaults`` is set which also makes the "
+"population of a ``owner`` field for allocations to become automatically "
+"populated. Most deployments should not encounter any issues with this "
+"security change, and the policy rule will be removed when support for the "
+"legacy ``baremetal_admin`` custom role has been removed."
+msgstr ""
+"Ability to create an allocation has been restricted by a new policy rule "
+"``baremetal::allocation::create_pre_rbac`` which prevents creation of "
+"allocations by any project administrator when operating with the new Role "
+"Based Access Control model. The use and enforcement of this rule is disabled "
+"when ``[oslo_policy]enforce_new_defaults`` is set which also makes the "
+"population of a ``owner`` field for allocations to become automatically "
+"populated. Most deployments should not encounter any issues with this "
+"security change, and the policy rule will be removed when support for the "
+"legacy ``baremetal_admin`` custom role has been removed."
+
msgid "Add BIOS config to DRAC Driver"
msgstr "Add BIOS config to DRAC Driver"
@@ -988,6 +1051,9 @@ msgstr ""
msgid "Added CORS support"
msgstr "Added CORS support"
+msgid "Added Cisco IMC driver"
+msgstr "Added Cisco IMC driver"
+
msgid ""
"Added configdrive support for whole disk images for iSCSI based deploy. This "
"will work for UEFI only or BIOS only images. It will not work for hybrid "
@@ -1025,6 +1091,13 @@ msgstr ""
"validate iLO SSL certificates."
msgid ""
+"Adding ``kernel`` and ``ramdisk`` is no longer necessary for partition "
+"images if ``image_type`` is set to ``partition`` and local boot is used."
+msgstr ""
+"Adding ``kernel`` and ``ramdisk`` is no longer necessary for partition "
+"images if ``image_type`` is set to ``partition`` and local boot is used."
+
+msgid ""
"Addition of the provision state target verb of ``adopt`` which allows an "
"operator to move a node into an ``active`` state from ``manageable`` state, "
"without performing a deployment operation on the node. This can be used to "
@@ -1043,6 +1116,15 @@ msgid "Additionally, adds the following API changes:"
msgstr "Additionally, adds the following API changes:"
msgid ""
+"Additionally, as mentioned before, `ironic.drivers.modules.pxe.PXEDeploy` "
+"has moved to `ironic.drivers.modules.iscsi_deploy.ISCSIDeploy`, which will "
+"break drivers that use this class."
+msgstr ""
+"Additionally, as mentioned before, `ironic.drivers.modules.pxe.PXEDeploy` "
+"has moved to `ironic.drivers.modules.iscsi_deploy.ISCSIDeploy`, which will "
+"break drivers that use this class."
+
+msgid ""
"Addresses a condition where the Compute Service may have been unable to "
"remove VIF attachment records while a baremetal node is being unprovisiond. "
"This condition resulted in VIF records being orphaned, blocking future "
@@ -1161,6 +1243,15 @@ msgstr ""
"nodes that are stuck in the rescue wait state."
msgid ""
+"Adds ``[conductor]clean_step_priority_override`` configuration parameter "
+"which allows the operator to define a custom order in which the cleaning "
+"steps are to run."
+msgstr ""
+"Adds ``[conductor]clean_step_priority_override`` configuration parameter "
+"which allows the operator to define a custom order in which the cleaning "
+"steps are to run."
+
+msgid ""
"Adds ``[swift]/endpoint_override`` option to explicitly set the endpoint URL "
"used for Swift. Ironic uses the Swift connection URL as a base for "
"generation of some TempURLs. Added parameter enables operators to fix the "
@@ -1188,6 +1279,12 @@ msgstr ""
"``instance_info`` (and ``extra`` if using metalsmith), and a lessee should "
"not be able to update all node attributes."
+msgid "Adds ``bios_interface`` to the node list and node show api-ref."
+msgstr "Adds ``bios_interface`` to the node list and node show api-ref."
+
+msgid "Adds ``bios_interface`` to the node validate api-ref."
+msgstr "Adds ``bios_interface`` to the node validate api-ref."
+
msgid "Adds ``bios`` interface to the ``redfish`` hardware type."
msgstr "Adds ``bios`` interface to the ``redfish`` hardware type."
@@ -1914,6 +2011,9 @@ msgstr "Implemented a new Boot interface for drivers"
msgid "Introduce new BootInterface to the Driver API"
msgstr "Introduce new BootInterface to the Driver API"
+msgid "Known issues"
+msgstr "Known issues"
+
msgid "Migrations from Nova \"baremetal\" have been removed"
msgstr "Migrations from Nova \"baremetal\" have been removed"
@@ -1926,6 +2026,23 @@ msgstr "Newton Series (6.0.0 - 6.2.x) Release Notes"
msgid "Ocata Series (7.0.0 - 7.0.x) Release Notes"
msgstr "Ocata Series (7.0.0 - 7.0.x) Release Notes"
+msgid ""
+"Out of tree drivers may be broken by this release. The AgentDeploy and "
+"ISCSIDeploy (formerly known as PXEDeploy) classes now depend on drivers to "
+"utilize an instance of a BootInterface. For drivers that exist out of tree, "
+"that use these deploy classes, an error will be thrown during deployment. "
+"There is a simple fix. For drivers that expect these deploy classes to "
+"handle PXE booting, one can add the following code to the driver's "
+"`__init__` method::"
+msgstr ""
+"Out-of-tree drivers may be broken by this release. The AgentDeploy and "
+"ISCSIDeploy (formerly known as PXEDeploy) classes now depend on drivers to "
+"utilize an instance of a BootInterface. For drivers that exist out-of-tree, "
+"that use these deploy classes, an error will be thrown during deployment. "
+"There is a simple fix. For drivers that expect these deploy classes to "
+"handle PXE booting, one can add the following code to the driver's "
+"`__init__` method::"
+
msgid "PXE drivers now support GRUB2"
msgstr "PXE drivers now support GRUB2"
@@ -1979,6 +2096,33 @@ msgstr ""
"example, web browser-based clients. This is configured in the [cors] section "
"of ironic.conf."
+msgid "The Ironic team apologizes profusely for this inconvenience."
+msgstr "The Ironic team apologises profusely for this inconvenience."
+
+msgid ""
+"The agent must download the tenant image in full before writing it to disk. "
+"As such, the server being deployed must have enough RAM for running the "
+"agent and storing the image. This is now checked before Ironic tells the "
+"agent to deploy an image. An optional config [agent]memory_consumed_by_agent "
+"is provided. When Ironic does this check, this config option may be set to "
+"factor in the amount of RAM to reserve for running the agent."
+msgstr ""
+"The agent must download the tenant image in full before writing it to disk. "
+"As such, the server being deployed must have enough RAM for running the "
+"agent and storing the image. This is now checked before Ironic tells the "
+"agent to deploy an image. An optional config [agent]memory_consumed_by_agent "
+"is provided. When Ironic does this check, this config option may be set to "
+"factor in the amount of RAM to reserve for running the agent."
+
+msgid ""
+"This brings some bug fixes and small features on top of Ironic 4.0.0. Major "
+"changes are listed below, and full release details are available on "
+"Launchpad: https://launchpad.net/ironic/liberty/4.1.0."
+msgstr ""
+"This brings some bug fixes and small features on top of Ironic 4.0.0. Major "
+"changes are listed below, and full release details are available on "
+"Launchpad: https://launchpad.net/ironic/liberty/4.1.0."
+
msgid ""
"This change enhances the driver interface for driver authors, and should not "
"affect users of Ironic, by splitting control of booting a server from the "
@@ -1993,6 +2137,15 @@ msgstr ""
"image to a server."
msgid ""
+"This driver supports managing Cisco UCS C-series servers through the CIMC "
+"API, rather than IPMI. Documentation is available at: https://docs.openstack."
+"org/developer/ironic/drivers/cimc.html"
+msgstr ""
+"This driver supports managing Cisco UCS C-series servers through the CIMC "
+"API, rather than IPMI. Documentation is available at: https://docs.openstack."
+"org/developer/ironic/drivers/cimc.html"
+
+msgid ""
"This is the first semver-versioned release of Ironic, created during the "
"OpenStack \"Liberty\" development cycle. It marks a pivot in our versioning "
"schema from date-based versioning; the previous released version was 2015.1. "
@@ -2020,8 +2173,29 @@ msgstr "Wallaby Series (16.1.0 - 17.0.x) Release Notes"
msgid "Xena Series (18.0.0 - 18.2.x) Release Notes"
msgstr "Xena Series (18.0.0 - 18.2.x) Release Notes"
-msgid "Yoga Series Release Notes"
-msgstr "Yoga Series Release Notes"
+msgid "Yoga Series (19.0.0 - 20.1.x) Release Notes"
+msgstr "Yoga Series (19.0.0 - 20.1.x) Release Notes"
+
+msgid "Zed Series (20.2.0 - 21.1.x) Release Notes"
+msgstr "Zed Series (20.2.0 - 21.1.x) Release Notes"
+
+msgid ""
+"iLO driver documentation is available at: https://docs.openstack.org/"
+"developer/ironic/drivers/ilo.html"
+msgstr ""
+"iLO driver documentation is available at: https://docs.openstack.org/"
+"developer/ironic/drivers/ilo.html"
+
+msgid ""
+"iLO virtual media drivers (iscsi_ilo and agent_ilo) can work standalone "
+"without Swift, by configuring an HTTP(S) server for hosting the deploy/boot "
+"images. A web server needs to be running on every conductor node and needs "
+"to be configured in ironic.conf."
+msgstr ""
+"iLO virtual media drivers (iscsi_ilo and agent_ilo) can work standalone "
+"without Swift, by configuring an HTTP(S) server for hosting the deploy/boot "
+"images. A web server needs to be running on every conductor node and needs "
+"to be configured in ironic.conf."
msgid "ipmitool driver supports IPMI v1.5"
msgstr "ipmitool driver supports IPMI v1.5"
diff --git a/releasenotes/source/yoga.rst b/releasenotes/source/yoga.rst
index 7cd5e908a..54e26c857 100644
--- a/releasenotes/source/yoga.rst
+++ b/releasenotes/source/yoga.rst
@@ -1,6 +1,6 @@
-=========================
-Yoga Series Release Notes
-=========================
+===========================================
+Yoga Series (19.0.0 - 20.1.x) Release Notes
+===========================================
.. release-notes::
:branch: stable/yoga
diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst
index 9608c05e4..f5f672a2d 100644
--- a/releasenotes/source/zed.rst
+++ b/releasenotes/source/zed.rst
@@ -1,6 +1,6 @@
-========================
-Zed Series Release Notes
-========================
+==========================================
+Zed Series (20.2.0 - 21.1.x) Release Notes
+==========================================
.. release-notes::
:branch: stable/zed
diff --git a/requirements.txt b/requirements.txt
index ae8e14f39..8a57727ec 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,7 +6,7 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=3.1.1 # Apache-2.0
-SQLAlchemy>=1.2.19 # MIT
+SQLAlchemy>=1.4.0 # MIT
alembic>=1.4.2 # MIT
automaton>=1.9.0 # Apache-2.0
eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT
diff --git a/test-requirements.txt b/test-requirements.txt
index bd29d9394..0c4bdb0ca 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -11,7 +11,7 @@ oslo.reports>=1.18.0 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
stestr>=2.0.0 # Apache-2.0
psycopg2>=2.8.5 # LGPL/ZPL
-testtools>=2.2.0 # MIT
+testtools>=2.5.0 # MIT
WebTest>=2.0.27 # MIT
pysnmp>=4.4.12
bandit!=1.6.0,>=1.1.0,<2.0.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index 247e819a4..391e797ba 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,6 +11,8 @@ setenv = VIRTUAL_ENV={envdir}
PYTHONDONTWRITEBYTECODE = 1
LANGUAGE=en_US
LC_ALL=en_US.UTF-8
+ PYTHONUNBUFFERED=1
+ SQLALCHEMY_WARN_20=true
deps =
-c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt