author     Victor Sergeyev <vsergeyev@mirantis.com>    2014-06-27 12:00:14 +0300
committer  Adam Gandelman <adamg@ubuntu.com>           2014-11-24 22:55:46 -0800
commit     9b6699feff3df6ef5d2639e023f37e6cae4c2c63 (patch)
tree       641079d73d5cab384e91556821d208bf24aa6e61 /nova
parent     8526a727dc20a96d7245ae836e81c29967166f77 (diff)
download   nova-9b6699feff3df6ef5d2639e023f37e6cae4c2c63.tar.gz
Use opportunistic approach for migration testing
Refactored the migration tests to use OpportunisticTestCase and removed unused code, the BaseMigrationTestCase class, and the ``test_migrations.conf`` file.

The main feature of this approach is that a new database with a random name is created for each migration test, which avoids race conditions between migration tests and reduces test interference. After this change, the ``openstack_citest`` user credentials are used only for the initial connection to the database.

The TestMigrationUtils class was also refactored, because BaseMigrationTestCase was removed.

Co-Authored-By: Roman Podoliaka <rpodolyaka@mirantis.com>

NOTE(adam_g): Backport adapted from a cherry pick of commit b930fb3a6b0ab8cbe0c19eb3ab8ba33d60d147be:
* The test directory is located at nova/tests/db/ in stable/juno.
* Adapted the existing nova-baremetal tests to the backport, which required a bit of refactoring so the NovaMigrationsCheckers code can be used by both the nova and nova-bm tests.

Change-Id: I5c9aaa56e5041b919b1e96a19e0395c5e03b727a
(cherry picked from commit b930fb3a6b0ab8cbe0c19eb3ab8ba33d60d147be)
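For reference, below is a minimal sketch of the opportunistic pattern this change adopts, assuming the juno-era oslo.db ``test_base`` API imported in the diff below. A backend-agnostic checker mixin carries the assertions, while the per-backend test case classes supply ``self.engine``; the MySQL and PostgreSQL opportunistic bases provision a throwaway, randomly named database per test and skip themselves when the ``openstack_citest`` credentials cannot connect. The ``_ExampleChecker`` classes and table name here are illustrative only and are not part of the commit.

    from oslo.db.sqlalchemy import test_base
    from sqlalchemy import Column, Integer, MetaData, String, Table


    class _ExampleChecker(object):
        """Backend-agnostic checks; ``self.engine`` comes from the test base."""

        def test_create_and_query(self):
            meta = MetaData(bind=self.engine)
            table = Table('example', meta,
                          Column('id', Integer, primary_key=True),
                          Column('name', String(36)))
            table.create()
            self.engine.execute(table.insert().values(name='foo'))
            rows = self.engine.execute(table.select()).fetchall()
            self.assertEqual(1, len(rows))


    class ExampleSQLiteTestCase(_ExampleChecker, test_base.DbTestCase):
        pass  # always runs, against an in-memory SQLite engine


    class ExampleMySQLTestCase(_ExampleChecker,
                               test_base.MySQLOpportunisticTestCase):
        pass  # runs only if a MySQL openstack_citest database is reachable


    class ExamplePostgreSQLTestCase(_ExampleChecker,
                                    test_base.PostgreSQLOpportunisticTestCase):
        pass  # runs only if a PostgreSQL openstack_citest database is reachable

The TestMigrationUtils and migration walk test classes in the diff below follow the same shape.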
Diffstat (limited to 'nova')
-rw-r--r--  nova/tests/db/test_migration_utils.py  | 306
-rw-r--r--  nova/tests/db/test_migrations.conf     |  26
-rw-r--r--  nova/tests/db/test_migrations.py       | 782
3 files changed, 339 insertions, 775 deletions
diff --git a/nova/tests/db/test_migration_utils.py b/nova/tests/db/test_migration_utils.py
index b3035ca5bb..b76108884a 100644
--- a/nova/tests/db/test_migration_utils.py
+++ b/nova/tests/db/test_migration_utils.py
@@ -15,6 +15,7 @@
import uuid
+from oslo.db.sqlalchemy import test_base
from oslo.db.sqlalchemy import utils as oslodbutils
import sqlalchemy
from sqlalchemy import Integer, String
@@ -26,7 +27,6 @@ from sqlalchemy.types import UserDefinedType
from nova.db.sqlalchemy import api as db
from nova.db.sqlalchemy import utils
from nova import exception
-from nova.tests.db import test_migrations
SA_VERSION = tuple(map(int, sqlalchemy.__version__.split('.')))
@@ -38,219 +38,183 @@ class CustomType(UserDefinedType):
return "CustomType"
-class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
+class TestMigrationUtilsSQLite(test_base.DbTestCase):
"""Class for testing utils that are used in db migrations."""
+ def setUp(self):
+ super(TestMigrationUtilsSQLite, self).setUp()
+ self.meta = MetaData(bind=self.engine)
+
def test_delete_from_select(self):
table_name = "__test_deletefromselect_table__"
uuidstrs = []
for unused in range(10):
uuidstrs.append(uuid.uuid4().hex)
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- conn = engine.connect()
- test_table = Table(table_name, meta,
- Column('id', Integer, primary_key=True,
- nullable=False, autoincrement=True),
- Column('uuid', String(36), nullable=False))
- test_table.create()
- # Add 10 rows to table
- for uuidstr in uuidstrs:
- ins_stmt = test_table.insert().values(uuid=uuidstr)
- conn.execute(ins_stmt)
-
- # Delete 4 rows in one chunk
- column = test_table.c.id
- query_delete = sql.select([column],
- test_table.c.id < 5).order_by(column)
- delete_statement = utils.DeleteFromSelect(test_table,
- query_delete, column)
- result_delete = conn.execute(delete_statement)
- # Verify we delete 4 rows
- self.assertEqual(result_delete.rowcount, 4)
-
- query_all = sql.select([test_table]).\
- where(test_table.c.uuid.in_(uuidstrs))
- rows = conn.execute(query_all).fetchall()
- # Verify we still have 6 rows in table
- self.assertEqual(len(rows), 6)
- test_table.drop()
+ conn = self.engine.connect()
+ test_table = Table(table_name, self.meta,
+ Column('id', Integer, primary_key=True,
+ nullable=False, autoincrement=True),
+ Column('uuid', String(36), nullable=False))
+ test_table.create()
+ # Add 10 rows to table
+ for uuidstr in uuidstrs:
+ ins_stmt = test_table.insert().values(uuid=uuidstr)
+ conn.execute(ins_stmt)
+
+ # Delete 4 rows in one chunk
+ column = test_table.c.id
+ query_delete = sql.select([column],
+ test_table.c.id < 5).order_by(column)
+ delete_statement = utils.DeleteFromSelect(test_table,
+ query_delete, column)
+ result_delete = conn.execute(delete_statement)
+ # Verify we delete 4 rows
+ self.assertEqual(result_delete.rowcount, 4)
+
+ query_all = sql.select([test_table])\
+ .where(test_table.c.uuid.in_(uuidstrs))
+ rows = conn.execute(query_all).fetchall()
+ # Verify we still have 6 rows in table
+ self.assertEqual(len(rows), 6)
def test_check_shadow_table(self):
table_name = 'test_check_shadow_table'
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
-
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer),
- Column('c', String(256)))
- table.create()
-
- # check missing shadow table
- self.assertRaises(NoSuchTableError,
- utils.check_shadow_table, engine, table_name)
-
- shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
- Column('id', Integer),
- Column('a', Integer))
- shadow_table.create()
-
- # check missing column
- self.assertRaises(exception.NovaException,
- utils.check_shadow_table, engine, table_name)
-
- # check when all is ok
- c = Column('c', String(256))
- shadow_table.create_column(c)
- self.assertTrue(utils.check_shadow_table(engine, table_name))
-
- # check extra column
- d = Column('d', Integer)
- shadow_table.create_column(d)
- self.assertRaises(exception.NovaException,
- utils.check_shadow_table, engine, table_name)
-
- table.drop()
- shadow_table.drop()
+
+ table = Table(table_name, self.meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('c', String(256)))
+ table.create()
+
+ # check missing shadow table
+ self.assertRaises(NoSuchTableError,
+ utils.check_shadow_table, self.engine, table_name)
+
+ shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, self.meta,
+ Column('id', Integer),
+ Column('a', Integer))
+ shadow_table.create()
+
+ # check missing column
+ self.assertRaises(exception.NovaException,
+ utils.check_shadow_table, self.engine, table_name)
+
+ # check when all is ok
+ c = Column('c', String(256))
+ shadow_table.create_column(c)
+ self.assertTrue(utils.check_shadow_table(self.engine, table_name))
+
+ # check extra column
+ d = Column('d', Integer)
+ shadow_table.create_column(d)
+ self.assertRaises(exception.NovaException,
+ utils.check_shadow_table, self.engine, table_name)
def test_check_shadow_table_different_types(self):
table_name = 'test_check_shadow_table_different_types'
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer))
- table.create()
-
- shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', String(256)))
- shadow_table.create()
- self.assertRaises(exception.NovaException,
- utils.check_shadow_table, engine, table_name)
+ table = Table(table_name, self.meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer))
+ table.create()
- table.drop()
- shadow_table.drop()
+ shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, self.meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', String(256)))
+ shadow_table.create()
+ self.assertRaises(exception.NovaException,
+ utils.check_shadow_table, self.engine, table_name)
+ @test_base.backend_specific('sqlite')
def test_check_shadow_table_with_unsupported_sqlite_type(self):
- if 'sqlite' not in self.engines:
- self.skipTest('sqlite is not configured')
table_name = 'test_check_shadow_table_with_unsupported_sqlite_type'
- engine = self.engines['sqlite']
- meta = MetaData(bind=engine)
- table = Table(table_name, meta,
+ table = Table(table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer),
Column('c', CustomType))
table.create()
- shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
+ shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer),
Column('c', CustomType))
shadow_table.create()
- self.assertTrue(utils.check_shadow_table(engine, table_name))
- shadow_table.drop()
+ self.assertTrue(utils.check_shadow_table(self.engine, table_name))
def test_create_shadow_table_by_table_instance(self):
table_name = 'test_create_shadow_table_by_table_instance'
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer),
- Column('b', String(256)))
- table.create()
- shadow_table = utils.create_shadow_table(engine, table=table)
- self.assertTrue(utils.check_shadow_table(engine, table_name))
- table.drop()
- shadow_table.drop()
+ table = Table(table_name, self.meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('b', String(256)))
+ table.create()
+ utils.create_shadow_table(self.engine, table=table)
+ self.assertTrue(utils.check_shadow_table(self.engine, table_name))
def test_create_shadow_table_by_name(self):
table_name = 'test_create_shadow_table_by_name'
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
-
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer),
- Column('b', String(256)))
- table.create()
- shadow_table = utils.create_shadow_table(engine,
- table_name=table_name)
- self.assertTrue(utils.check_shadow_table(engine, table_name))
- table.drop()
- shadow_table.drop()
+ table = Table(table_name, self.meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('b', String(256)))
+ table.create()
+ utils.create_shadow_table(self.engine, table_name=table_name)
+ self.assertTrue(utils.check_shadow_table(self.engine, table_name))
+
+ @test_base.backend_specific('sqlite')
def test_create_shadow_table_not_supported_type(self):
- if 'sqlite' in self.engines:
- table_name = 'test_create_shadow_table_not_supported_type'
- engine = self.engines['sqlite']
- meta = MetaData()
- meta.bind = engine
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', CustomType))
- table.create()
-
- # reflection of custom types has been fixed upstream
- if SA_VERSION < (0, 9, 0):
- self.assertRaises(oslodbutils.ColumnError,
- utils.create_shadow_table,
- engine, table_name=table_name)
-
- shadow_table = utils.create_shadow_table(engine,
- table_name=table_name,
- a=Column('a', CustomType())
- )
- self.assertTrue(utils.check_shadow_table(engine, table_name))
- table.drop()
- shadow_table.drop()
+ table_name = 'test_create_shadow_table_not_supported_type'
+ table = Table(table_name, self.meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', CustomType))
+ table.create()
+
+ # reflection of custom types has been fixed upstream
+ if SA_VERSION < (0, 9, 0):
+ self.assertRaises(oslodbutils.ColumnError,
+ utils.create_shadow_table,
+ self.engine, table_name=table_name)
+
+ utils.create_shadow_table(self.engine,
+ table_name=table_name,
+ a=Column('a', CustomType()))
+ self.assertTrue(utils.check_shadow_table(self.engine, table_name))
def test_create_shadow_both_table_and_table_name_are_none(self):
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- self.assertRaises(exception.NovaException,
- utils.create_shadow_table, engine)
+ self.assertRaises(exception.NovaException,
+ utils.create_shadow_table, self.engine)
def test_create_shadow_both_table_and_table_name_are_specified(self):
table_name = ('test_create_shadow_both_table_and_table_name_are_'
'specified')
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer))
- table.create()
- self.assertRaises(exception.NovaException,
- utils.create_shadow_table,
- engine, table=table, table_name=table_name)
- table.drop()
+ table = Table(table_name, self.meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer))
+ table.create()
+ self.assertRaises(exception.NovaException,
+ utils.create_shadow_table,
+ self.engine, table=table, table_name=table_name)
def test_create_duplicate_shadow_table(self):
table_name = 'test_create_duplicate_shadow_table'
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer))
- table.create()
- shadow_table = utils.create_shadow_table(engine,
- table_name=table_name)
- self.assertRaises(exception.ShadowTableExists,
- utils.create_shadow_table,
- engine, table_name=table_name)
- table.drop()
- shadow_table.drop()
+ table = Table(table_name, self.meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer))
+ table.create()
+ utils.create_shadow_table(self.engine, table_name=table_name)
+ self.assertRaises(exception.ShadowTableExists,
+ utils.create_shadow_table,
+ self.engine, table_name=table_name)
+
+
+class TestMigrationUtilsPostgreSQL(TestMigrationUtilsSQLite,
+ test_base.PostgreSQLOpportunisticTestCase):
+ pass
+
+
+class TestMigrationUtilsMySQL(TestMigrationUtilsSQLite,
+ test_base.MySQLOpportunisticTestCase):
+ pass
diff --git a/nova/tests/db/test_migrations.conf b/nova/tests/db/test_migrations.conf
deleted file mode 100644
index 310b7055c4..0000000000
--- a/nova/tests/db/test_migrations.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-[unit_tests]
-# Set up any number of databases to test concurrently.
-# The "name" used in the test is the config variable key.
-
-# A few tests rely on one sqlite database with 'sqlite' as the key.
-
-sqlite=sqlite://
-#sqlitefile=sqlite:///test_migrations_utils.db
-#mysql=mysql+mysqldb://user:pass@localhost/test_migrations_utils
-#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations_utils
-
-[migration_dbs]
-# Migration DB details are listed separately as they can't be connected to
-# concurrently. These databases can't be the same as above
-
-# Note, sqlite:// is in-memory and unique each time it is spawned.
-# However file sqlite's are not unique.
-
-sqlite=sqlite://
-#sqlitefile=sqlite:///test_migrations.db
-#mysql=mysql+mysqldb://user:pass@localhost/test_migrations
-#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations
-
-[walk_style]
-snake_walk=yes
-downgrade=yes
diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py
index db167e735b..494f712a29 100644
--- a/nova/tests/db/test_migrations.py
+++ b/nova/tests/db/test_migrations.py
@@ -15,21 +15,13 @@
# under the License.
"""
-Tests for database migrations. This test case reads the configuration
-file test_migrations.conf for database connection settings
-to use in the tests. For each connection found in the config file,
-the test case runs a series of test cases to ensure that migrations work
-properly both upgrading and downgrading, and that no data loss occurs
-if possible.
-
-There are also "opportunistic" tests for both mysql and postgresql in here,
-which allows testing against all 3 databases (sqlite in memory, mysql, pg) in
-a properly configured unit test environment.
+Tests for database migrations.
+There are "opportunistic" tests which allows testing against all 3 databases
+(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up db's named 'openstack_citest'
-and 'openstack_baremetal_citest' with user 'openstack_citest' and password
-'openstack_citest' on localhost. The test will then use that db and u/p combo
-to run the tests.
+with user 'openstack_citest' and password 'openstack_citest' on localhost. The
+test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands::
@@ -37,462 +29,113 @@ For postgres on Ubuntu this can be done with the following commands::
| postgres=# create user openstack_citest with createdb login password
| 'openstack_citest';
| postgres=# create database openstack_citest with owner openstack_citest;
-| postgres=# create database openstack_baremetal_citest with owner
-| openstack_citest;
"""
-import ConfigParser
import glob
+import logging
import os
from migrate.versioning import repository
-from oslo.db.sqlalchemy import session
+import mock
+from oslo.config import cfg
+from oslo.db.sqlalchemy import test_base
+from oslo.db.sqlalchemy import test_migrations
from oslo.db.sqlalchemy import utils as oslodbutils
-import six.moves.urllib.parse as urlparse
import sqlalchemy
import sqlalchemy.exc
-import nova.db.sqlalchemy.migrate_repo
+from nova.db import migration
+from nova.db.sqlalchemy import migrate_repo
+from nova.db.sqlalchemy import migration as sa_migration
from nova.db.sqlalchemy import utils as db_utils
from nova.i18n import _
-from nova.openstack.common import log as logging
-from nova.openstack.common import processutils
from nova import test
-from nova import utils
-import nova.virt.baremetal.db.sqlalchemy.migrate_repo
-
+from nova.tests import conf_fixture
+from nova.virt.baremetal.db import migration as bm_migration
+from nova.virt.baremetal.db.sqlalchemy import migrate_repo as bm_migrate_repo
+from nova.virt.baremetal.db.sqlalchemy import migration as bm_sa_migration
LOG = logging.getLogger(__name__)
-def _have_mysql(user, passwd, database):
- present = os.environ.get('NOVA_TEST_MYSQL_PRESENT')
- if present is None:
- return oslodbutils.is_backend_avail('mysql+mysqldb', database,
- user, passwd)
- return present.lower() in ('', 'true')
+class NovaMigrationsCheckersBase(test_migrations.WalkVersionsMixin):
+ """Test sqlalchemy-migrate migrations."""
+ @property
+ def INIT_VERSION(self):
+ return migration.db_initial_version()
-def _have_postgresql(user, passwd, database):
- present = os.environ.get('NOVA_TEST_POSTGRESQL_PRESENT')
- if present is None:
- return oslodbutils.is_backend_avail('postgresql+psycopg2', database,
- user, passwd)
- return present.lower() in ('', 'true')
+ @property
+ def REPOSITORY(self):
+ return repository.Repository(
+ os.path.abspath(os.path.dirname(migrate_repo.__file__)))
+ @property
+ def migration_api(self):
+ return sa_migration.versioning_api
-def get_mysql_connection_info(conn_pieces):
- database = conn_pieces.path.strip('/')
- loc_pieces = conn_pieces.netloc.split('@')
- host = loc_pieces[1]
- auth_pieces = loc_pieces[0].split(':')
- user = auth_pieces[0]
- password = ""
- if len(auth_pieces) > 1:
- if auth_pieces[1].strip():
- password = "-p\"%s\"" % auth_pieces[1]
+ @property
+ def migrate_engine(self):
+ return self.engine
- return (user, password, database, host)
+ def setUp(self):
+ super(NovaMigrationsCheckersBase, self).setUp()
+ conf_fixture.ConfFixture(cfg.CONF)
+ self.addCleanup(cfg.CONF.reset)
+ # NOTE(viktors): We should reduce log output because it causes issues,
+ # when we run tests with testr
+ migrate_log = logging.getLogger('migrate')
+ old_level = migrate_log.level
+ migrate_log.setLevel(logging.WARN)
+ self.addCleanup(migrate_log.setLevel, old_level)
+
+ def assertColumnExists(self, engine, table_name, column):
+ self.assertTrue(oslodbutils.column_exists(engine, table_name, column))
+
+ def assertColumnNotExists(self, engine, table_name, column):
+ self.assertFalse(oslodbutils.column_exists(engine, table_name, column))
+ def assertTableNotExists(self, engine, table):
+ self.assertRaises(sqlalchemy.exc.NoSuchTableError,
+ oslodbutils.get_table, engine, table)
-def get_pgsql_connection_info(conn_pieces):
- database = conn_pieces.path.strip('/')
- loc_pieces = conn_pieces.netloc.split('@')
- host = loc_pieces[1]
+ def assertIndexExists(self, engine, table_name, index):
+ self.assertTrue(oslodbutils.index_exists(engine, table_name, index))
- auth_pieces = loc_pieces[0].split(':')
- user = auth_pieces[0]
- password = ""
- if len(auth_pieces) > 1:
- password = auth_pieces[1].strip()
+ def assertIndexMembers(self, engine, table, index, members):
+ self.assertIndexExists(engine, table, index)
+
+ t = oslodbutils.get_table(engine, table)
+ index_columns = None
+ for idx in t.indexes:
+ if idx.name == index:
+ index_columns = idx.columns.keys()
+ break
- return (user, password, database, host)
+ self.assertEqual(sorted(members), sorted(index_columns))
+ def migrate_up(self, version, with_data=False):
+ if with_data:
+ check = getattr(self, "_check_%03d" % version, None)
+ if version not in self._skippable_migrations():
+ self.assertIsNotNone(check,
+ ('DB Migration %i does not have a '
+ 'test. Please add one!') % version)
-class CommonTestsMixIn(object):
- """These tests are shared between TestNovaMigrations and
- TestBaremetalMigrations.
+ super(NovaMigrationsCheckersBase, self).migrate_up(version, with_data)
- BaseMigrationTestCase is effectively an abstract class, meant to be derived
- from and not directly tested against; that's why these `test_` methods need
- to be on a Mixin, so that they won't be picked up as valid tests for
- BaseMigrationTestCase.
- """
def test_walk_versions(self):
- if not self.engines:
- self.skipTest("No engines initialized")
-
- for key, engine in self.engines.items():
- # We start each walk with a completely blank slate.
- self._reset_database(key)
- self._walk_versions(engine, self.snake_walk, self.downgrade)
-
- def test_mysql_opportunistically(self):
- self._test_mysql_opportunistically()
-
- def test_mysql_connect_fail(self):
- """Test that we can trigger a mysql connection failure and we fail
- gracefully to ensure we don't break people without mysql
- """
- if oslodbutils.is_backend_avail('mysql+mysqldb', self.DATABASE,
- "openstack_cifail", self.PASSWD):
- self.fail("Shouldn't have connected")
-
- def test_postgresql_opportunistically(self):
- self._test_postgresql_opportunistically()
-
- def test_postgresql_connect_fail(self):
- """Test that we can trigger a postgres connection failure and we fail
- gracefully to ensure we don't break people without postgres
- """
- if oslodbutils.is_backend_avail('postgresql+psycopg2', self.DATABASE,
- "openstack_cifail", self.PASSWD):
- self.fail("Shouldn't have connected")
-
-
-class BaseMigrationTestCase(test.NoDBTestCase):
- """Base class for testing migrations and migration utils. This sets up
- and configures the databases to run tests against.
- """
-
- # NOTE(jhesketh): It is expected that tests clean up after themselves.
- # This is necessary for concurrency to allow multiple tests to work on
- # one database.
- # The full migration walk tests however do call the old _reset_databases()
- # to throw away whatever was there so they need to operate on their own
- # database that we know isn't accessed concurrently.
- # Hence, BaseWalkMigrationTestCase overwrites the engine list.
-
- USER = None
- PASSWD = None
- DATABASE = None
-
- TIMEOUT_SCALING_FACTOR = 2
+ self.walk_versions(self.snake_walk, self.downgrade)
- def __init__(self, *args, **kwargs):
- super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
-
- self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
- 'test_migrations.conf')
- # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
- # to override the location of the config file for migration testing
- self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
- self.DEFAULT_CONFIG_FILE)
- self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
- self.REPOSITORY = repository.Repository(
- os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
- self.INIT_VERSION = 0
-
- self.snake_walk = False
- self.downgrade = False
- self.test_databases = {}
- self.migration = None
- self.migration_api = None
- def setUp(self):
- super(BaseMigrationTestCase, self).setUp()
- self._load_config()
-
- def _load_config(self):
- # Load test databases from the config file. Only do this
- # once. No need to re-run this on each test...
- LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
- if os.path.exists(self.CONFIG_FILE_PATH):
- cp = ConfigParser.RawConfigParser()
- try:
- cp.read(self.CONFIG_FILE_PATH)
- config = cp.options('unit_tests')
- for key in config:
- self.test_databases[key] = cp.get('unit_tests', key)
- self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
- self.downgrade = cp.getboolean('walk_style', 'downgrade')
-
- except ConfigParser.ParsingError as e:
- self.fail("Failed to read test_migrations.conf config "
- "file. Got error: %s" % e)
- else:
- self.fail("Failed to find test_migrations.conf config "
- "file.")
-
- self.engines = {}
- for key, value in self.test_databases.items():
- self.engines[key] = session.create_engine(value)
-
- # NOTE(jhesketh): We only need to make sure the databases are created
- # not necessarily clean of tables.
- self._create_databases()
-
- def execute_cmd(self, cmd=None):
- out, err = processutils.trycmd(cmd, shell=True, discard_warnings=True)
- output = out or err
- LOG.debug(output)
- self.assertEqual('', err,
- "Failed to run: %s\n%s" % (cmd, output))
-
- @utils.synchronized('pgadmin', external=True)
- def _reset_pg(self, conn_pieces):
- (user, password, database, host) = \
- get_pgsql_connection_info(conn_pieces)
- os.environ['PGPASSWORD'] = password
- os.environ['PGUSER'] = user
- # note(boris-42): We must create and drop database, we can't
- # drop database which we have connected to, so for such
- # operations there is a special database postgres.
- sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
- " '%(sql)s' -d postgres")
- sqldict = {'user': user, 'host': host}
-
- sqldict['sql'] = ("drop database if exists %s;") % database
- droptable = sqlcmd % sqldict
- self.execute_cmd(droptable)
-
- sqldict['sql'] = ("create database %s;") % database
- createtable = sqlcmd % sqldict
- self.execute_cmd(createtable)
-
- os.unsetenv('PGPASSWORD')
- os.unsetenv('PGUSER')
-
- @utils.synchronized('mysql', external=True)
- def _reset_mysql(self, conn_pieces):
- # We can execute the MySQL client to destroy and re-create
- # the MYSQL database, which is easier and less error-prone
- # than using SQLAlchemy to do this via MetaData...trust me.
- (user, password, database, host) = \
- get_mysql_connection_info(conn_pieces)
- sql = ("drop database if exists %(database)s; "
- "create database %(database)s;" % {'database': database})
- cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
- "-e \"%(sql)s\"" % {'user': user, 'password': password,
- 'host': host, 'sql': sql})
- self.execute_cmd(cmd)
-
- @utils.synchronized('sqlite', external=True)
- def _reset_sqlite(self, conn_pieces):
- # We can just delete the SQLite database, which is
- # the easiest and cleanest solution
- db_path = conn_pieces.path.strip('/')
- if os.path.exists(db_path):
- os.unlink(db_path)
- # No need to recreate the SQLite DB. SQLite will
- # create it for us if it's not there...
-
- def _create_databases(self):
- """Create all configured databases as needed."""
- for key, engine in self.engines.items():
- self._create_database(key)
-
- def _create_database(self, key):
- """Create database if it doesn't exist."""
- conn_string = self.test_databases[key]
- conn_pieces = urlparse.urlparse(conn_string)
-
- if conn_string.startswith('mysql'):
- (user, password, database, host) = \
- get_mysql_connection_info(conn_pieces)
- sql = "create database if not exists %s;" % database
- cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
- "-e \"%(sql)s\"" % {'user': user, 'password': password,
- 'host': host, 'sql': sql})
- self.execute_cmd(cmd)
- elif conn_string.startswith('postgresql'):
- (user, password, database, host) = \
- get_pgsql_connection_info(conn_pieces)
- os.environ['PGPASSWORD'] = password
- os.environ['PGUSER'] = user
-
- sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
- " '%(sql)s' -d postgres")
-
- sql = ("create database if not exists %s;") % database
- createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
- # 0 means databases is created
- # 256 means it already exists (which is fine)
- # otherwise raise an error
- out, err = processutils.trycmd(createtable, shell=True,
- check_exit_code=[0, 256],
- discard_warnings=True)
- output = out or err
- if err != '':
- self.fail("Failed to run: %s\n%s" % (createtable, output))
-
- os.unsetenv('PGPASSWORD')
- os.unsetenv('PGUSER')
-
- def _reset_databases(self):
- """Reset all configured databases."""
- for key, engine in self.engines.items():
- self._reset_database(key)
-
- def _reset_database(self, key):
- """Reset specific database."""
- engine = self.engines[key]
- conn_string = self.test_databases[key]
- conn_pieces = urlparse.urlparse(conn_string)
- engine.dispose()
- if conn_string.startswith('sqlite'):
- self._reset_sqlite(conn_pieces)
- elif conn_string.startswith('mysql'):
- self._reset_mysql(conn_pieces)
- elif conn_string.startswith('postgresql'):
- self._reset_pg(conn_pieces)
-
-
-class BaseWalkMigrationTestCase(BaseMigrationTestCase):
- """BaseWalkMigrationTestCase loads in an alternative set of databases for
- testing against. This is necessary as the default databases can run tests
- concurrently without interfering with itself. It is expected that
- databases listed under [migraiton_dbs] in the configuration are only being
- accessed by one test at a time. Currently only test_walk_versions accesses
- the databases (and is the only method that calls _reset_database() which
- is clearly problematic for concurrency).
- """
-
- def _load_config(self):
- # Load test databases from the config file. Only do this
- # once. No need to re-run this on each test...
- LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
- if os.path.exists(self.CONFIG_FILE_PATH):
- cp = ConfigParser.RawConfigParser()
- try:
- cp.read(self.CONFIG_FILE_PATH)
- config = cp.options('migration_dbs')
- for key in config:
- self.test_databases[key] = cp.get('migration_dbs', key)
- self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
- self.downgrade = cp.getboolean('walk_style', 'downgrade')
- except ConfigParser.ParsingError as e:
- self.fail("Failed to read test_migrations.conf config "
- "file. Got error: %s" % e)
- else:
- self.fail("Failed to find test_migrations.conf config "
- "file.")
-
- self.engines = {}
- for key, value in self.test_databases.items():
- self.engines[key] = session.create_engine(value)
-
- self._create_databases()
-
- def _test_mysql_opportunistically(self):
- # Test that table creation on mysql only builds InnoDB tables
- if not _have_mysql(self.USER, self.PASSWD, self.DATABASE):
- self.skipTest("mysql not available")
- # add this to the global lists to make reset work with it, it's removed
- # automatically in tearDown so no need to clean it up here.
- connect_string = oslodbutils.get_connect_string(
- "mysql+mysqldb", self.DATABASE, self.USER, self.PASSWD)
- (user, password, database, host) = \
- get_mysql_connection_info(urlparse.urlparse(connect_string))
- engine = session.create_engine(connect_string)
- self.engines[database] = engine
- self.test_databases[database] = connect_string
-
- # build a fully populated mysql database with all the tables
- self._reset_database(database)
- self._walk_versions(engine, self.snake_walk, self.downgrade)
-
- connection = engine.connect()
- # sanity check
- total = connection.execute("SELECT count(*) "
- "from information_schema.TABLES "
- "where TABLE_SCHEMA='%(database)s'" %
- {'database': database})
- self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
+class NovaMigrationsCheckers(NovaMigrationsCheckersBase):
+ """Test sqlalchemy-migrate migrations."""
- noninnodb = connection.execute("SELECT count(*) "
- "from information_schema.TABLES "
- "where TABLE_SCHEMA='%(database)s' "
- "and ENGINE!='InnoDB' "
- "and TABLE_NAME!='migrate_version'" %
- {'database': database})
- count = noninnodb.scalar()
- self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
- connection.close()
-
- del(self.engines[database])
- del(self.test_databases[database])
-
- def _test_postgresql_opportunistically(self):
- # Test postgresql database migration walk
- if not _have_postgresql(self.USER, self.PASSWD, self.DATABASE):
- self.skipTest("postgresql not available")
- # add this to the global lists to make reset work with it, it's removed
- # automatically in tearDown so no need to clean it up here.
- connect_string = oslodbutils.get_connect_string(
- "postgresql+psycopg2", self.DATABASE, self.USER, self.PASSWD)
- engine = session.create_engine(connect_string)
- (user, password, database, host) = \
- get_pgsql_connection_info(urlparse.urlparse(connect_string))
- self.engines[database] = engine
- self.test_databases[database] = connect_string
-
- # build a fully populated postgresql database with all the tables
- self._reset_database(database)
- self._walk_versions(engine, self.snake_walk, self.downgrade)
- del(self.engines[database])
- del(self.test_databases[database])
-
- def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
- # Determine latest version script from the repo, then
- # upgrade from 1 through to the latest, with no data
- # in the databases. This just checks that the schema itself
- # upgrades successfully.
-
- # Place the database under version control
- self.migration_api.version_control(engine,
- self.REPOSITORY,
- self.INIT_VERSION)
- self.assertEqual(self.INIT_VERSION,
- self.migration_api.db_version(engine,
- self.REPOSITORY))
-
- LOG.debug('latest version is %s' % self.REPOSITORY.latest)
- versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
-
- for version in versions:
- # upgrade -> downgrade -> upgrade
- self._migrate_up(engine, version, with_data=True)
- if snake_walk:
- downgraded = self._migrate_down(
- engine, version - 1, with_data=True)
- if downgraded:
- self._migrate_up(engine, version)
-
- if downgrade:
- # Now walk it back down to 0 from the latest, testing
- # the downgrade paths.
- for version in reversed(versions):
- # downgrade -> upgrade -> downgrade
- downgraded = self._migrate_down(engine, version - 1)
-
- if snake_walk and downgraded:
- self._migrate_up(engine, version)
- self._migrate_down(engine, version - 1)
-
- def _migrate_down(self, engine, version, with_data=False):
- try:
- self.migration_api.downgrade(engine, self.REPOSITORY, version)
- except NotImplementedError:
- # NOTE(sirp): some migrations, namely release-level
- # migrations, don't support a downgrade.
- return False
-
- self.assertEqual(version,
- self.migration_api.db_version(engine,
- self.REPOSITORY))
-
- # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
- # version). So if we have any downgrade checks, they need to be run for
- # the previous (higher numbered) migration.
- if with_data:
- post_downgrade = getattr(
- self, "_post_downgrade_%03d" % (version + 1), None)
- if post_downgrade:
- post_downgrade(engine)
+ TIMEOUT_SCALING_FACTOR = 2
- return True
+ snake_walk = True
+ downgrade = True
def _skippable_migrations(self):
special = [
@@ -501,103 +144,12 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
havana_placeholders = range(217, 227)
icehouse_placeholders = range(235, 244)
+ juno_placeholders = range(255, 265)
- return special + havana_placeholders + icehouse_placeholders
-
- def _migrate_up(self, engine, version, with_data=False):
- """migrate up to a new version of the db.
-
- We allow for data insertion and post checks at every
- migration version with special _pre_upgrade_### and
- _check_### functions in the main test.
- """
- # NOTE(sdague): try block is here because it's impossible to debug
- # where a failed data migration happens otherwise
- try:
- if with_data:
- data = None
- pre_upgrade = getattr(
- self, "_pre_upgrade_%03d" % version, None)
- if pre_upgrade:
- data = pre_upgrade(engine)
-
- self.migration_api.upgrade(engine, self.REPOSITORY, version)
- self.assertEqual(version,
- self.migration_api.db_version(engine,
- self.REPOSITORY))
- if with_data:
- check = getattr(self, "_check_%03d" % version, None)
- if version not in self._skippable_migrations():
- self.assertIsNotNone(check,
- ('DB Migration %i does not have a '
- 'test. Please add one!') % version)
- if check:
- check(engine, data)
- except Exception:
- LOG.error("Failed to migrate to version %s on engine %s" %
- (version, engine))
- raise
-
-
-class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
- """Test sqlalchemy-migrate migrations."""
- USER = "openstack_citest"
- PASSWD = "openstack_citest"
- DATABASE = "openstack_citest"
-
- def __init__(self, *args, **kwargs):
- super(TestNovaMigrations, self).__init__(*args, **kwargs)
-
- self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
- 'test_migrations.conf')
- # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
- # to override the location of the config file for migration testing
- self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
- self.DEFAULT_CONFIG_FILE)
- self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
- self.REPOSITORY = repository.Repository(
- os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
-
- def setUp(self):
- super(TestNovaMigrations, self).setUp()
-
- if self.migration is None:
- self.migration = __import__('nova.db.migration',
- globals(), locals(), ['db_initial_version'], -1)
- self.INIT_VERSION = self.migration.db_initial_version()
- if self.migration_api is None:
- temp = __import__('nova.db.sqlalchemy.migration',
- globals(), locals(), ['versioning_api'], -1)
- self.migration_api = temp.versioning_api
-
- def assertColumnExists(self, engine, table, column):
- t = oslodbutils.get_table(engine, table)
- self.assertIn(column, t.c)
-
- def assertColumnNotExists(self, engine, table, column):
- t = oslodbutils.get_table(engine, table)
- self.assertNotIn(column, t.c)
-
- def assertTableNotExists(self, engine, table):
- self.assertRaises(sqlalchemy.exc.NoSuchTableError,
- oslodbutils.get_table, engine, table)
-
- def assertIndexExists(self, engine, table, index):
- t = oslodbutils.get_table(engine, table)
- index_names = [idx.name for idx in t.indexes]
- self.assertIn(index, index_names)
-
- def assertIndexMembers(self, engine, table, index, members):
- self.assertIndexExists(engine, table, index)
-
- t = oslodbutils.get_table(engine, table)
- index_columns = None
- for idx in t.indexes:
- if idx.name == index:
- index_columns = idx.columns.keys()
- break
-
- self.assertEqual(sorted(members), sorted(index_columns))
+ return (special +
+ havana_placeholders +
+ icehouse_placeholders +
+ juno_placeholders)
def _check_227(self, engine, data):
table = oslodbutils.get_table(engine, 'project_user_quotas')
@@ -794,12 +346,12 @@ class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
def _check_251(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
- self.assertColumnExists(
- engine, 'shadow_compute_nodes', 'numa_topology')
+ self.assertColumnExists(engine, 'shadow_compute_nodes',
+ 'numa_topology')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
- shadow_compute_nodes = oslodbutils.get_table(
- engine, 'shadow_compute_nodes')
+ shadow_compute_nodes = oslodbutils.get_table(engine,
+ 'shadow_compute_nodes')
self.assertIsInstance(compute_nodes.c.numa_topology.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
@@ -807,8 +359,8 @@ class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
def _post_downgrade_251(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'numa_topology')
- self.assertColumnNotExists(
- engine, 'shadow_compute_nodes', 'numa_topology')
+ self.assertColumnNotExists(engine, 'shadow_compute_nodes',
+ 'numa_topology')
def _check_252(self, engine, data):
oslodbutils.get_table(engine, 'instance_extra')
@@ -824,11 +376,10 @@ class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
def _check_253(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'pci_requests')
self.assertColumnExists(
- engine, 'shadow_instance_extra', 'pci_requests')
-
+ engine, 'shadow_instance_extra', 'pci_requests')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
- shadow_instance_extra = oslodbutils.get_table(
- engine, 'shadow_instance_extra')
+ shadow_instance_extra = oslodbutils.get_table(engine,
+ 'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.pci_requests.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
@@ -836,8 +387,8 @@ class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
def _post_downgrade_253(self, engine):
self.assertColumnNotExists(engine, 'instance_extra', 'pci_requests')
- self.assertColumnNotExists(
- engine, 'shadow_instance_extra', 'pci_requests')
+ self.assertColumnNotExists(engine, 'shadow_instance_extra',
+ 'pci_requests')
def _check_254(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'request_id')
@@ -857,39 +408,97 @@ class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
self.assertColumnNotExists(
engine, 'shadow_pci_devices', 'request_id')
+ def _check_265(self, engine, data):
+ # Assert that only one index exists that covers columns
+ # host and deleted
+ instances = oslodbutils.get_table(engine, 'instances')
+ self.assertEqual(1, len([i for i in instances.indexes
+ if [c.name for c in i.columns][:2] ==
+ ['host', 'deleted']]))
+ # and only one index covers host column
+ iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
+ self.assertEqual(1, len([i for i in iscsi_targets.indexes
+ if [c.name for c in i.columns][:1] ==
+ ['host']]))
+
+ def _post_downgrade_265(self, engine):
+ # The duplicated index is not created on downgrade, so this
+ # asserts that only one index exists that covers columns
+ # host and deleted
+ instances = oslodbutils.get_table(engine, 'instances')
+ self.assertEqual(1, len([i for i in instances.indexes
+ if [c.name for c in i.columns][:2] ==
+ ['host', 'deleted']]))
+ # and only one index covers host column
+ iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
+ self.assertEqual(1, len([i for i in iscsi_targets.indexes
+ if [c.name for c in i.columns][:1] ==
+ ['host']]))
+
+
+class TestNovaMigrationsSQLite(NovaMigrationsCheckers,
+ test_base.DbTestCase):
+ pass
+
+
+class TestNovaMigrationsMySQL(NovaMigrationsCheckers,
+ test_base.MySQLOpportunisticTestCase):
+ def test_innodb_tables(self):
+ with mock.patch.object(sa_migration, 'get_engine',
+ return_value=self.migrate_engine):
+ sa_migration.db_sync()
+
+ total = self.migrate_engine.execute(
+ "SELECT count(*) "
+ "FROM information_schema.TABLES "
+ "WHERE TABLE_SCHEMA = '%(database)s'" %
+ {'database': self.migrate_engine.url.database})
+ self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
+
+ noninnodb = self.migrate_engine.execute(
+ "SELECT count(*) "
+ "FROM information_schema.TABLES "
+ "WHERE TABLE_SCHEMA='%(database)s' "
+ "AND ENGINE != 'InnoDB' "
+ "AND TABLE_NAME != 'migrate_version'" %
+ {'database': self.migrate_engine.url.database})
+ count = noninnodb.scalar()
+ self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
+
-class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
+class TestNovaMigrationsPostgreSQL(NovaMigrationsCheckers,
+ test_base.PostgreSQLOpportunisticTestCase):
+ pass
+
+
+class BaremetalMigrationsCheckers(NovaMigrationsCheckersBase):
"""Test sqlalchemy-migrate migrations."""
- USER = "openstack_citest"
- PASSWD = "openstack_citest"
- DATABASE = "openstack_baremetal_citest"
-
- def __init__(self, *args, **kwargs):
- super(TestBaremetalMigrations, self).__init__(*args, **kwargs)
-
- self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
- '../virt/baremetal/test_baremetal_migrations.conf')
- # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
- # to override the location of the config file for migration testing
- self.CONFIG_FILE_PATH = os.environ.get(
- 'BAREMETAL_TEST_MIGRATIONS_CONF',
- self.DEFAULT_CONFIG_FILE)
- self.MIGRATE_FILE = \
- nova.virt.baremetal.db.sqlalchemy.migrate_repo.__file__
- self.REPOSITORY = repository.Repository(
- os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
+ TIMEOUT_SCALING_FACTOR = 2
- def setUp(self):
- super(TestBaremetalMigrations, self).setUp()
+ snake_walk = True
+ downgrade = True
+
+ @property
+ def INIT_VERSION(self):
+ return bm_migration.db_initial_version()
+
+ @property
+ def REPOSITORY(self):
+ return repository.Repository(
+ os.path.abspath(os.path.dirname(bm_migrate_repo.__file__)))
- if self.migration is None:
- self.migration = __import__('nova.virt.baremetal.db.migration',
- globals(), locals(), ['db_initial_version'], -1)
- self.INIT_VERSION = self.migration.db_initial_version()
- if self.migration_api is None:
- temp = __import__('nova.virt.baremetal.db.sqlalchemy.migration',
- globals(), locals(), ['versioning_api'], -1)
- self.migration_api = temp.versioning_api
+ @property
+ def migration_api(self):
+ return bm_sa_migration.versioning_api
+
+ @property
+ def migrate_engine(self):
+ return self.engine
+
+ def _skippable_migrations(self):
+ # NOTE(danms): This is deprecated code, soon to be removed, so don't
+ # obsess about tests here.
+ return range(1, 100)
def _pre_upgrade_002(self, engine):
data = [{'id': 1, 'key': 'fake-key', 'image_path': '/dev/null',
@@ -990,21 +599,38 @@ class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
self.assertNotIn('preserve_ephemeral', bm_nodes.columns)
- def _skippable_migrations(self):
- # NOTE(danms): This is deprecated code, soon to be removed, so don't
- # obsess about tests here.
- return range(1, 100)
+
+class TestBaremetalMigrationsSQLite(BaremetalMigrationsCheckers,
+ test_base.DbTestCase):
+ pass
+
+
+class TestBaremetalMigrationsMySQL(BaremetalMigrationsCheckers,
+ test_base.MySQLOpportunisticTestCase):
+ pass
+
+
+class TestBaremetalMigrationsPostgreSQL(
+ BaremetalMigrationsCheckers,
+ test_base.PostgreSQLOpportunisticTestCase):
+ pass
class ProjectTestCase(test.NoDBTestCase):
def test_all_migrations_have_downgrade(self):
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
- py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy",
- "migrate_repo", "versions", "*.py")
+ py_globs = [os.path.join(topdir, "nova", "db", "sqlalchemy",
+ "migrate_repo", "versions", "*.py"),
+ os.path.join(topdir, "nova", "virt", "baremetal", "db",
+ "sqlalchemy", "migrate_repo", "versions",
+ "*.py")]
+ migrate_files = []
+ for g in py_globs:
+ migrate_files += list(glob.iglob(g))
missing_downgrade = []
- for path in glob.iglob(py_glob):
+ for path in migrate_files:
has_upgrade = False
has_downgrade = False
with open(path, "r") as f: