author	Surya Seetharaman <suryaseetharaman.9@gmail.com>	2017-10-25 13:43:43 +0200
committer	Matt Riedemann <mriedem.os@gmail.com>	2018-07-06 21:52:43 +0000
commit	70de423255fd01822188bb9082a0c0cc1c8ec2d0 (patch)
tree	e79ca4bafe36c3523e9359d2de07dff820381a29 /nova/db
parent	966a5a21544fe453a36cf5215d890d2c0b5ed82f (diff)
download	nova-70de423255fd01822188bb9082a0c0cc1c8ec2d0.tar.gz
cleanup mapping/reqspec after archive instance
This patch deletes the records of archived instances from the instance_mappings and request_specs tables in the API database immediately after those instances are archived from the instances table to the shadow_instances table. As a result, running the 'nova-manage db archive_deleted_rows' command now also automatically removes the corresponding records from the instance_mappings and request_specs tables.

A warning has also been added to address the issue of 'nova-manage verify_instance' returning a valid instance mapping even after the instance has been deleted.

The patch also adds InstanceMappingList.destroy_bulk() and RequestSpec.destroy_bulk() methods to make bulk deletion of these records easier.

Change-Id: I483701a55576c245d091ff086b32081b392f746e
Closes-Bug: #1724621
Closes-Bug: #1678056
(cherry picked from commit 32fd58813f8247641a6b574b5f01528b29d48b76)
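For context, here is a minimal sketch of how a caller such as the 'nova-manage db archive_deleted_rows' command could consume the new two-value return to purge the API database records. The destroy_bulk() names come from the commit message above, but their exact signatures and the surrounding wiring are assumptions for illustration, not the actual nova-manage implementation:

    # Hedged sketch; the destroy_bulk() signatures are assumed, not verified.
    from nova import context as nova_context
    from nova import objects
    from nova.db.sqlalchemy import api as db_api

    def archive_and_cleanup(max_rows=1000):
        # archive_deleted_rows() now returns the per-table archive counts plus
        # the uuids of the instances that were moved to shadow_instances.
        table_to_rows, deleted_instance_uuids = db_api.archive_deleted_rows(
            max_rows=max_rows)
        if deleted_instance_uuids:
            ctxt = nova_context.get_admin_context()
            # Bulk-delete the now-stale API database records.
            objects.InstanceMappingList.destroy_bulk(ctxt, deleted_instance_uuids)
            objects.RequestSpec.destroy_bulk(ctxt, deleted_instance_uuids)
        return table_to_rows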
Diffstat (limited to 'nova/db')
-rw-r--r--	nova/db/sqlalchemy/api.py	30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index afe1b80e40..0789515e74 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -6404,11 +6404,12 @@ def _archive_deleted_rows_for_table(tablename, max_rows):
     shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
     rows_archived = 0
+    deleted_instance_uuids = []
     try:
         shadow_table = Table(shadow_tablename, metadata, autoload=True)
     except NoSuchTableError:
         # No corresponding shadow table; skip it.
-        return rows_archived
+        return rows_archived, deleted_instance_uuids
     if tablename == "dns_domains":
         # We have one table (dns_domains) where the key is called
@@ -6462,10 +6463,24 @@ def _archive_deleted_rows_for_table(tablename, max_rows):
                               order_by(column).limit(max_rows)
     delete_statement = DeleteFromSelect(table, query_delete, column)
+
+    # NOTE(tssurya): In order to facilitate the deletion of records from
+    # instance_mappings table in the nova_api DB, the rows of deleted instances
+    # from the instances table are stored prior to their deletion from
+    # the instances table. Basically the uuids of the archived instances
+    # are queried and returned.
+    if tablename == "instances":
+        query_delete = query_delete.column(table.c.uuid)
+        rows = conn.execute(query_delete).fetchall()
+        deleted_instance_uuids = [r[1] for r in rows]
+
     try:
         # Group the insert and delete in a transaction.
         with conn.begin():
             conn.execute(insert)
+            if tablename == "instances":
+                delete_statement = table.delete().where(table.c.uuid.in_(
+                    deleted_instance_uuids))
             result_delete = conn.execute(delete_statement)
             rows_archived = result_delete.rowcount
     except db_exc.DBReferenceError as ex:
@@ -6484,7 +6499,7 @@ def _archive_deleted_rows_for_table(tablename, max_rows):
                                              conn, limit)
         rows_archived += extra
-    return rows_archived
+    return rows_archived, deleted_instance_uuids

 def archive_deleted_rows(max_rows=None):
@@ -6504,26 +6519,31 @@ def archive_deleted_rows(max_rows=None):
     """
     table_to_rows_archived = {}
+    deleted_instance_uuids = []
     total_rows_archived = 0
     meta = MetaData(get_engine(use_slave=True))
     meta.reflect()
     # Reverse sort the tables so we get the leaf nodes first for processing.
     for table in reversed(meta.sorted_tables):
         tablename = table.name
+        rows_archived = 0
         # skip the special sqlalchemy-migrate migrate_version table and any
         # shadow tables
         if (tablename == 'migrate_version' or
                 tablename.startswith(_SHADOW_TABLE_PREFIX)):
             continue
-        rows_archived = _archive_deleted_rows_for_table(
-            tablename, max_rows=max_rows - total_rows_archived)
+        rows_archived,\
+            deleted_instance_uuid = _archive_deleted_rows_for_table(
+                tablename, max_rows=max_rows - total_rows_archived)
         total_rows_archived += rows_archived
+        if tablename == 'instances':
+            deleted_instance_uuids = deleted_instance_uuid
         # Only report results for tables that had updates.
         if rows_archived:
            table_to_rows_archived[tablename] = rows_archived
        if total_rows_archived >= max_rows:
            break
-    return table_to_rows_archived
+    return table_to_rows_archived, deleted_instance_uuids
@pick_context_manager_writer
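For illustration only, a standalone sketch of the select-then-delete-by-uuid pattern used in the "instances" branch of the diff above: the uuids of the rows about to be removed are fetched first, and the delete is then keyed on exactly those uuids. The table and column names are made up, and the sketch assumes a SQLAlchemy 1.x-style select([...]) as used in the diff; it is not nova code:

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite://')
    meta = sa.MetaData()
    instances = sa.Table('instances', meta,
                         sa.Column('id', sa.Integer, primary_key=True),
                         sa.Column('uuid', sa.String(36)),
                         sa.Column('deleted', sa.Integer, default=0))
    meta.create_all(engine)

    with engine.connect() as conn:
        # Fetch the ids and uuids of soft-deleted rows, bounded by a limit,
        # so the caller knows exactly which instances are about to go away.
        query = sa.select([instances.c.id, instances.c.uuid]).where(
            instances.c.deleted != 0).order_by(instances.c.id).limit(100)
        rows = conn.execute(query).fetchall()
        deleted_uuids = [r[1] for r in rows]
        # Delete precisely those rows in one transaction, keyed on uuid, so
        # the collected uuid list matches what was actually removed.
        with conn.begin():
            conn.execute(instances.delete().where(
                instances.c.uuid.in_(deleted_uuids)))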