summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJenkins <jenkins@review.openstack.org>2014-01-13 16:53:13 +0000
committerGerrit Code Review <review@openstack.org>2014-01-13 16:53:13 +0000
commit83db9722c201eca9fa47d93fce7d09dd6f28e053 (patch)
tree5fc851a6512dd97aabe30613bc0642a5a7a68f97
parent5a1a371b2ac424f86bae9ce5470be960ca0ffb14 (diff)
parent5dc675f193a1dbaf5807416ceb2e808dd8e10f45 (diff)
downloadkeystone-83db9722c201eca9fa47d93fce7d09dd6f28e053.tar.gz
Merge "Flush tokens in batches with DB2"
-rw-r--r--keystone/tests/test_backend_sql.py33
-rw-r--r--keystone/token/backends/sql.py38
2 files changed, 67 insertions, 4 deletions
diff --git a/keystone/tests/test_backend_sql.py b/keystone/tests/test_backend_sql.py
index 1a6df3abc..aeaa3a74b 100644
--- a/keystone/tests/test_backend_sql.py
+++ b/keystone/tests/test_backend_sql.py
@@ -370,6 +370,39 @@ class SqlToken(SqlTests, test_backend.TokenTests):
self.mox.ReplayAll()
tok.list_revoked_tokens()
+ def test_flush_expired_tokens_batch(self):
+ # This test simply executes the code under test to verify
+ # that the code is legal. It is not possible to test
+ # whether records are deleted in batches using sqlite,
+ # because the limit function does not seem to affect
+ # delete subqueries; these are, however, legal.
+ # After several failed attempts at using mox, it
+ # appears that mock objects cannot be used to test
+ # the target code, because of
+ # the unique way the SQLAlchemy Query class's filter
+ # method works.
+ fixture = self.useFixture(moxstubout.MoxStubout())
+ self.mox = fixture.mox
+ tok = token_sql.Token()
+ self.mox.StubOutWithMock(tok, 'token_flush_batch_size')
+ # Just need a batch larger than 0; note that the code
+ # path with batch_size = 0 is covered by test_backend,
+ # where all backends' flush_expired_tokens methods
+ # are tested.
+ tok.token_flush_batch_size('sqlite').AndReturn(1)
+ self.mox.ReplayAll()
+ tok.flush_expired_tokens()
+
+ def test_token_flush_batch_size_default(self):
+ tok = token_sql.Token()
+ sqlite_batch = tok.token_flush_batch_size('sqlite')
+ self.assertEqual(sqlite_batch, 0)
+
+ def test_token_flush_batch_size_db2(self):
+ tok = token_sql.Token()
+ db2_batch = tok.token_flush_batch_size('ibm_db_sa')
+ self.assertEqual(db2_batch, 100)
+
class SqlCatalog(SqlTests, test_backend.CatalogTests):
def test_malformed_catalog_throws_error(self):
diff --git a/keystone/token/backends/sql.py b/keystone/token/backends/sql.py
index 881283fc9..772adaafd 100644
--- a/keystone/token/backends/sql.py
+++ b/keystone/token/backends/sql.py
@@ -187,11 +187,41 @@ class Token(sql.Base, token.Driver):
tokens.append(record)
return tokens
+ def token_flush_batch_size(self, dialect):
+ batch_size = 0
+ if dialect == 'ibm_db_sa':
+ # This functionality is limited to DB2, because
+ # it is necessary to prevent the transaction log
+ # from filling up, whereas at least some of the
+ # other supported databases do not support update
+ # queries with LIMIT subqueries nor do they appear
+ # to require the use of such queries when deleting
+ # large numbers of records at once.
+ batch_size = 100
+ # Limit of 100 is known to not fill a transaction log
+ # of default maximum size while not significantly
+ # impacting the performance of large token purges on
+ # systems where the maximum transaction log size has
+ # been increased beyond the default.
+ return batch_size
+
def flush_expired_tokens(self):
session = self.get_session()
-
- query = session.query(TokenModel)
- query = query.filter(TokenModel.expires < timeutils.utcnow())
- query.delete(synchronize_session=False)
+ dialect = session.bind.dialect.name
+ batch_size = self.token_flush_batch_size(dialect)
+ if batch_size > 0:
+ query = session.query(TokenModel.id)
+ query = query.filter(TokenModel.expires < timeutils.utcnow())
+ query = query.limit(batch_size).subquery()
+ delete_query = (session.query(TokenModel).
+ filter(TokenModel.id.in_(query)))
+ while True:
+ rowcount = delete_query.delete(synchronize_session=False)
+ if rowcount == 0:
+ break
+ else:
+ query = session.query(TokenModel)
+ query = query.filter(TokenModel.expires < timeutils.utcnow())
+ query.delete(synchronize_session=False)
session.flush()