author    John Warren <jswarren@us.ibm.com>    2013-12-02 19:51:41 +0000
committer John Warren <jswarren@us.ibm.com>    2014-01-06 18:31:37 +0000
commit    5dc675f193a1dbaf5807416ceb2e808dd8e10f45 (patch)
tree      0003a700e76fea70f8844d55f49bf7678ffabc14
parent    6c7f00d4598d04523d614d81704f6b9b21f56d78 (diff)
Flush tokens in batches with DB2
When there is a very large number of expired tokens to be flushed,
keystone-manage token_flush will fail with DB2 because it fills the
transaction log. This fix causes tokens to be flushed with DB2 in
batches of 100 at a time to prevent the transaction log from filling
up.

Closes-Bug: #1257323
Change-Id: Ic57d8795a995462fc277ec9655eca9b460ffcbee
-rw-r--r--   keystone/tests/test_backend_sql.py   33
-rw-r--r--   keystone/token/backends/sql.py       38
2 files changed, 67 insertions(+), 4 deletions(-)
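The pattern the patch adopts can be shown standalone as a minimal,
self-contained SQLAlchemy sketch (illustrative only, not keystone code:
the Token model, the in-memory SQLite engine, and the batch size are
all assumptions; DB2 would use an ibm_db_sa connection URL instead).
Expired ids are selected with a LIMIT subquery, the matching rows are
deleted, and the transaction is committed, repeating until a pass
deletes nothing:

    import datetime

    from sqlalchemy import Column, DateTime, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()


    class Token(Base):
        __tablename__ = 'token'
        id = Column(String(64), primary_key=True)
        expires = Column(DateTime)


    engine = create_engine('sqlite://')  # stand-in for a DB2 connection
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    BATCH_SIZE = 100  # assumed; small enough not to fill the log

    while True:
        # Select at most BATCH_SIZE expired ids, then delete the rows
        # whose ids match; stop once a pass deletes nothing.
        subquery = (session.query(Token.id).
                    filter(Token.expires < datetime.datetime.utcnow()).
                    limit(BATCH_SIZE).subquery())
        rowcount = (session.query(Token).
                    filter(Token.id.in_(subquery)).
                    delete(synchronize_session=False))
        session.commit()  # end the transaction so the log can be freed
        if rowcount == 0:
            break

Note that SQLite appears to ignore the LIMIT inside a delete subquery
(the test comment below makes the same observation), so on SQLite this
loop deletes everything in its first pass but still terminates.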
diff --git a/keystone/tests/test_backend_sql.py b/keystone/tests/test_backend_sql.py
index e380f8dc0..7b48a1361 100644
--- a/keystone/tests/test_backend_sql.py
+++ b/keystone/tests/test_backend_sql.py
@@ -372,6 +372,39 @@ class SqlToken(SqlTests, test_backend.TokenTests):
         self.mox.ReplayAll()
         tok.list_revoked_tokens()
+    def test_flush_expired_tokens_batch(self):
+        # This test simply executes the code under test to verify
+        # that the code is legal. It is not possible to test
+        # whether records are deleted in batches using sqlite,
+        # because the limit function does not seem to affect
+        # delete subqueries; these are, however, legal.
+        # After several failed attempts with mox, it appears that
+        # mock objects cannot be used to test the target code,
+        # because of the unique way the SQLAlchemy Query class's
+        # filter method works.
+        fixture = self.useFixture(moxstubout.MoxStubout())
+        self.mox = fixture.mox
+        tok = token_sql.Token()
+        self.mox.StubOutWithMock(tok, 'token_flush_batch_size')
+        # Just need a batch larger than 0; note that the code
+        # path with batch_size = 0 is covered by test_backend,
+        # where all backends' flush_expired_tokens methods
+        # are tested.
+        tok.token_flush_batch_size('sqlite').AndReturn(1)
+        self.mox.ReplayAll()
+        tok.flush_expired_tokens()
+
+    def test_token_flush_batch_size_default(self):
+        tok = token_sql.Token()
+        sqlite_batch = tok.token_flush_batch_size('sqlite')
+        self.assertEqual(sqlite_batch, 0)
+
+    def test_token_flush_batch_size_db2(self):
+        tok = token_sql.Token()
+        db2_batch = tok.token_flush_batch_size('ibm_db_sa')
+        self.assertEqual(db2_batch, 100)
+
 class SqlCatalog(SqlTests, test_backend.CatalogTests):
     def test_malformed_catalog_throws_error(self):
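Aside: the stub-out/record/replay cycle that mox performs in
test_flush_expired_tokens_batch above can be easier to follow in
isolation. A minimal sketch of the same pattern, assuming a
hypothetical Driver class with a batch_size method (not keystone code):

    import mox


    class Driver(object):
        def batch_size(self, dialect):
            return 0


    m = mox.Mox()
    driver = Driver()
    m.StubOutWithMock(driver, 'batch_size')
    driver.batch_size('sqlite').AndReturn(1)  # record the expected call
    m.ReplayAll()                             # switch record -> replay
    assert driver.batch_size('sqlite') == 1   # stubbed call returns 1
    m.VerifyAll()                             # fail if expectations unmet
    m.UnsetStubs()                            # restore the real method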
diff --git a/keystone/token/backends/sql.py b/keystone/token/backends/sql.py
index 6be932c10..9be2f80ec 100644
--- a/keystone/token/backends/sql.py
+++ b/keystone/token/backends/sql.py
@@ -193,11 +193,41 @@ class Token(sql.Base, token.Driver):
             tokens.append(record)
         return tokens
+    def token_flush_batch_size(self, dialect):
+        batch_size = 0
+        if dialect == 'ibm_db_sa':
+            # This functionality is limited to DB2, because
+            # it is necessary to prevent the transaction log
+            # from filling up, whereas at least some of the
+            # other supported databases do not support update
+            # queries with LIMIT subqueries, nor do they appear
+            # to require the use of such queries when deleting
+            # large numbers of records at once.
+            batch_size = 100
+            # Limit of 100 is known to not fill a transaction log
+            # of default maximum size while not significantly
+            # impacting the performance of large token purges on
+            # systems where the maximum transaction log size has
+            # been increased beyond the default.
+        return batch_size
+
     def flush_expired_tokens(self):
         session = self.get_session()
-
-        query = session.query(TokenModel)
-        query = query.filter(TokenModel.expires < timeutils.utcnow())
-        query.delete(synchronize_session=False)
+        dialect = session.bind.dialect.name
+        batch_size = self.token_flush_batch_size(dialect)
+        if batch_size > 0:
+            query = session.query(TokenModel.id)
+            query = query.filter(TokenModel.expires < timeutils.utcnow())
+            query = query.limit(batch_size).subquery()
+            delete_query = (session.query(TokenModel).
+                            filter(TokenModel.id.in_(query)))
+            while True:
+                rowcount = delete_query.delete(synchronize_session=False)
+                if rowcount == 0:
+                    break
+        else:
+            query = session.query(TokenModel)
+            query = query.filter(TokenModel.expires < timeutils.utcnow())
+            query.delete(synchronize_session=False)
         session.flush()
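To see what the batched branch asks the database to do, the delete
criteria can be printed, since str() on a SQLAlchemy Query compiles it
to SQL. A rough sketch reusing the hypothetical Token model and session
from the earlier example (output abbreviated; the exact rendering is
dialect-dependent, and DB2 expresses the limit as FETCH FIRST n ROWS
ONLY rather than LIMIT):

    subquery = (session.query(Token.id).
                filter(Token.expires < datetime.datetime.utcnow()).
                limit(100).subquery())
    print(session.query(Token).filter(Token.id.in_(subquery)))
    # SELECT token.id, token.expires FROM token
    # WHERE token.id IN (SELECT anon_1.id FROM
    #     (SELECT token.id AS id FROM token
    #      WHERE token.expires < :expires_1
    #      LIMIT :param_1) AS anon_1)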