summaryrefslogtreecommitdiff
path: root/taskflow/persistence/backends
diff options
context:
space:
mode:
authorJoshua Harlow <harlowja@yahoo-inc.com>2015-02-11 18:22:20 -0800
committerJoshua Harlow <harlowja@yahoo-inc.com>2015-04-03 16:04:46 -0700
commitaa8be4b8f5f55cc4e11088dcb122b34345f56cec (patch)
treea46a6e3845de3ed22dfdfe8a14be2934681cca86 /taskflow/persistence/backends
parent97c170473011e24da5481d2e3fd82bf8e6778bed (diff)
downloadtaskflow-aa8be4b8f5f55cc4e11088dcb122b34345f56cec.tar.gz
Use the ability to chain exceptions correctly
In the zookeeper jobboard (and persistence backends) we are associating the cause of a new exception when raising an exception. Using the new exception helper we can make this work better on py2.x and py3.x so that the py3.x version has the chain setup correctly (while the py2.x version just uses the class 'cause' attribute instead). Change-Id: Ieeac2f70e1834d4612556565762ffd3be3e5b5a1
Diffstat (limited to 'taskflow/persistence/backends')
-rw-r--r--taskflow/persistence/backends/impl_dir.py8
-rw-r--r--taskflow/persistence/backends/impl_memory.py7
-rw-r--r--taskflow/persistence/backends/impl_sqlalchemy.py74
-rw-r--r--taskflow/persistence/backends/impl_zookeeper.py32
4 files changed, 72 insertions, 49 deletions
diff --git a/taskflow/persistence/backends/impl_dir.py b/taskflow/persistence/backends/impl_dir.py
index 5185b2b..e71d5b9 100644
--- a/taskflow/persistence/backends/impl_dir.py
+++ b/taskflow/persistence/backends/impl_dir.py
@@ -36,8 +36,12 @@ def _storagefailure_wrapper():
raise
except Exception as e:
if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT:
- raise exc.NotFound('Item not found: %s' % e.filename, e)
- raise exc.StorageFailure("Storage backend internal error", e)
+ exc.raise_with_cause(exc.NotFound,
+ 'Item not found: %s' % e.filename,
+ cause=e)
+ else:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Storage backend internal error", cause=e)
class DirBackend(path_based.PathBasedBackend):
diff --git a/taskflow/persistence/backends/impl_memory.py b/taskflow/persistence/backends/impl_memory.py
index 7efe6ec..0157fcf 100644
--- a/taskflow/persistence/backends/impl_memory.py
+++ b/taskflow/persistence/backends/impl_memory.py
@@ -209,10 +209,11 @@ class Connection(path_based.PathBasedConnection):
with lock():
try:
yield
- except exc.TaskFlowException as e:
+ except exc.TaskFlowException:
raise
- except Exception as e:
- raise exc.StorageFailure("Storage backend internal error", e)
+ except Exception:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Storage backend internal error")
def _join_path(self, *parts):
return pp.join(*parts)
diff --git a/taskflow/persistence/backends/impl_sqlalchemy.py b/taskflow/persistence/backends/impl_sqlalchemy.py
index d6eeaab..d9fdf73 100644
--- a/taskflow/persistence/backends/impl_sqlalchemy.py
+++ b/taskflow/persistence/backends/impl_sqlalchemy.py
@@ -405,16 +405,18 @@ class Connection(base.Connection):
self._metadata.create_all(bind=conn)
else:
migration.db_sync(conn)
- except sa_exc.SQLAlchemyError as e:
- raise exc.StorageFailure("Failed upgrading database version", e)
+ except sa_exc.SQLAlchemyError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed upgrading database version")
def clear_all(self):
try:
logbooks = self._tables.logbooks
with self._engine.begin() as conn:
conn.execute(logbooks.delete())
- except sa_exc.DBAPIError as e:
- raise exc.StorageFailure("Failed clearing all entries", e)
+ except sa_exc.DBAPIError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed clearing all entries")
def update_atom_details(self, atom_detail):
try:
@@ -429,9 +431,10 @@ class Connection(base.Connection):
e_ad = self._converter.convert_atom_detail(row)
self._update_atom_details(conn, atom_detail, e_ad)
return e_ad
- except sa_exc.SQLAlchemyError as e:
- raise exc.StorageFailure("Failed updating atom details with"
- " uuid '%s'" % atom_detail.uuid, e)
+ except sa_exc.SQLAlchemyError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed updating atom details"
+ " with uuid '%s'" % atom_detail.uuid)
def _insert_flow_details(self, conn, fd, parent_uuid):
value = fd.to_dict()
@@ -479,9 +482,10 @@ class Connection(base.Connection):
self._converter.populate_flow_detail(conn, e_fd)
self._update_flow_details(conn, flow_detail, e_fd)
return e_fd
- except sa_exc.SQLAlchemyError as e:
- raise exc.StorageFailure("Failed updating flow details with"
- " uuid '%s'" % flow_detail.uuid, e)
+ except sa_exc.SQLAlchemyError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed updating flow details with"
+ " uuid '%s'" % flow_detail.uuid)
def destroy_logbook(self, book_uuid):
try:
@@ -492,9 +496,9 @@ class Connection(base.Connection):
if r.rowcount == 0:
raise exc.NotFound("No logbook found with"
" uuid '%s'" % book_uuid)
- except sa_exc.DBAPIError as e:
- raise exc.StorageFailure("Failed destroying"
- " logbook '%s'" % book_uuid, e)
+ except sa_exc.DBAPIError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed destroying logbook '%s'" % book_uuid)
def save_logbook(self, book):
try:
@@ -523,9 +527,10 @@ class Connection(base.Connection):
for fd in book:
self._insert_flow_details(conn, fd, book.uuid)
return book
- except sa_exc.DBAPIError as e:
- raise exc.StorageFailure("Failed saving logbook"
- " '%s'" % book.uuid, e)
+ except sa_exc.DBAPIError:
+ exc.raise_with_cause(
+ exc.StorageFailure,
+ "Failed saving logbook '%s'" % book.uuid)
def get_logbook(self, book_uuid, lazy=False):
try:
@@ -541,9 +546,9 @@ class Connection(base.Connection):
if not lazy:
self._converter.populate_book(conn, book)
return book
- except sa_exc.DBAPIError as e:
- raise exc.StorageFailure(
- "Failed getting logbook '%s'" % book_uuid, e)
+ except sa_exc.DBAPIError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed getting logbook '%s'" % book_uuid)
def get_logbooks(self, lazy=False):
gathered = []
@@ -555,8 +560,9 @@ class Connection(base.Connection):
if not lazy:
self._converter.populate_book(conn, book)
gathered.append(book)
- except sa_exc.DBAPIError as e:
- raise exc.StorageFailure("Failed getting logbooks", e)
+ except sa_exc.DBAPIError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed getting logbooks")
for book in gathered:
yield book
@@ -568,8 +574,10 @@ class Connection(base.Connection):
if not lazy:
self._converter.populate_flow_detail(conn, fd)
gathered.append(fd)
- except sa_exc.DBAPIError as e:
- raise exc.StorageFailure("Failed getting flow details", e)
+ except sa_exc.DBAPIError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed getting flow details in"
+ " logbook '%s'" % book_uuid)
for flow_details in gathered:
yield flow_details
@@ -587,9 +595,10 @@ class Connection(base.Connection):
if not lazy:
self._converter.populate_flow_detail(conn, fd)
return fd
- except sa_exc.SQLAlchemyError as e:
- raise exc.StorageFailure("Failed getting flow details with"
- " uuid '%s'" % fd_uuid, e)
+ except sa_exc.SQLAlchemyError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed getting flow details with"
+ " uuid '%s'" % fd_uuid)
def get_atom_details(self, ad_uuid):
try:
@@ -602,9 +611,10 @@ class Connection(base.Connection):
raise exc.NotFound("No atom details found with uuid"
" '%s'" % ad_uuid)
return self._converter.convert_atom_detail(row)
- except sa_exc.SQLAlchemyError as e:
- raise exc.StorageFailure("Failed getting atom details with"
- " uuid '%s'" % ad_uuid, e)
+ except sa_exc.SQLAlchemyError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed getting atom details with"
+ " uuid '%s'" % ad_uuid)
def get_atoms_for_flow(self, fd_uuid):
gathered = []
@@ -612,8 +622,10 @@ class Connection(base.Connection):
with contextlib.closing(self._engine.connect()) as conn:
for ad in self._converter.atom_query_iter(conn, fd_uuid):
gathered.append(ad)
- except sa_exc.DBAPIError as e:
- raise exc.StorageFailure("Failed getting atom details", e)
+ except sa_exc.DBAPIError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Failed getting atom details in flow"
+ " detail '%s'" % fd_uuid)
for atom_details in gathered:
yield atom_details
diff --git a/taskflow/persistence/backends/impl_zookeeper.py b/taskflow/persistence/backends/impl_zookeeper.py
index 5626b28..0d7c00e 100644
--- a/taskflow/persistence/backends/impl_zookeeper.py
+++ b/taskflow/persistence/backends/impl_zookeeper.py
@@ -67,8 +67,9 @@ class ZkBackend(path_based.PathBasedBackend):
return
try:
k_utils.finalize_client(self._client)
- except (k_exc.KazooException, k_exc.ZookeeperError) as e:
- raise exc.StorageFailure("Unable to finalize client", e)
+ except (k_exc.KazooException, k_exc.ZookeeperError):
+ exc.raise_with_cause(exc.StorageFailure,
+ "Unable to finalize client")
class ZkConnection(path_based.PathBasedConnection):
@@ -90,16 +91,21 @@ class ZkConnection(path_based.PathBasedConnection):
"""
try:
yield
- except self._client.handler.timeout_exception as e:
- raise exc.StorageFailure("Storage backend timeout", e)
- except k_exc.SessionExpiredError as e:
- raise exc.StorageFailure("Storage backend session has expired", e)
+ except self._client.handler.timeout_exception:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Storage backend timeout")
+ except k_exc.SessionExpiredError:
+ exc.raise_with_cause(exc.StorageFailure,
+ "Storage backend session has expired")
except k_exc.NoNodeError as e:
- raise exc.NotFound("Storage backend node not found: %s" % e)
+ exc.raise_with_cause(exc.NotFound,
+ "Storage backend node not found: %s" % e)
except k_exc.NodeExistsError as e:
- raise exc.Duplicate("Storage backend duplicate node: %s" % e)
- except (k_exc.KazooException, k_exc.ZookeeperError) as e:
- raise exc.StorageFailure("Storage backend internal error", e)
+ exc.raise_with_cause(exc.Duplicate,
+ "Storage backend duplicate node: %s" % e)
+ except (k_exc.KazooException, k_exc.ZookeeperError):
+ exc.raise_with_cause(exc.StorageFailure,
+ "Storage backend internal error")
def _join_path(self, *parts):
return paths.join(*parts)
@@ -145,6 +151,6 @@ class ZkConnection(path_based.PathBasedConnection):
try:
if self._conf.get('check_compatible', True):
k_utils.check_compatible(self._client, MIN_ZK_VERSION)
- except exc.IncompatibleVersion as e:
- raise exc.StorageFailure("Backend storage is not a"
- " compatible version", e)
+ except exc.IncompatibleVersion:
+ exc.raise_with_cause(exc.StorageFailure, "Backend storage is"
+ " not a compatible version")