summary refs log tree commit diff
diff options
context:
space:
mode:
authorMorgan McClure <mcclurem@quagmire>2015-06-13 19:27:55 -0700
committerMorgan McClure <mcclurem@quagmire>2015-06-13 19:27:55 -0700
commita8c6cce404caf4a9c20faefc8f11a3e37db3ea05 (patch)
tree99f5e1d29daeedaee48e5f5fbcedbb229c1aeef4
parent235165d54618031f525f029e5686a03c273a7c7e (diff)
download sqlalchemy-pr/182.tar.gz
Added max_row_buffer attribute to the context execution options and use (pr/182)
it to prevent excess memory usage with yield_per
-rw-r--r-- lib/sqlalchemy/engine/result.py 9
-rw-r--r-- lib/sqlalchemy/orm/query.py 3
2 files changed, 9 insertions, 3 deletions
diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index 56c81c93e..41b30c983 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -1067,7 +1067,7 @@ class BufferedRowResultProxy(ResultProxy):
The pre-fetching behavior fetches only one row initially, and then
grows its buffer size by a fixed amount with each successive need
- for additional rows up to a size of 100.
+ for additional rows up to a size of 1000.
"""
def _init_metadata(self):
@@ -1083,7 +1083,10 @@ class BufferedRowResultProxy(ResultProxy):
5: 10,
10: 20,
20: 50,
- 50: 100
+ 50: 100,
+ 100: 250,
+ 250: 500,
+ 500: 1000
}
def __buffer_rows(self):
@@ -1092,6 +1095,8 @@ class BufferedRowResultProxy(ResultProxy):
size = getattr(self, '_bufsize', 1)
self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
self._bufsize = self.size_growth.get(size, size)
+ if self.context.execution_options.get('max_row_buffer') is not None:
+ self._bufsize = min(self.context.execution_options['max_row_buffer'], self._bufsize)
def _soft_close(self, **kw):
self.__rowbuffer.clear()
diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py
index 4f8c86a14..8b3df08e7 100644
--- a/lib/sqlalchemy/orm/query.py
+++ b/lib/sqlalchemy/orm/query.py
@@ -756,7 +756,8 @@ class Query(object):
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
- {"stream_results": True})
+ {"stream_results": True,
+ "max_row_buffer": count})
def get(self, ident):
"""Return an instance based on the given primary key identifier,