| author | mike bayer <mike_mp@zzzcomputing.com> | 2019-10-30 18:18:48 +0000 |
|---|---|---|
| committer | Gerrit Code Review <gerrit@bbpush.zzzcomputing.com> | 2019-10-30 18:18:48 +0000 |
| commit | fa26bb01e1cfdfc80985e0bf7cf353aef6106a0d (patch) | |
| tree | ba5febaba5bd1059ba4eedb528090e7f9616a9a5 /lib/sqlalchemy | |
| parent | ec5e4f3543cc2896c83c5854cc5c39a80cc11b2f (diff) | |
| parent | d36b1f7f03841b9b346a6fd3395dd29333dce588 (diff) | |
| download | sqlalchemy-fa26bb01e1cfdfc80985e0bf7cf353aef6106a0d.tar.gz | |
Merge "Use simple growth scale with any max size for BufferedRowResultProxy"
Diffstat (limited to 'lib/sqlalchemy')
| -rw-r--r-- | lib/sqlalchemy/dialects/postgresql/psycopg2.py | 3 |
| -rw-r--r-- | lib/sqlalchemy/engine/result.py | 33 |
2 files changed, 11 insertions, 25 deletions
```diff
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index 1a4db1108..14d49ee15 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -137,7 +137,8 @@ The following DBAPI-specific options are respected when used with
   interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the
   buffer will grow to ultimately store 1000 rows at a time.
 
-  .. versionadded:: 1.0.6
+  .. versionchanged:: 1.4 The ``max_row_buffer`` size can now be greater than
+     1000, and the buffer will grow to that size.
 
 .. _psycopg2_executemany_mode:
 
diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index 733bd6f6a..004a84da5 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -1486,10 +1486,8 @@ class BufferedRowResultProxy(ResultProxy):
 
     The pre-fetching behavior fetches only one row initially, and then
     grows its buffer size by a fixed amount with each successive need
-    for additional rows up to a size of 1000.
-
-    The size argument is configurable using the ``max_row_buffer``
-    execution option::
+    for additional rows up the ``max_row_buffer`` size, which defaults
+    to 1000::
 
         with psycopg2_engine.connect() as conn:
 
@@ -1497,7 +1495,7 @@
                 stream_results=True, max_row_buffer=50
             ).execute("select * from table")
 
-    .. versionadded:: 1.0.6 Added the ``max_row_buffer`` option.
+    .. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.
 
     .. seealso::
 
@@ -1506,34 +1504,21 @@
 
     def _init_metadata(self):
         self._max_row_buffer = self.context.execution_options.get(
-            "max_row_buffer", None
+            "max_row_buffer", 1000
         )
+        self._growth_factor = 5
         self.__buffer_rows()
         super(BufferedRowResultProxy, self)._init_metadata()
 
-    # this is a "growth chart" for the buffering of rows.
-    # each successive __buffer_rows call will use the next
-    # value in the list for the buffer size until the max
-    # is reached
-    size_growth = {
-        1: 5,
-        5: 10,
-        10: 20,
-        20: 50,
-        50: 100,
-        100: 250,
-        250: 500,
-        500: 1000,
-    }
-
     def __buffer_rows(self):
         if self.cursor is None:
             return
         size = getattr(self, "_bufsize", 1)
         self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
-        self._bufsize = self.size_growth.get(size, size)
-        if self._max_row_buffer is not None:
-            self._bufsize = min(self._max_row_buffer, self._bufsize)
+        if size < self._max_row_buffer:
+            self._bufsize = min(
+                self._max_row_buffer, size * self._growth_factor
+            )
 
     def _soft_close(self, **kw):
         self.__rowbuffer.clear()
```
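The net effect of the `result.py` change is that the buffer no longer follows the fixed `size_growth` chart but instead grows geometrically by a factor of 5 until it reaches `max_row_buffer`, which now defaults to 1000 and may be set higher. Below is a minimal standalone sketch, not SQLAlchemy internals, that reproduces the batch sizes `__buffer_rows` will pass to `cursor.fetchmany()`; the `growth_schedule` helper name is hypothetical:

```python
# Sketch of the new growth scale: mirrors the logic added in
# __buffer_rows() above, but runs standalone.  The helper name is made up.
def growth_schedule(max_row_buffer=1000, growth_factor=5):
    """Yield successive fetchmany() batch sizes until the cap is reached."""
    size = 1  # the first fetch always asks for a single row
    while True:
        yield size
        if size >= max_row_buffer:
            break  # every later fetch stays at max_row_buffer rows
        size = min(max_row_buffer, size * growth_factor)

print(list(growth_schedule()))        # [1, 5, 25, 125, 625, 1000]
print(list(growth_schedule(50)))      # [1, 5, 25, 50]
print(list(growth_schedule(10000)))   # [1, 5, 25, 125, 625, 3125, 10000]
```

Compared with the old chart (1, 5, 10, 20, 50, 100, 250, 500, topping out at 1000), the simple growth scale reaches the default cap in fewer steps and, as the last call shows, honors `max_row_buffer` values above 1000 instead of clamping them to 1000.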
