summaryrefslogtreecommitdiff
path: root/gitdb/stream.py
diff options
context:
space:
mode:
authorSebastian Thiel <byronimo@gmail.com>2015-01-01 13:47:19 +0100
committerSebastian Thiel <byronimo@gmail.com>2015-01-01 13:47:19 +0100
commitc38bd19706abe5cf0bf0e7b3e9ad2b3e554d28ef (patch)
treef9572b510b81fc1dafab8dbe049f64c41f76d570 /gitdb/stream.py
parentab4520683ab325046f2a9fe6ebf127dbbab60dfe (diff)
downloadgitdb-c38bd19706abe5cf0bf0e7b3e9ad2b3e554d28ef.tar.gz
Increased initial size of decompressed data to obtain loose object header information
This appears to fix https://github.com/gitpython-developers/GitPython/issues/220 , in this particular case. Nonetheless, we might just have gotten lucky here, and the actual issue is not yet solved and can thus re-occur. It would certainly be best to churn through plenty of loose objects to assure this truly works now. Maybe the pack could be recompressed as loose objects to get a sufficiently large data set.
Diffstat (limited to 'gitdb/stream.py')
-rw-r--r--gitdb/stream.py7
1 files changed, 5 insertions, 2 deletions
diff --git a/gitdb/stream.py b/gitdb/stream.py
index edd6dd2..b0a8900 100644
--- a/gitdb/stream.py
+++ b/gitdb/stream.py
@@ -100,7 +100,9 @@ class DecompressMemMapReader(LazyMixin):
:return: parsed type_string, size"""
# read header
- maxb = 512 # should really be enough, cgit uses 8192 I believe
+ # should really be enough, cgit uses 8192 I believe
+ # And for good reason !! This needs to be that high for the header to be read correctly in all cases
+ maxb = 8192
self._s = maxb
hdr = self.read(maxb)
hdrend = hdr.find(NULL_BYTE)
@@ -243,7 +245,7 @@ class DecompressMemMapReader(LazyMixin):
# moving the window into the memory map along as we decompress, which keeps
# the tail smaller than our chunk-size. This causes 'only' the chunk to be
# copied once, and another copy of a part of it when it creates the unconsumed
- # tail. We have to use it to hand in the appropriate amount of bytes durin g
+ # tail. We have to use it to hand in the appropriate amount of bytes during
# the next read.
tail = self._zip.unconsumed_tail
if tail:
@@ -284,6 +286,7 @@ class DecompressMemMapReader(LazyMixin):
else:
unused_datalen = len(self._zip.unconsumed_tail) + len(self._zip.unused_data)
# end handle very special case ...
+
self._cbr += len(indata) - unused_datalen
self._br += len(dcompdat)