diff options
| author | Sebastian Thiel <byronimo@gmail.com> | 2014-11-14 12:45:19 +0100 |
|---|---|---|
| committer | Sebastian Thiel <byronimo@gmail.com> | 2014-11-14 12:45:19 +0100 |
| commit | 2f2fe4eea8ba4f47e63a7392a1f27f74f5ee925d (patch) | |
| tree | 176a493d114fab7cc6e930bf318b2339db386cf5 /gitdb/db/mem.py | |
| parent | 81707c606b88e971cc359e3e9f3abeeea2204860 (diff) | |
| parent | 0dcec5a27b341ce58e5ab169f91aa25b2cafec0c (diff) | |
| download | gitdb-0.6.0.tar.gz | |
Merge branch 'py2n3' (tag: 0.6.0)
* python 3 compatibility
* all tests work in py2.6, 2.7, 3.3, 3.4
Diffstat (limited to 'gitdb/db/mem.py')
| -rw-r--r-- | gitdb/db/mem.py | 82 |
1 file changed, 40 insertions, 42 deletions
diff --git a/gitdb/db/mem.py b/gitdb/db/mem.py index b9b2b89..1aa0d51 100644 --- a/gitdb/db/mem.py +++ b/gitdb/db/mem.py @@ -3,27 +3,28 @@ # This module is part of GitDB and is released under # the New BSD License: http://www.opensource.org/licenses/bsd-license.php """Contains the MemoryDatabase implementation""" -from loose import LooseObjectDB -from base import ( - ObjectDBR, - ObjectDBW - ) +from gitdb.db.loose import LooseObjectDB +from gitdb.db.base import ( + ObjectDBR, + ObjectDBW +) from gitdb.base import ( - OStream, - IStream, - ) + OStream, + IStream, +) from gitdb.exc import ( - BadObject, - UnsupportedOperation - ) + BadObject, + UnsupportedOperation +) + from gitdb.stream import ( - ZippedStoreShaWriter, - DecompressMemMapReader, - ) + ZippedStoreShaWriter, + DecompressMemMapReader, +) -from cStringIO import StringIO +from io import BytesIO __all__ = ("MemoryDB", ) @@ -31,46 +32,40 @@ class MemoryDB(ObjectDBR, ObjectDBW): """A memory database stores everything to memory, providing fast IO and object retrieval. 
It should be used to buffer results and obtain SHAs before writing it to the actual physical storage, as it allows to query whether object already - exists in the target storage before introducing actual IO - - **Note:** memory is currently not threadsafe, hence the async methods cannot be used - for storing""" - + exists in the target storage before introducing actual IO""" + def __init__(self): super(MemoryDB, self).__init__() self._db = LooseObjectDB("path/doesnt/matter") - + # maps 20 byte shas to their OStream objects self._cache = dict() - + def set_ostream(self, stream): raise UnsupportedOperation("MemoryDB's always stream into memory") - + def store(self, istream): zstream = ZippedStoreShaWriter() self._db.set_ostream(zstream) - + istream = self._db.store(istream) zstream.close() # close to flush zstream.seek(0) - - # don't provide a size, the stream is written in object format, hence the + + # don't provide a size, the stream is written in object format, hence the # header needs decompression - decomp_stream = DecompressMemMapReader(zstream.getvalue(), close_on_deletion=False) + decomp_stream = DecompressMemMapReader(zstream.getvalue(), close_on_deletion=False) self._cache[istream.binsha] = OStream(istream.binsha, istream.type, istream.size, decomp_stream) - + return istream - - def store_async(self, reader): - raise UnsupportedOperation("MemoryDBs cannot currently be used for async write access") - + def has_object(self, sha): return sha in self._cache def info(self, sha): # we always return streams, which are infos as well return self.stream(sha) - + def stream(self, sha): try: ostream = self._cache[sha] @@ -80,15 +75,18 @@ class MemoryDB(ObjectDBR, ObjectDBW): except KeyError: raise BadObject(sha) # END exception handling - + def size(self): return len(self._cache) - + def sha_iter(self): - return self._cache.iterkeys() - - - #{ Interface + try: + return self._cache.iterkeys() + except AttributeError: + return self._cache.keys() + + + #{ Interface def 
stream_copy(self, sha_iter, odb): """Copy the streams as identified by sha's yielded by sha_iter into the given odb The streams will be copied directly @@ -100,12 +98,12 @@ class MemoryDB(ObjectDBR, ObjectDBW): if odb.has_object(sha): continue # END check object existance - + ostream = self.stream(sha) # compressed data including header - sio = StringIO(ostream.stream.data()) + sio = BytesIO(ostream.stream.data()) istream = IStream(ostream.type, ostream.size, sio, sha) - + odb.store(istream) count += 1 # END for each sha |
