| author | Sebastian Thiel <byronimo@gmail.com> | 2014-02-09 20:51:43 +0100 |
|---|---|---|
| committer | Sebastian Thiel <byronimo@gmail.com> | 2014-02-09 20:51:43 +0100 |
| commit | 6576d5503a64d124fd7bcf639cc8955918b3ac43 (patch) | |
| tree | 847028954b05307086eda1782c2e9521c8d67a13 /gitdb/db/mem.py | |
| parent | ea54328ce05abdcb4f23300df51422e62b737f63 (diff) | |
| download | gitdb-6576d5503a64d124fd7bcf639cc8955918b3ac43.tar.gz | |
tabs to spaces
Diffstat (limited to 'gitdb/db/mem.py')
| -rw-r--r-- | gitdb/db/mem.py | 188 |
1 file changed, 94 insertions, 94 deletions
```
diff --git a/gitdb/db/mem.py b/gitdb/db/mem.py
index 5d76c83..b9b2b89 100644
--- a/gitdb/db/mem.py
+++ b/gitdb/db/mem.py
@@ -5,109 +5,109 @@
```

Every line touched by the hunk is identical on both sides apart from its leading whitespace: the removed lines are indented with tabs, the added lines with spaces. gitdb/db/mem.py after the change:

```python
"""Contains the MemoryDatabase implementation"""
from loose import LooseObjectDB
from base import (
    ObjectDBR,
    ObjectDBW
    )

from gitdb.base import (
    OStream,
    IStream,
    )

from gitdb.exc import (
    BadObject,
    UnsupportedOperation
    )

from gitdb.stream import (
    ZippedStoreShaWriter,
    DecompressMemMapReader,
    )

from cStringIO import StringIO

__all__ = ("MemoryDB", )


class MemoryDB(ObjectDBR, ObjectDBW):
    """A memory database stores everything to memory, providing fast IO and object
    retrieval. It should be used to buffer results and obtain SHAs before writing
    it to the actual physical storage, as it allows to query whether object already
    exists in the target storage before introducing actual IO

    **Note:** memory is currently not threadsafe, hence the async methods cannot be used
    for storing"""

    def __init__(self):
        super(MemoryDB, self).__init__()
        self._db = LooseObjectDB("path/doesnt/matter")

        # maps 20 byte shas to their OStream objects
        self._cache = dict()

    def set_ostream(self, stream):
        raise UnsupportedOperation("MemoryDB's always stream into memory")

    def store(self, istream):
        zstream = ZippedStoreShaWriter()
        self._db.set_ostream(zstream)

        istream = self._db.store(istream)
        zstream.close()     # close to flush
        zstream.seek(0)

        # don't provide a size, the stream is written in object format, hence the
        # header needs decompression
        decomp_stream = DecompressMemMapReader(zstream.getvalue(), close_on_deletion=False)
        self._cache[istream.binsha] = OStream(istream.binsha, istream.type, istream.size, decomp_stream)

        return istream

    def store_async(self, reader):
        raise UnsupportedOperation("MemoryDBs cannot currently be used for async write access")

    def has_object(self, sha):
        return sha in self._cache

    def info(self, sha):
        # we always return streams, which are infos as well
        return self.stream(sha)

    def stream(self, sha):
        try:
            ostream = self._cache[sha]
            # rewind stream for the next one to read
            ostream.stream.seek(0)
            return ostream
        except KeyError:
            raise BadObject(sha)
        # END exception handling

    def size(self):
        return len(self._cache)

    def sha_iter(self):
        return self._cache.iterkeys()


    #{ Interface
    def stream_copy(self, sha_iter, odb):
        """Copy the streams as identified by sha's yielded by sha_iter into the given odb
        The streams will be copied directly
        **Note:** the object will only be written if it did not exist in the target db
        :return: amount of streams actually copied into odb. If smaller than the amount
            of input shas, one or more objects did already exist in odb"""
        count = 0
        for sha in sha_iter:
            if odb.has_object(sha):
                continue
            # END check object existance

            ostream = self.stream(sha)
            # compressed data including header
            sio = StringIO(ostream.stream.data())
            istream = IStream(ostream.type, ostream.size, sio, sha)

            odb.store(istream)
            count += 1
        # END for each sha
        return count
    #} END interface
```
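The class docstring spells out the intended workflow: buffer objects in a MemoryDB to obtain their SHAs without touching disk, then move them into persistent storage with stream_copy(). Below is a minimal, hypothetical sketch of that workflow against the Python 2 API shown above; the external import paths, the IStream(type, size, stream) call and the /tmp/objects target path are assumptions for illustration, not part of this commit.

```python
# Hypothetical usage sketch (Python 2, gitdb of this era).
from cStringIO import StringIO

from gitdb.base import IStream            # assumed import path
from gitdb.db.mem import MemoryDB         # the class shown in this diff
from gitdb.db.loose import LooseObjectDB  # assumed persistent target db

mdb = MemoryDB()

# Buffer a blob in memory; store() compresses it and fills in istream.binsha.
data = "hello gitdb"
istream = mdb.store(IStream("blob", len(data), StringIO(data)))
sha = istream.binsha

assert mdb.has_object(sha)
print mdb.stream(sha).stream.read()       # decompressed object data

# Once the SHAs are known, copy only the objects the target db is missing.
odb = LooseObjectDB("/tmp/objects")       # assumed, pre-existing path
copied = mdb.stream_copy([sha], odb)
print "%d object(s) copied" % copied
```

Because store() keeps the zlib-compressed object in the in-memory cache, stream_copy() can hand the compressed bytes straight to the target database without recompressing them.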
