diff options
| author | Sebastian Thiel <byronimo@gmail.com> | 2011-07-05 16:40:54 +0200 |
|---|---|---|
| committer | Sebastian Thiel <byronimo@gmail.com> | 2011-07-05 16:40:54 +0200 |
| commit | ef5dc3d968b3aeed16a02ec705f89b72ad46fa84 (patch) | |
| tree | 9e203cd7a994a6ee10c91e8f19f238c62b856e23 /gitdb/test | |
| parent | 0e64168dd3f43b02857e60183d40c86480f01dc7 (diff) | |
| download | gitdb-ef5dc3d968b3aeed16a02ec705f89b72ad46fa84.tar.gz | |
Optimized test_pack_streaming not to cache the objects anymore. Instead, an iterator is provided which does the job. Previously it would easily use 750 MB of RAM to keep all the associated objects, more than 350k of them. Still a lot of memory for just 350k objects, but it's Python after all
Diffstat (limited to 'gitdb/test')
| -rw-r--r-- | gitdb/test/performance/test_pack_streaming.py | 5 |
1 files changed, 2 insertions, 3 deletions
diff --git a/gitdb/test/performance/test_pack_streaming.py b/gitdb/test/performance/test_pack_streaming.py index 795ed1e..3c40ed0 100644 --- a/gitdb/test/performance/test_pack_streaming.py +++ b/gitdb/test/performance/test_pack_streaming.py @@ -40,10 +40,9 @@ class TestPackStreamingPerformance(TestBigRepoR): count = 0 total_size = 0 st = time() - objs = list() for sha in pdb.sha_iter(): count += 1 - objs.append(pdb.stream(sha)) + pdb.stream(sha) if count == ni: break #END gather objects for pack-writing @@ -51,7 +50,7 @@ class TestPackStreamingPerformance(TestBigRepoR): print >> sys.stderr, "PDB Streaming: Got %i streams by sha in in %f s ( %f streams/s )" % (ni, elapsed, ni / elapsed) st = time() - PackEntity.write_pack(objs, ostream.write) + PackEntity.write_pack((pdb.stream(sha) for sha in pdb.sha_iter()), ostream.write, object_count=ni) elapsed = time() - st total_kb = ostream.bytes_written() / 1000 print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed) |
