Diffstat (limited to 'git/test/performance')
-rw-r--r--  git/test/performance/db/__init__.py                 1
-rw-r--r--  git/test/performance/db/looseodb_impl.py           65
-rw-r--r--  git/test/performance/db/odb_impl.py                34
-rw-r--r--  git/test/performance/db/packedodb_impl.py          38
-rw-r--r--  git/test/performance/db/test_looseodb_cmd.py        7
-rw-r--r--  git/test/performance/db/test_looseodb_dulwich.py    4
-rw-r--r--  git/test/performance/db/test_looseodb_pure.py       2
-rw-r--r--  git/test/performance/db/test_looseodb_pygit2.py     4
-rw-r--r--  git/test/performance/db/test_odb_cmd.py             2
-rw-r--r--  git/test/performance/db/test_odb_dulwich.py         4
-rw-r--r--  git/test/performance/db/test_odb_pure.py            2
-rw-r--r--  git/test/performance/db/test_odb_pygit2.py          4
-rw-r--r--  git/test/performance/db/test_packedodb_pure.py     37
-rw-r--r--  git/test/performance/lib.py                        32
-rw-r--r--  git/test/performance/objects/__init__.py            1
-rw-r--r--  git/test/performance/objects/test_commit.py        47
-rw-r--r--  git/test/performance/test_utils.py                 87
17 files changed, 198 insertions, 173 deletions
diff --git a/git/test/performance/db/__init__.py b/git/test/performance/db/__init__.py
index 8b137891..e69de29b 100644
--- a/git/test/performance/db/__init__.py
+++ b/git/test/performance/db/__init__.py
@@ -1 +0,0 @@
-
diff --git a/git/test/performance/db/looseodb_impl.py b/git/test/performance/db/looseodb_impl.py
index 1da69945..6cdbaa32 100644
--- a/git/test/performance/db/looseodb_impl.py
+++ b/git/test/performance/db/looseodb_impl.py
@@ -4,9 +4,9 @@ from git.base import *
from git.stream import *
from async import ChannelThreadTask
from git.util import (
- pool,
- bin_to_hex
- )
+ pool,
+ bin_to_hex
+)
import os
import sys
from time import time
@@ -15,7 +15,7 @@ from git.test.lib import (
GlobalsItemDeletorMetaCls,
make_memory_file,
with_rw_repo
- )
+)
from git.test.performance.lib import TestBigRepoR
@@ -32,16 +32,18 @@ def read_chunked_stream(stream):
# END read stream loop
assert total == stream.size
return stream
-
-
+
+
class TestStreamReader(ChannelThreadTask):
+
"""Expects input streams and reads them in chunks. It will read one at a time,
requiring a queue chunk of size 1"""
+
def __init__(self, *args):
super(TestStreamReader, self).__init__(*args)
self.fun = read_chunked_stream
self.max_chunksize = 1
-
+
#} END utilities
@@ -51,29 +53,29 @@ class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
class TestLooseDBWPerformanceBase(TestBigRepoR):
__metaclass__ = PerfBaseDeletorMetaClass
-
- large_data_size_bytes = 1000*1000*10 # some MiB should do it
- moderate_data_size_bytes = 1000*1000*1 # just 1 MiB
-
+
+ large_data_size_bytes = 1000 * 1000 * 10 # some MiB should do it
+ moderate_data_size_bytes = 1000 * 1000 * 1 # just 1 MiB
+
#{ Configuration
LooseODBCls = None
#} END configuration
-
+
@classmethod
def setUp(cls):
super(TestLooseDBWPerformanceBase, cls).setUp()
if cls.LooseODBCls is None:
raise AssertionError("LooseODBCls must be set in subtype")
- #END assert configuration
+ # END assert configuration
# currently there is no additional configuration
-
+
@with_rw_repo("HEAD")
def test_large_data_streaming(self, rwrepo):
# TODO: This part overlaps with the same file in git.test.performance.test_stream
# It should be shared if possible
objects_path = rwrepo.db_path('')
ldb = self.LooseODBCls(objects_path)
-
+
for randomize in range(2):
desc = (randomize and 'random ') or ''
print >> sys.stderr, "Creating %s data ..." % desc
@@ -81,8 +83,8 @@ class TestLooseDBWPerformanceBase(TestBigRepoR):
size, stream = make_memory_file(self.large_data_size_bytes, randomize)
elapsed = time() - st
print >> sys.stderr, "Done (in %f s)" % elapsed
-
- # writing - due to the compression it will seem faster than it is
+
+ # writing - due to the compression it will seem faster than it is
st = time()
binsha = ldb.store(IStream('blob', size, stream)).binsha
elapsed_add = time() - st
@@ -90,24 +92,24 @@ class TestLooseDBWPerformanceBase(TestBigRepoR):
hexsha = bin_to_hex(binsha)
db_file = os.path.join(objects_path, hexsha[:2], hexsha[2:])
fsize_kib = os.path.getsize(db_file) / 1000
-
-
+
size_kib = size / 1000
- print >> sys.stderr, "%s: Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (self.LooseODBCls.__name__, size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
-
+ print >> sys.stderr, "%s: Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (
+ self.LooseODBCls.__name__, size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
+
# reading all at once
st = time()
ostream = ldb.stream(binsha)
shadata = ostream.read()
elapsed_readall = time() - st
-
+
stream.seek(0)
assert shadata == stream.getvalue()
- print >> sys.stderr, "%s: Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
-
-
+ print >> sys.stderr, "%s: Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (
+ self.LooseODBCls.__name__, size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
+
# reading in chunks of 1 MiB
- cs = 512*1000
+ cs = 512 * 1000
chunks = list()
st = time()
ostream = ldb.stream(binsha)
@@ -118,15 +120,14 @@ class TestLooseDBWPerformanceBase(TestBigRepoR):
break
# END read in chunks
elapsed_readchunks = time() - st
-
+
stream.seek(0)
assert ''.join(chunks) == stream.getvalue()
-
+
cs_kib = cs / 1000
- print >> sys.stderr, "%s: Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
-
+ print >> sys.stderr, "%s: Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (
+ self.LooseODBCls.__name__, size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
+
# del db file so git has something to do
os.remove(db_file)
# END for each randomization factor
-
-
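
For reference, the chunked-read benchmark above reduces to the following pattern. This is an illustrative sketch, not code from the diff: time_chunked_read is an invented helper, and the default chunk size mirrors cs = 512 * 1000 in test_large_data_streaming.

    import io
    from time import time

    def time_chunked_read(stream, chunk_size=512 * 1000):
        """Read a stream to exhaustion in fixed-size chunks; return (KiB, KiB/s)."""
        chunks = []
        st = time()
        while True:
            data = stream.read(chunk_size)
            if not data:
                break
            chunks.append(data)
        # END read in chunks
        elapsed = time() - st
        total_kib = sum(len(c) for c in chunks) / 1000.0
        return total_kib, (total_kib / elapsed) if elapsed else float('inf')

    # usage: time_chunked_read(io.BytesIO(b'\0' * (10 * 1000 * 1000)))
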
diff --git a/git/test/performance/db/odb_impl.py b/git/test/performance/db/odb_impl.py
index 887604c0..afe9a32b 100644
--- a/git/test/performance/db/odb_impl.py
+++ b/git/test/performance/db/odb_impl.py
@@ -7,31 +7,33 @@ import stat
from git.test.performance.lib import (
TestBigRepoR,
GlobalsItemDeletorMetaCls
- )
+)
+
class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
ModuleToDelete = 'TestObjDBPerformanceBase'
-
+
class TestObjDBPerformanceBase(TestBigRepoR):
__metaclass__ = PerfBaseDeletorMetaClass
-
- #{ Configuration
+
+ #{ Configuration
RepoCls = None # to be set by subclass
#} END configuration
-
+
def test_random_access_test(self):
repo = self.rorepo
-
+
# GET COMMITS
st = time()
root_commit = repo.commit(self.head_sha_2k)
commits = list(root_commit.traverse())
nc = len(commits)
elapsed = time() - st
-
- print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (type(repo.odb), nc, elapsed, nc / elapsed)
-
+
+ print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (
+ type(repo.odb), nc, elapsed, nc / elapsed)
+
# GET TREES
# walk all trees of all commits
st = time()
@@ -49,9 +51,10 @@ class TestObjDBPerformanceBase(TestBigRepoR):
blobs_per_commit.append(blobs)
# END for each commit
elapsed = time() - st
-
- print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (type(repo.odb), nt, len(commits), elapsed, nt / elapsed)
-
+
+ print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (
+ type(repo.odb), nt, len(commits), elapsed, nt / elapsed)
+
# GET BLOBS
st = time()
nb = 0
@@ -66,7 +69,6 @@ class TestObjDBPerformanceBase(TestBigRepoR):
break
# END for each bloblist
elapsed = time() - st
-
- print >> sys.stderr, "%s: Retrieved %i blob (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes/1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
-
-
+
+ print >> sys.stderr, "%s: Retrieved %i blob (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (
+ type(repo.odb), nb, data_bytes / 1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
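
The PerfBaseDeletorMetaClass seen in these files configures GlobalsItemDeletorMetaCls (from git.test.lib) so that the abstract performance base class is removed from any module that subclasses it, keeping the test runner from collecting the base itself. A hedged reconstruction of the idea, not the actual implementation:

    import sys

    class GlobalsDeletorSketch(type):
        # name of the abstract base class to drop from the defining module
        ModuleToDelete = None

        def __new__(mcs, name, bases, clsdict):
            new_type = super(GlobalsDeletorSketch, mcs).__new__(mcs, name, bases, clsdict)
            if mcs.ModuleToDelete and name != mcs.ModuleToDelete:
                module = sys.modules[clsdict['__module__']]
                if hasattr(module, mcs.ModuleToDelete):
                    # hide the abstract base so only concrete subclasses run
                    delattr(module, mcs.ModuleToDelete)
            # END drop base class
            return new_type

    # usage sketch: a subclass sets ModuleToDelete = 'TestObjDBPerformanceBase'
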
diff --git a/git/test/performance/db/packedodb_impl.py b/git/test/performance/db/packedodb_impl.py
index 23d00444..2aaf99a2 100644
--- a/git/test/performance/db/packedodb_impl.py
+++ b/git/test/performance/db/packedodb_impl.py
@@ -4,9 +4,9 @@
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Performance tests for object store"""
from git.test.performance.lib import (
- TestBigRepoR,
+ TestBigRepoR,
GlobalsItemDeletorMetaCls
- )
+)
from git.exc import UnsupportedOperation
@@ -19,31 +19,32 @@ import random
class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
ModuleToDelete = 'TestPurePackedODBPerformanceBase'
+
class TestPurePackedODBPerformanceBase(TestBigRepoR):
__metaclass__ = PerfBaseDeletorMetaClass
-
+
#{ Configuration
PackedODBCls = None
#} END configuration
-
+
@classmethod
def setUp(cls):
super(TestPurePackedODBPerformanceBase, cls).setUp()
if cls.PackedODBCls is None:
raise AssertionError("PackedODBCls must be set in subclass")
- #END assert configuration
+ # END assert configuration
cls.ropdb = cls.PackedODBCls(cls.rorepo.db_path("pack"))
-
+
def test_pack_random_access(self):
pdb = self.ropdb
-
+
# sha lookup
st = time()
sha_list = list(pdb.sha_iter())
elapsed = time() - st
ns = len(sha_list)
print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed)
-
+
# sha lookup: best-case and worst case access
pdb_pack_info = pdb._pack_info
# END shuffle shas
@@ -52,13 +53,14 @@ class TestPurePackedODBPerformanceBase(TestBigRepoR):
pdb_pack_info(sha)
# END for each sha to look up
elapsed = time() - st
-
+
# discard cache
del(pdb._entities)
pdb.entities()
- print >> sys.stderr, "PDB: looked up %i sha in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed)
+ print >> sys.stderr, "PDB: looked up %i sha in %i packs in %f s ( %f shas/s )" % (
+ ns, len(pdb.entities()), elapsed, ns / elapsed)
# END for each random mode
-
+
# query info and streams only
max_items = 10000 # can wait longer when testing memory
for pdb_fun in (pdb.info, pdb.stream):
@@ -66,9 +68,10 @@ class TestPurePackedODBPerformanceBase(TestBigRepoR):
for sha in sha_list[:max_items]:
pdb_fun(sha)
elapsed = time() - st
- print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed)
+ print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (
+ max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed)
# END for each function
-
+
# retrieve stream and read all
max_items = 5000
pdb_stream = pdb.stream
@@ -80,8 +83,9 @@ class TestPurePackedODBPerformanceBase(TestBigRepoR):
total_size += stream.size
elapsed = time() - st
total_kib = total_size / 1000
- print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totalling %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed)
-
+ print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totalling %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (
+ max_items, total_kib, total_kib / elapsed, elapsed, max_items / elapsed)
+
def test_correctness(self):
pdb = self.ropdb
# disabled for now as it used to work perfectly, checking big repositories takes a long time
@@ -102,6 +106,6 @@ class TestPurePackedODBPerformanceBase(TestBigRepoR):
# END for each index
# END for each entity
elapsed = time() - st
- print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed)
+ print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (
+ count, crc, elapsed, count / elapsed)
# END for each verify mode
-
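
The access-rate measurements above share one shape, factored into a helper here for clarity. pdb stands for any object database exposing sha_iter(), info() and stream() as used in the tests; the helper name is invented for this sketch:

    from time import time

    def measure_random_access(pdb, max_items=10000):
        """Return items/s for info() and stream() lookups over at most max_items shas."""
        sha_list = list(pdb.sha_iter())
        n = min(len(sha_list), max_items)
        rates = {}
        for fun in (pdb.info, pdb.stream):
            st = time()
            for sha in sha_list[:n]:
                fun(sha)
            # END for each sha
            elapsed = time() - st
            rates[fun.__name__] = n / elapsed
        # END for each access mode
        return rates
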
diff --git a/git/test/performance/db/test_looseodb_cmd.py b/git/test/performance/db/test_looseodb_cmd.py
index 9147eff6..f96e4c3e 100644
--- a/git/test/performance/db/test_looseodb_cmd.py
+++ b/git/test/performance/db/test_looseodb_cmd.py
@@ -3,9 +3,10 @@ from looseodb_impl import TestLooseDBWPerformanceBase
import sys
+
class TestCmdLooseDB(TestLooseDBWPerformanceBase):
LooseODBCls = CmdCompatibilityGitDB
-
+
def test_info(self):
- sys.stderr.write("This test does not check the write performance of the git command as it is implemented in pure python")
-
+ sys.stderr.write(
+ "This test does not check the write performance of the git command as it is implemented in pure python")
diff --git a/git/test/performance/db/test_looseodb_dulwich.py b/git/test/performance/db/test_looseodb_dulwich.py
index 174be83d..e23327f7 100644
--- a/git/test/performance/db/test_looseodb_dulwich.py
+++ b/git/test/performance/db/test_looseodb_dulwich.py
@@ -2,12 +2,12 @@ try:
from git.db.dulwich.complex import DulwichGitODB
except ImportError:
from git.db.py.complex import PureGitODB as DulwichGitODB
-#END handle import
+# END handle import
from git.test.db.dulwich.lib import DulwichRequiredMetaMixin
from looseodb_impl import TestLooseDBWPerformanceBase
+
class TestPureLooseDB(TestLooseDBWPerformanceBase):
__metaclass__ = DulwichRequiredMetaMixin
LooseODBCls = DulwichGitODB
-
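
The backend-specific test modules here all use the same idiom: try to import the optional backend, and on ImportError alias the pure-Python implementation under the same name, so the module always defines a usable type. The same shape using only the standard library:

    try:
        import cPickle as pickle  # optional C-accelerated backend (Python 2)
    except ImportError:
        import pickle  # stdlib fallback keeps the module importable
    # END handle import
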
diff --git a/git/test/performance/db/test_looseodb_pure.py b/git/test/performance/db/test_looseodb_pure.py
index bb080612..bc4b54fe 100644
--- a/git/test/performance/db/test_looseodb_pure.py
+++ b/git/test/performance/db/test_looseodb_pure.py
@@ -1,6 +1,6 @@
from git.db.py.loose import PureLooseObjectODB
from looseodb_impl import TestLooseDBWPerformanceBase
+
class TestPureLooseDB(TestLooseDBWPerformanceBase):
LooseODBCls = PureLooseObjectODB
-
diff --git a/git/test/performance/db/test_looseodb_pygit2.py b/git/test/performance/db/test_looseodb_pygit2.py
index a9661111..06ece5c7 100644
--- a/git/test/performance/db/test_looseodb_pygit2.py
+++ b/git/test/performance/db/test_looseodb_pygit2.py
@@ -2,12 +2,12 @@ try:
from git.db.pygit2.complex import Pygit2GitODB
except ImportError:
from git.db.py.complex import PureGitODB as Pygit2GitODB
-#END handle import
+# END handle import
from git.test.db.pygit2.lib import Pygit2RequiredMetaMixin
from looseodb_impl import TestLooseDBWPerformanceBase
+
class TestPureLooseDB(TestLooseDBWPerformanceBase):
__metaclass__ = Pygit2RequiredMetaMixin
LooseODBCls = Pygit2GitODB
-
diff --git a/git/test/performance/db/test_odb_cmd.py b/git/test/performance/db/test_odb_cmd.py
index 37af34fd..a7dcfb0d 100644
--- a/git/test/performance/db/test_odb_cmd.py
+++ b/git/test/performance/db/test_odb_cmd.py
@@ -1,6 +1,6 @@
from git.db.complex import CmdCompatibilityGitDB
from odb_impl import TestObjDBPerformanceBase
+
class TestCmdDB(TestObjDBPerformanceBase):
RepoCls = CmdCompatibilityGitDB
-
diff --git a/git/test/performance/db/test_odb_dulwich.py b/git/test/performance/db/test_odb_dulwich.py
index 33abc88c..a5b8e57c 100644
--- a/git/test/performance/db/test_odb_dulwich.py
+++ b/git/test/performance/db/test_odb_dulwich.py
@@ -2,12 +2,12 @@ try:
from git.db.dulwich.complex import DulwichCompatibilityGitDB
except ImportError:
from git.db.complex import PureCompatibilityGitDB as DulwichCompatibilityGitDB
-#END handle dulwich compatibility
+# END handle dulwich compatibility
from git.test.db.dulwich.lib import DulwichRequiredMetaMixin
from odb_impl import TestObjDBPerformanceBase
+
class TestDulwichDB(TestObjDBPerformanceBase):
__metaclass__ = DulwichRequiredMetaMixin
RepoCls = DulwichCompatibilityGitDB
-
diff --git a/git/test/performance/db/test_odb_pure.py b/git/test/performance/db/test_odb_pure.py
index 93139c57..48c42659 100644
--- a/git/test/performance/db/test_odb_pure.py
+++ b/git/test/performance/db/test_odb_pure.py
@@ -1,6 +1,6 @@
from git.db.complex import PureCompatibilityGitDB
from odb_impl import TestObjDBPerformanceBase
+
class TestPureDB(TestObjDBPerformanceBase):
RepoCls = PureCompatibilityGitDB
-
diff --git a/git/test/performance/db/test_odb_pygit2.py b/git/test/performance/db/test_odb_pygit2.py
index c5911ae3..f44bfac8 100644
--- a/git/test/performance/db/test_odb_pygit2.py
+++ b/git/test/performance/db/test_odb_pygit2.py
@@ -2,12 +2,12 @@ try:
from git.db.pygit2.complex import Pygit2CompatibilityGitDB
except ImportError:
from git.db.complex import PureCompatibilityGitDB as Pygit2CompatibilityGitDB
-#END handle pygit2 compatibility
+# END handle pygit2 compatibility
from git.test.db.pygit2.lib import Pygit2RequiredMetaMixin
from odb_impl import TestObjDBPerformanceBase
+
class TestPygit2DB(TestObjDBPerformanceBase):
__metaclass__ = Pygit2RequiredMetaMixin
RepoCls = Pygit2CompatibilityGitDB
-
diff --git a/git/test/performance/db/test_packedodb_pure.py b/git/test/performance/db/test_packedodb_pure.py
index 90e8381f..94099b83 100644
--- a/git/test/performance/db/test_packedodb_pure.py
+++ b/git/test/performance/db/test_packedodb_pure.py
@@ -18,25 +18,27 @@ from nose import SkipTest
class CountedNullStream(NullStream):
__slots__ = '_bw'
+
def __init__(self):
self._bw = 0
-
+
def bytes_written(self):
return self._bw
-
+
def write(self, d):
self._bw += NullStream.write(self, d)
-
+
class TestPurePackedODB(TestPurePackedODBPerformanceBase):
#{ Configuration
PackedODBCls = PurePackedODB
#} END configuration
-
+
def test_pack_writing_note(self):
- sys.stderr.write("test_pack_writing should be adjusted to support different databases to read from - see test for more info")
+ sys.stderr.write(
+ "test_pack_writing should be adjusted to support different databases to read from - see test for more info")
raise SkipTest()
-
+
def test_pack_writing(self):
# see how fast we can write a pack from object streams.
# This will not be fast, as we take time for decompressing the streams as well
@@ -44,7 +46,7 @@ class TestPurePackedODB(TestPurePackedODBPerformanceBase):
ostream = CountedNullStream()
# NOTE: We use the same repo twice to see whether OS caching helps
for rorepo in (self.rorepo, self.rorepo, self.ropdb):
-
+
ni = 5000
count = 0
total_size = 0
@@ -54,22 +56,23 @@ class TestPurePackedODB(TestPurePackedODBPerformanceBase):
rorepo.stream(sha)
if count == ni:
break
- #END gather objects for pack-writing
+ # END gather objects for pack-writing
elapsed = time() - st
- print >> sys.stderr, "PDB Streaming: Got %i streams from %s by sha in %f s ( %f streams/s )" % (count, rorepo.__class__.__name__, elapsed, count / elapsed)
-
+ print >> sys.stderr, "PDB Streaming: Got %i streams from %s by sha in %f s ( %f streams/s )" % (
+ count, rorepo.__class__.__name__, elapsed, count / elapsed)
+
st = time()
PackEntity.write_pack((rorepo.stream(sha) for sha in rorepo.sha_iter()), ostream.write, object_count=ni)
elapsed = time() - st
total_kb = ostream.bytes_written() / 1000
- print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed)
- #END for each rorepo
-
-
+ print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (
+ total_kb, elapsed, total_kb / elapsed)
+ # END for each rorepo
+
def test_stream_reading(self):
raise SkipTest("This test was only used for --with-profile runs")
pdb = self.ropdb
-
+
# streaming only, meant for --with-profile runs
ni = 5000
count = 0
@@ -85,5 +88,5 @@ class TestPurePackedODB(TestPurePackedODBPerformanceBase):
count += 1
elapsed = time() - st
total_kib = total_size / 1000
- print >> sys.stderr, "PDB Streaming: Got %i streams by sha and read all bytes totalling %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (ni, total_kib, total_kib/elapsed , elapsed, ni / elapsed)
-
+ print >> sys.stderr, "PDB Streaming: Got %i streams by sha and read all bytes totalling %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (
+ ni, total_kib, total_kib / elapsed, elapsed, ni / elapsed)
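
CountedNullStream above discards everything written to it while counting bytes, so pack writing can be timed without disk I/O skewing the result. A self-contained equivalent (the real class derives from git.stream.NullStream):

    class CountingNullStream(object):
        """Write sink that drops data but remembers how many bytes passed through."""
        __slots__ = '_bw'

        def __init__(self):
            self._bw = 0

        def bytes_written(self):
            return self._bw

        def write(self, d):
            # discard the data, keep the running byte count
            self._bw += len(d)
            return len(d)
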
diff --git a/git/test/performance/lib.py b/git/test/performance/lib.py
index 2772fd7d..d01ef37e 100644
--- a/git/test/performance/lib.py
+++ b/git/test/performance/lib.py
@@ -1,9 +1,9 @@
"""Contains library functions"""
import os
from git.test.lib import (
- TestBase,
- GlobalsItemDeletorMetaCls
- )
+ TestBase,
+ GlobalsItemDeletorMetaCls
+)
import shutil
import tempfile
@@ -26,49 +26,51 @@ def resolve_or_fail(env_var):
#} END utilities
-#{ Base Classes
+#{ Base Classes
class TestBigRepoR(TestBase):
+
"""TestCase providing access to readonly 'big' repositories using the following
member variables:
-
+
* gitrorepo
-
+
* a big read-only git repository
"""
-
+
#{ Invariants
head_sha_2k = '235d521da60e4699e5bd59ac658b5b48bd76ddca'
head_sha_50 = '32347c375250fd470973a5d76185cac718955fd5'
- #} END invariants
-
+ #} END invariants
+
#{ Configuration
RepoCls = Repo
#} END configuration
-
+
@classmethod
def setUp(cls):
super(TestBigRepoR, cls).setUp()
if cls.RepoCls is None:
raise AssertionError("Require RepoCls in class %s to be set" % cls)
- #END assert configuration
+ # END assert configuration
cls.rorepo = cls.RepoCls(resolve_or_fail(k_env_git_repo))
class TestBigRepoRW(TestBigRepoR):
+
"""As above, but provides a big repository that we can write to.
-
+
Provides ``self.rwrepo``"""
-
+
@classmethod
def setUp(cls):
super(TestBigRepoRW, cls).setUp()
dirname = tempfile.mktemp()
os.mkdir(dirname)
cls.rwrepo = cls.rorepo.clone(dirname, shared=True, bare=True)
-
+
@classmethod
def tearDownAll(cls):
shutil.rmtree(cls.rwrepo.working_dir)
-
+
#} END base classes
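
The fixtures above locate the big test repository through an environment variable via resolve_or_fail(). A minimal sketch of what such a helper plausibly does (the real one lives earlier in lib.py and is not shown in this diff):

    import os

    def resolve_or_fail(env_var):
        """Return the path stored in env_var, failing early with a clear message."""
        try:
            return os.environ[env_var]
        except KeyError:
            raise EnvironmentError("Please set the %r environment variable" % env_var)
        # END handle missing variable
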
diff --git a/git/test/performance/objects/__init__.py b/git/test/performance/objects/__init__.py
index 8b137891..e69de29b 100644
--- a/git/test/performance/objects/__init__.py
+++ b/git/test/performance/objects/__init__.py
@@ -1 +0,0 @@
-
diff --git a/git/test/performance/objects/test_commit.py b/git/test/performance/objects/test_commit.py
index e342e6b3..cd8866d3 100644
--- a/git/test/performance/objects/test_commit.py
+++ b/git/test/performance/objects/test_commit.py
@@ -12,8 +12,9 @@ from cStringIO import StringIO
from time import time
import sys
+
class TestPerformance(TestBigRepoRW):
-
+
# ref with about 100 commits in its history
ref_100 = 'v0.99'
@@ -26,15 +27,15 @@ class TestPerformance(TestBigRepoRW):
c.committer_tz_offset
c.message
c.parents
-
+
def test_iteration(self):
no = 0
nc = 0
-
- # find the first commit containing the given path - always do a full
- # iteration ( restricted to the path in question ), but in fact it should
+
+ # find the first commit containing the given path - always do a full
+ # iteration ( restricted to the path in question ), but in fact it should
# return quite a lot of commits, we just take one and hence abort the operation
-
+
st = time()
for c in self.rorepo.iter_commits(self.ref_100):
nc += 1
@@ -46,8 +47,9 @@ class TestPerformance(TestBigRepoRW):
# END for each commit
elapsed_time = time() - st
assert no, "Should have traversed a few objects"
- print >> sys.stderr, "Traversed %i Trees and a total of %i uncached objects in %s [s] ( %f objs/s )" % (nc, no, elapsed_time, no/elapsed_time)
-
+ print >> sys.stderr, "Traversed %i Trees and a total of %i uncached objects in %s [s] ( %f objs/s )" % (
+ nc, no, elapsed_time, no / elapsed_time)
+
def test_commit_traversal(self):
# bound to cat-file parsing performance
nc = 0
@@ -57,8 +59,8 @@ class TestPerformance(TestBigRepoRW):
self._query_commit_info(c)
# END for each traversed commit
elapsed_time = time() - st
- print >> sys.stderr, "Traversed %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time)
-
+ print >> sys.stderr, "Traversed %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc / elapsed_time)
+
def test_commit_iteration(self):
# bound to stream parsing performance
nc = 0
@@ -68,33 +70,34 @@ class TestPerformance(TestBigRepoRW):
self._query_commit_info(c)
# END for each traversed commit
elapsed_time = time() - st
- print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time)
-
+ print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc / elapsed_time)
+
def test_commit_serialization(self):
assert_commit_serialization(self.rwrepo, self.head_sha_2k, True)
-
+
rwrepo = self.rwrepo
make_object = rwrepo.store
# direct serialization - deserialization can be tested afterwards
# serialization is probably limited on IO
hc = rwrepo.commit(self.head_sha_2k)
-
+
commits = list()
nc = 5000
st = time()
for i in xrange(nc):
- cm = Commit( rwrepo, Commit.NULL_BIN_SHA, hc.tree,
- hc.author, hc.authored_date, hc.author_tz_offset,
- hc.committer, hc.committed_date, hc.committer_tz_offset,
- str(i), parents=hc.parents, encoding=hc.encoding)
-
+ cm = Commit(rwrepo, Commit.NULL_BIN_SHA, hc.tree,
+ hc.author, hc.authored_date, hc.author_tz_offset,
+ hc.committer, hc.committed_date, hc.committer_tz_offset,
+ str(i), parents=hc.parents, encoding=hc.encoding)
+
stream = StringIO()
cm._serialize(stream)
slen = stream.tell()
stream.seek(0)
-
+
cm.binsha = make_object(IStream(Commit.type, slen, stream)).binsha
# END commit creation
elapsed = time() - st
-
- print >> sys.stderr, "Serialized %i commits to loose objects in %f s ( %f commits / s )" % (nc, elapsed, nc / elapsed)
+
+ print >> sys.stderr, "Serialized %i commits to loose objects in %f s ( %f commits / s )" % (
+ nc, elapsed, nc / elapsed)
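
test_commit_serialization above follows a build/serialize/store loop. Stripped of the Commit specifics, the pattern looks like this; make_commit and store are placeholders for the Commit constructor and rwrepo.store used in the test:

    from io import BytesIO
    from time import time

    def time_serialization(make_commit, store, n=5000):
        """make_commit(i) yields an object with _serialize(stream); store(size, stream) persists it."""
        st = time()
        for i in range(n):
            cm = make_commit(i)
            stream = BytesIO()
            cm._serialize(stream)  # write the raw object representation
            slen = stream.tell()
            stream.seek(0)
            store(slen, stream)  # hand the buffer to the object database
        # END commit creation
        return n / (time() - st)  # commits per second
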
diff --git a/git/test/performance/test_utils.py b/git/test/performance/test_utils.py
index 8637af48..7db972f7 100644
--- a/git/test/performance/test_utils.py
+++ b/git/test/performance/test_utils.py
@@ -5,33 +5,37 @@ import stat
from lib import (
TestBigRepoR
- )
+)
class TestUtilPerformance(TestBigRepoR):
-
+
def test_access(self):
# compare dict vs. slot access
class Slotty(object):
__slots__ = "attr"
+
def __init__(self):
self.attr = 1
-
+
class Dicty(object):
+
def __init__(self):
self.attr = 1
-
+
class BigSlotty(object):
__slots__ = ('attr', ) + tuple('abcdefghijk')
+
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, 1)
-
+
class BigDicty(object):
+
def __init__(self):
for attr in BigSlotty.__slots__:
setattr(self, attr, 1)
-
+
ni = 1000000
for cls in (Slotty, Dicty, BigSlotty, BigDicty):
cli = cls()
@@ -40,9 +44,10 @@ class TestUtilPerformance(TestBigRepoR):
cli.attr
# END for each access
elapsed = time() - st
- print >> sys.stderr, "Accessed %s.attr %i times in %s s ( %f acc / s)" % (cls.__name__, ni, elapsed, ni / elapsed)
+ print >> sys.stderr, "Accessed %s.attr %i times in %s s ( %f acc / s)" % (
+ cls.__name__, ni, elapsed, ni / elapsed)
# END for each class type
-
+
# check num of sequence-accesses
for cls in (list, tuple):
x = 10
@@ -55,13 +60,14 @@ class TestUtilPerformance(TestBigRepoR):
# END for
elapsed = time() - st
na = ni * 3
- print >> sys.stderr, "Accessed %s[x] %i times in %s s ( %f acc / s)" % (cls.__name__, na, elapsed, na / elapsed)
- # END for each sequence
-
+ print >> sys.stderr, "Accessed %s[x] %i times in %s s ( %f acc / s)" % (
+ cls.__name__, na, elapsed, na / elapsed)
+ # END for each sequence
+
def test_instantiation(self):
ni = 100000
max_num_items = 4
- for mni in range(max_num_items+1):
+ for mni in range(max_num_items + 1):
for cls in (tuple, list):
st = time()
for i in xrange(ni):
@@ -70,71 +76,75 @@ class TestUtilPerformance(TestBigRepoR):
elif mni == 1:
cls((1,))
elif mni == 2:
- cls((1,2))
+ cls((1, 2))
elif mni == 3:
- cls((1,2,3))
+ cls((1, 2, 3))
elif mni == 4:
- cls((1,2,3,4))
+ cls((1, 2, 3, 4))
else:
cls(x for x in xrange(mni))
# END handle empty cls
# END for each item
elapsed = time() - st
- print >> sys.stderr, "Created %i %ss of size %i in %f s ( %f inst / s)" % (ni, cls.__name__, mni, elapsed, ni / elapsed)
+ print >> sys.stderr, "Created %i %ss of size %i in %f s ( %f inst / s)" % (
+ ni, cls.__name__, mni, elapsed, ni / elapsed)
# END for each type
# END for each item count
-
+
# tuple and tuple direct
st = time()
for i in xrange(ni):
- t = (1,2,3,4)
+ t = (1, 2, 3, 4)
# END for each item
elapsed = time() - st
print >> sys.stderr, "Created %i tuples (1,2,3,4) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
-
+
st = time()
for i in xrange(ni):
- t = tuple((1,2,3,4))
+ t = tuple((1, 2, 3, 4))
# END for each item
elapsed = time() - st
print >> sys.stderr, "Created %i tuples tuple((1,2,3,4)) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
-
+
def test_unpacking_vs_indexing(self):
ni = 1000000
- list_items = [1,2,3,4]
- tuple_items = (1,2,3,4)
-
+ list_items = [1, 2, 3, 4]
+ tuple_items = (1, 2, 3, 4)
+
for sequence in (list_items, tuple_items):
st = time()
for i in xrange(ni):
one, two, three, four = sequence
# END for each iteration
elapsed = time() - st
- print >> sys.stderr, "Unpacked %i %ss of size %i in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
-
+ print >> sys.stderr, "Unpacked %i %ss of size %i in %f s ( %f acc / s)" % (
+ ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+
st = time()
for i in xrange(ni):
one, two, three, four = sequence[0], sequence[1], sequence[2], sequence[3]
# END for each iteration
elapsed = time() - st
- print >> sys.stderr, "Unpacked %i %ss of size %i individually in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
-
+ print >> sys.stderr, "Unpacked %i %ss of size %i individually in %f s ( %f acc / s)" % (
+ ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+
st = time()
for i in xrange(ni):
one, two = sequence[0], sequence[1]
# END for each iteration
elapsed = time() - st
- print >> sys.stderr, "Unpacked %i %ss of size %i individually (2 of 4) in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+ print >> sys.stderr, "Unpacked %i %ss of size %i individually (2 of 4) in %f s ( %f acc / s)" % (
+ ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
# END for each sequence
-
+
def test_large_list_vs_iteration(self):
# what costs more: alloc/realloc of lists, or the cpu strain of iterators ?
def slow_iter(ni):
for i in xrange(ni):
yield i
# END slow iter - be closer to the real world
-
- # alloc doesn't play a role here it seems
+
+ # alloc doesn't play a role here it seems
for ni in (500, 1000, 10000, 20000, 40000):
st = time()
for i in list(xrange(ni)):
@@ -142,7 +152,7 @@ class TestUtilPerformance(TestBigRepoR):
# END for each item
elapsed = time() - st
print >> sys.stderr, "Iterated %i items from list in %f s ( %f acc / s)" % (ni, elapsed, ni / elapsed)
-
+
st = time()
for i in slow_iter(ni):
i
@@ -150,22 +160,23 @@ class TestUtilPerformance(TestBigRepoR):
elapsed = time() - st
print >> sys.stderr, "Iterated %i items from iterator in %f s ( %f acc / s)" % (ni, elapsed, ni / elapsed)
# END for each number of iterations
-
+
def test_type_vs_inst_class(self):
class NewType(object):
pass
-
+
# lets see which way is faster
inst = NewType()
-
+
ni = 1000000
st = time()
for i in xrange(ni):
inst.__class__()
# END for each item
elapsed = time() - st
- print >> sys.stderr, "Created %i items using inst.__class__ in %f s ( %f items / s)" % (ni, elapsed, ni / elapsed)
-
+ print >> sys.stderr, "Created %i items using inst.__class__ in %f s ( %f items / s)" % (
+ ni, elapsed, ni / elapsed)
+
st = time()
for i in xrange(ni):
type(inst)()
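
All of the micro-benchmarks in test_utils.py share one harness shape: repeat an operation ni times and report operations per second. Factored out as an illustrative sketch (the tests above inline it, using xrange under Python 2):

    from time import time

    def ops_per_second(fun, ni=1000000):
        """Time ni calls of fun() and return the call rate."""
        st = time()
        for _ in range(ni):
            fun()
        # END for each call
        elapsed = time() - st
        return ni / elapsed

    # usage: ops_per_second(lambda: (1, 2, 3, 4))
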