Diffstat (limited to 'git/test/performance')
-rw-r--r--  git/test/performance/db/looseodb_impl.py          | 210
-rw-r--r--  git/test/performance/db/odb_impl.py                | 122
-rw-r--r--  git/test/performance/db/packedodb_impl.py          | 178
-rw-r--r--  git/test/performance/db/test_looseodb_cmd.py       |  10
-rw-r--r--  git/test/performance/db/test_looseodb_dulwich.py   |  10
-rw-r--r--  git/test/performance/db/test_looseodb_pure.py      |   4
-rw-r--r--  git/test/performance/db/test_looseodb_pygit2.py    |  10
-rw-r--r--  git/test/performance/db/test_odb_cmd.py            |   4
-rw-r--r--  git/test/performance/db/test_odb_dulwich.py        |  10
-rw-r--r--  git/test/performance/db/test_odb_pure.py           |   4
-rw-r--r--  git/test/performance/db/test_odb_pygit2.py         |  10
-rw-r--r--  git/test/performance/db/test_packedodb_pure.py     | 136
-rw-r--r--  git/test/performance/lib.py                        |  94
-rw-r--r--  git/test/performance/objects/test_commit.py        | 168
-rw-r--r--  git/test/performance/test_utils.py                 | 330
15 files changed, 650 insertions(+), 650 deletions(-)
diff --git a/git/test/performance/db/looseodb_impl.py b/git/test/performance/db/looseodb_impl.py
index 6d3c1fa6..1da69945 100644
--- a/git/test/performance/db/looseodb_impl.py
+++ b/git/test/performance/db/looseodb_impl.py
@@ -4,18 +4,18 @@ from git.base import *
from git.stream import *
from async import ChannelThreadTask
from git.util import (
- pool,
- bin_to_hex
- )
+ pool,
+ bin_to_hex
+ )
import os
import sys
from time import time
from git.test.lib import (
- GlobalsItemDeletorMetaCls,
- make_memory_file,
- with_rw_repo
- )
+ GlobalsItemDeletorMetaCls,
+ make_memory_file,
+ with_rw_repo
+ )
from git.test.performance.lib import TestBigRepoR
@@ -23,110 +23,110 @@ from git.test.performance.lib import TestBigRepoR
#{ Utilities
def read_chunked_stream(stream):
- total = 0
- while True:
- chunk = stream.read(chunk_size)
- total += len(chunk)
- if len(chunk) < chunk_size:
- break
- # END read stream loop
- assert total == stream.size
- return stream
-
-
+ total = 0
+ while True:
+ chunk = stream.read(chunk_size)
+ total += len(chunk)
+ if len(chunk) < chunk_size:
+ break
+ # END read stream loop
+ assert total == stream.size
+ return stream
+
+
class TestStreamReader(ChannelThreadTask):
- """Expects input streams and reads them in chunks. It will read one at a time,
- requiring a queue chunk of size 1"""
- def __init__(self, *args):
- super(TestStreamReader, self).__init__(*args)
- self.fun = read_chunked_stream
- self.max_chunksize = 1
-
+ """Expects input streams and reads them in chunks. It will read one at a time,
+ requiring a queue chunk of size 1"""
+ def __init__(self, *args):
+ super(TestStreamReader, self).__init__(*args)
+ self.fun = read_chunked_stream
+ self.max_chunksize = 1
+
#} END utilities
class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
- ModuleToDelete = 'TestLooseDBWPerformanceBase'
+ ModuleToDelete = 'TestLooseDBWPerformanceBase'
class TestLooseDBWPerformanceBase(TestBigRepoR):
- __metaclass__ = PerfBaseDeletorMetaClass
-
- large_data_size_bytes = 1000*1000*10 # some MiB should do it
- moderate_data_size_bytes = 1000*1000*1 # just 1 MiB
-
- #{ Configuration
- LooseODBCls = None
- #} END configuration
-
- @classmethod
- def setUpAll(cls):
- super(TestLooseDBWPerformanceBase, cls).setUpAll()
- if cls.LooseODBCls is None:
- raise AssertionError("LooseODBCls must be set in subtype")
- #END assert configuration
- # currently there is no additional configuration
-
- @with_rw_repo("HEAD")
- def test_large_data_streaming(self, rwrepo):
- # TODO: This part overlaps with the same file in git.test.performance.test_stream
- # It should be shared if possible
- objects_path = rwrepo.db_path('')
- ldb = self.LooseODBCls(objects_path)
-
- for randomize in range(2):
- desc = (randomize and 'random ') or ''
- print >> sys.stderr, "Creating %s data ..." % desc
- st = time()
- size, stream = make_memory_file(self.large_data_size_bytes, randomize)
- elapsed = time() - st
- print >> sys.stderr, "Done (in %f s)" % elapsed
-
- # writing - due to the compression it will seem faster than it is
- st = time()
- binsha = ldb.store(IStream('blob', size, stream)).binsha
- elapsed_add = time() - st
- assert ldb.has_object(binsha)
- hexsha = bin_to_hex(binsha)
- db_file = os.path.join(objects_path, hexsha[:2], hexsha[2:])
- fsize_kib = os.path.getsize(db_file) / 1000
-
-
- size_kib = size / 1000
- print >> sys.stderr, "%s: Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (self.LooseODBCls.__name__, size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
-
- # reading all at once
- st = time()
- ostream = ldb.stream(binsha)
- shadata = ostream.read()
- elapsed_readall = time() - st
-
- stream.seek(0)
- assert shadata == stream.getvalue()
- print >> sys.stderr, "%s: Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
-
-
- # reading in chunks of 1 MiB
- cs = 512*1000
- chunks = list()
- st = time()
- ostream = ldb.stream(binsha)
- while True:
- data = ostream.read(cs)
- chunks.append(data)
- if len(data) < cs:
- break
- # END read in chunks
- elapsed_readchunks = time() - st
-
- stream.seek(0)
- assert ''.join(chunks) == stream.getvalue()
-
- cs_kib = cs / 1000
- print >> sys.stderr, "%s: Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
-
- # del db file so git has something to do
- os.remove(db_file)
- # END for each randomization factor
-
+ __metaclass__ = PerfBaseDeletorMetaClass
+
+ large_data_size_bytes = 1000*1000*10 # some MiB should do it
+ moderate_data_size_bytes = 1000*1000*1 # just 1 MiB
+
+ #{ Configuration
+ LooseODBCls = None
+ #} END configuration
+
+ @classmethod
+ def setUp(cls):
+ super(TestLooseDBWPerformanceBase, cls).setUp()
+ if cls.LooseODBCls is None:
+ raise AssertionError("LooseODBCls must be set in subtype")
+ #END assert configuration
+ # currently there is no additional configuration
+
+ @with_rw_repo("HEAD")
+ def test_large_data_streaming(self, rwrepo):
+ # TODO: This part overlaps with the same file in git.test.performance.test_stream
+ # It should be shared if possible
+ objects_path = rwrepo.db_path('')
+ ldb = self.LooseODBCls(objects_path)
+
+ for randomize in range(2):
+ desc = (randomize and 'random ') or ''
+ print >> sys.stderr, "Creating %s data ..." % desc
+ st = time()
+ size, stream = make_memory_file(self.large_data_size_bytes, randomize)
+ elapsed = time() - st
+ print >> sys.stderr, "Done (in %f s)" % elapsed
+
+ # writing - due to the compression it will seem faster than it is
+ st = time()
+ binsha = ldb.store(IStream('blob', size, stream)).binsha
+ elapsed_add = time() - st
+ assert ldb.has_object(binsha)
+ hexsha = bin_to_hex(binsha)
+ db_file = os.path.join(objects_path, hexsha[:2], hexsha[2:])
+ fsize_kib = os.path.getsize(db_file) / 1000
+
+
+ size_kib = size / 1000
+ print >> sys.stderr, "%s: Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (self.LooseODBCls.__name__, size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
+
+ # reading all at once
+ st = time()
+ ostream = ldb.stream(binsha)
+ shadata = ostream.read()
+ elapsed_readall = time() - st
+
+ stream.seek(0)
+ assert shadata == stream.getvalue()
+ print >> sys.stderr, "%s: Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
+
+
+ # reading in chunks of 1 MiB
+ cs = 512*1000
+ chunks = list()
+ st = time()
+ ostream = ldb.stream(binsha)
+ while True:
+ data = ostream.read(cs)
+ chunks.append(data)
+ if len(data) < cs:
+ break
+ # END read in chunks
+ elapsed_readchunks = time() - st
+
+ stream.seek(0)
+ assert ''.join(chunks) == stream.getvalue()
+
+ cs_kib = cs / 1000
+ print >> sys.stderr, "%s: Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
+
+ # del db file so git has something to do
+ os.remove(db_file)
+ # END for each randomization factor
+
diff --git a/git/test/performance/db/odb_impl.py b/git/test/performance/db/odb_impl.py
index 677cf6a8..887604c0 100644
--- a/git/test/performance/db/odb_impl.py
+++ b/git/test/performance/db/odb_impl.py
@@ -5,68 +5,68 @@ import sys
import stat
from git.test.performance.lib import (
- TestBigRepoR,
- GlobalsItemDeletorMetaCls
- )
+ TestBigRepoR,
+ GlobalsItemDeletorMetaCls
+ )
class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
- ModuleToDelete = 'TestObjDBPerformanceBase'
-
+ ModuleToDelete = 'TestObjDBPerformanceBase'
+
class TestObjDBPerformanceBase(TestBigRepoR):
- __metaclass__ = PerfBaseDeletorMetaClass
-
- #{ Configuration
- RepoCls = None # to be set by subclass
- #} END configuration
-
- def test_random_access_test(self):
- repo = self.rorepo
-
- # GET COMMITS
- st = time()
- root_commit = repo.commit(self.head_sha_2k)
- commits = list(root_commit.traverse())
- nc = len(commits)
- elapsed = time() - st
-
- print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (type(repo.odb), nc, elapsed, nc / elapsed)
-
- # GET TREES
- # walk all trees of all commits
- st = time()
- blobs_per_commit = list()
- nt = 0
- for commit in commits:
- tree = commit.tree
- blobs = list()
- for item in tree.traverse():
- nt += 1
- if item.type == 'blob':
- blobs.append(item)
- # direct access for speed
- # END while trees are there for walking
- blobs_per_commit.append(blobs)
- # END for each commit
- elapsed = time() - st
-
- print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (type(repo.odb), nt, len(commits), elapsed, nt / elapsed)
-
- # GET BLOBS
- st = time()
- nb = 0
- too_many = 15000
- data_bytes = 0
- for blob_list in blobs_per_commit:
- for blob in blob_list:
- data_bytes += len(blob.data_stream.read())
- # END for each blobsha
- nb += len(blob_list)
- if nb > too_many:
- break
- # END for each bloblist
- elapsed = time() - st
-
- print >> sys.stderr, "%s: Retrieved %i blobs (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes/1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
-
-
+ __metaclass__ = PerfBaseDeletorMetaClass
+
+ #{ Configuration
+ RepoCls = None # to be set by subclass
+ #} END configuration
+
+ def test_random_access_test(self):
+ repo = self.rorepo
+
+ # GET COMMITS
+ st = time()
+ root_commit = repo.commit(self.head_sha_2k)
+ commits = list(root_commit.traverse())
+ nc = len(commits)
+ elapsed = time() - st
+
+ print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (type(repo.odb), nc, elapsed, nc / elapsed)
+
+ # GET TREES
+ # walk all trees of all commits
+ st = time()
+ blobs_per_commit = list()
+ nt = 0
+ for commit in commits:
+ tree = commit.tree
+ blobs = list()
+ for item in tree.traverse():
+ nt += 1
+ if item.type == 'blob':
+ blobs.append(item)
+ # direct access for speed
+ # END while trees are there for walking
+ blobs_per_commit.append(blobs)
+ # END for each commit
+ elapsed = time() - st
+
+ print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (type(repo.odb), nt, len(commits), elapsed, nt / elapsed)
+
+ # GET BLOBS
+ st = time()
+ nb = 0
+ too_many = 15000
+ data_bytes = 0
+ for blob_list in blobs_per_commit:
+ for blob in blob_list:
+ data_bytes += len(blob.data_stream.read())
+ # END for each blobsha
+ nb += len(blob_list)
+ if nb > too_many:
+ break
+ # END for each bloblist
+ elapsed = time() - st
+
+ print >> sys.stderr, "%s: Retrieved %i blobs (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes/1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
+
+
diff --git a/git/test/performance/db/packedodb_impl.py b/git/test/performance/db/packedodb_impl.py
index b95a8d13..23d00444 100644
--- a/git/test/performance/db/packedodb_impl.py
+++ b/git/test/performance/db/packedodb_impl.py
@@ -4,9 +4,9 @@
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Performance tests for object store"""
from git.test.performance.lib import (
- TestBigRepoR,
- GlobalsItemDeletorMetaCls
- )
+ TestBigRepoR,
+ GlobalsItemDeletorMetaCls
+ )
from git.exc import UnsupportedOperation
@@ -17,91 +17,91 @@ import random
class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
- ModuleToDelete = 'TestPurePackedODBPerformanceBase'
+ ModuleToDelete = 'TestPurePackedODBPerformanceBase'
class TestPurePackedODBPerformanceBase(TestBigRepoR):
- __metaclass__ = PerfBaseDeletorMetaClass
-
- #{ Configuration
- PackedODBCls = None
- #} END configuration
-
- @classmethod
- def setUpAll(cls):
- super(TestPurePackedODBPerformanceBase, cls).setUpAll()
- if cls.PackedODBCls is None:
- raise AssertionError("PackedODBCls must be set in subclass")
- #END assert configuration
- cls.ropdb = cls.PackedODBCls(cls.rorepo.db_path("pack"))
-
- def test_pack_random_access(self):
- pdb = self.ropdb
-
- # sha lookup
- st = time()
- sha_list = list(pdb.sha_iter())
- elapsed = time() - st
- ns = len(sha_list)
- print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed)
-
- # sha lookup: best-case and worst case access
- pdb_pack_info = pdb._pack_info
- # END shuffle shas
- st = time()
- for sha in sha_list:
- pdb_pack_info(sha)
- # END for each sha to look up
- elapsed = time() - st
-
- # discard cache
- del(pdb._entities)
- pdb.entities()
- print >> sys.stderr, "PDB: looked up %i sha in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed)
- # END for each random mode
-
- # query info and streams only
- max_items = 10000 # can wait longer when testing memory
- for pdb_fun in (pdb.info, pdb.stream):
- st = time()
- for sha in sha_list[:max_items]:
- pdb_fun(sha)
- elapsed = time() - st
- print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed)
- # END for each function
-
- # retrieve stream and read all
- max_items = 5000
- pdb_stream = pdb.stream
- total_size = 0
- st = time()
- for sha in sha_list[:max_items]:
- stream = pdb_stream(sha)
- stream.read()
- total_size += stream.size
- elapsed = time() - st
- total_kib = total_size / 1000
- print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totalling %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed)
-
- def test_correctness(self):
- pdb = self.ropdb
- # disabled for now as it used to work perfectly, checking big repositories takes a long time
- print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)"
- for crc in range(2):
- count = 0
- st = time()
- for entity in pdb.entities():
- pack_verify = entity.is_valid_stream
- sha_by_index = entity.index().sha
- for index in xrange(entity.index().size()):
- try:
- assert pack_verify(sha_by_index(index), use_crc=crc)
- count += 1
- except UnsupportedOperation:
- pass
- # END ignore old indices
- # END for each index
- # END for each entity
- elapsed = time() - st
- print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed)
- # END for each verify mode
-
+ __metaclass__ = PerfBaseDeletorMetaClass
+
+ #{ Configuration
+ PackedODBCls = None
+ #} END configuration
+
+ @classmethod
+ def setUp(cls):
+ super(TestPurePackedODBPerformanceBase, cls).setUp()
+ if cls.PackedODBCls is None:
+ raise AssertionError("PackedODBCls must be set in subclass")
+ #END assert configuration
+ cls.ropdb = cls.PackedODBCls(cls.rorepo.db_path("pack"))
+
+ def test_pack_random_access(self):
+ pdb = self.ropdb
+
+ # sha lookup
+ st = time()
+ sha_list = list(pdb.sha_iter())
+ elapsed = time() - st
+ ns = len(sha_list)
+ print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed)
+
+ # sha lookup: best-case and worst case access
+ pdb_pack_info = pdb._pack_info
+ # END shuffle shas
+ st = time()
+ for sha in sha_list:
+ pdb_pack_info(sha)
+ # END for each sha to look up
+ elapsed = time() - st
+
+ # discard cache
+ del(pdb._entities)
+ pdb.entities()
+ print >> sys.stderr, "PDB: looked up %i sha in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed)
+ # END for each random mode
+
+ # query info and streams only
+ max_items = 10000 # can wait longer when testing memory
+ for pdb_fun in (pdb.info, pdb.stream):
+ st = time()
+ for sha in sha_list[:max_items]:
+ pdb_fun(sha)
+ elapsed = time() - st
+ print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed)
+ # END for each function
+
+ # retrieve stream and read all
+ max_items = 5000
+ pdb_stream = pdb.stream
+ total_size = 0
+ st = time()
+ for sha in sha_list[:max_items]:
+ stream = pdb_stream(sha)
+ stream.read()
+ total_size += stream.size
+ elapsed = time() - st
+ total_kib = total_size / 1000
+ print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totalling %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed)
+
+ def test_correctness(self):
+ pdb = self.ropdb
+ # disabled for now as it used to work perfectly, checking big repositories takes a long time
+ print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)"
+ for crc in range(2):
+ count = 0
+ st = time()
+ for entity in pdb.entities():
+ pack_verify = entity.is_valid_stream
+ sha_by_index = entity.index().sha
+ for index in xrange(entity.index().size()):
+ try:
+ assert pack_verify(sha_by_index(index), use_crc=crc)
+ count += 1
+ except UnsupportedOperation:
+ pass
+ # END ignore old indices
+ # END for each index
+ # END for each entity
+ elapsed = time() - st
+ print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed)
+ # END for each verify mode
+
diff --git a/git/test/performance/db/test_looseodb_cmd.py b/git/test/performance/db/test_looseodb_cmd.py
index 9738278c..9147eff6 100644
--- a/git/test/performance/db/test_looseodb_cmd.py
+++ b/git/test/performance/db/test_looseodb_cmd.py
@@ -4,8 +4,8 @@ from looseodb_impl import TestLooseDBWPerformanceBase
import sys
class TestCmdLooseDB(TestLooseDBWPerformanceBase):
- LooseODBCls = CmdCompatibilityGitDB
-
- def test_info(self):
- sys.stderr.write("This test does not check the write performance of the git command as it is implemented in pure python")
-
+ LooseODBCls = CmdCompatibilityGitDB
+
+ def test_info(self):
+ sys.stderr.write("This test does not check the write performance of the git command as it is implemented in pure python")
+
diff --git a/git/test/performance/db/test_looseodb_dulwich.py b/git/test/performance/db/test_looseodb_dulwich.py
index e123ebf1..174be83d 100644
--- a/git/test/performance/db/test_looseodb_dulwich.py
+++ b/git/test/performance/db/test_looseodb_dulwich.py
@@ -1,13 +1,13 @@
try:
- from git.db.dulwich.complex import DulwichGitODB
+ from git.db.dulwich.complex import DulwichGitODB
except ImportError:
- from git.db.py.complex import PureGitODB as DulwichGitODB
+ from git.db.py.complex import PureGitODB as DulwichGitODB
#END handle import
from git.test.db.dulwich.lib import DulwichRequiredMetaMixin
from looseodb_impl import TestLooseDBWPerformanceBase
class TestPureLooseDB(TestLooseDBWPerformanceBase):
- __metaclass__ = DulwichRequiredMetaMixin
- LooseODBCls = DulwichGitODB
-
+ __metaclass__ = DulwichRequiredMetaMixin
+ LooseODBCls = DulwichGitODB
+
diff --git a/git/test/performance/db/test_looseodb_pure.py b/git/test/performance/db/test_looseodb_pure.py
index 46f39d5e..bb080612 100644
--- a/git/test/performance/db/test_looseodb_pure.py
+++ b/git/test/performance/db/test_looseodb_pure.py
@@ -2,5 +2,5 @@ from git.db.py.loose import PureLooseObjectODB
from looseodb_impl import TestLooseDBWPerformanceBase
class TestPureLooseDB(TestLooseDBWPerformanceBase):
- LooseODBCls = PureLooseObjectODB
-
+ LooseODBCls = PureLooseObjectODB
+
diff --git a/git/test/performance/db/test_looseodb_pygit2.py b/git/test/performance/db/test_looseodb_pygit2.py
index 326af9fb..a9661111 100644
--- a/git/test/performance/db/test_looseodb_pygit2.py
+++ b/git/test/performance/db/test_looseodb_pygit2.py
@@ -1,13 +1,13 @@
try:
- from git.db.pygit2.complex import Pygit2GitODB
+ from git.db.pygit2.complex import Pygit2GitODB
except ImportError:
- from git.db.py.complex import PureGitODB as Pygit2GitODB
+ from git.db.py.complex import PureGitODB as Pygit2GitODB
#END handle import
from git.test.db.pygit2.lib import Pygit2RequiredMetaMixin
from looseodb_impl import TestLooseDBWPerformanceBase
class TestPureLooseDB(TestLooseDBWPerformanceBase):
- __metaclass__ = Pygit2RequiredMetaMixin
- LooseODBCls = Pygit2GitODB
-
+ __metaclass__ = Pygit2RequiredMetaMixin
+ LooseODBCls = Pygit2GitODB
+
diff --git a/git/test/performance/db/test_odb_cmd.py b/git/test/performance/db/test_odb_cmd.py
index acd55cc9..37af34fd 100644
--- a/git/test/performance/db/test_odb_cmd.py
+++ b/git/test/performance/db/test_odb_cmd.py
@@ -2,5 +2,5 @@ from git.db.complex import CmdCompatibilityGitDB
from odb_impl import TestObjDBPerformanceBase
class TestCmdDB(TestObjDBPerformanceBase):
- RepoCls = CmdCompatibilityGitDB
-
+ RepoCls = CmdCompatibilityGitDB
+
diff --git a/git/test/performance/db/test_odb_dulwich.py b/git/test/performance/db/test_odb_dulwich.py
index 6802483c..33abc88c 100644
--- a/git/test/performance/db/test_odb_dulwich.py
+++ b/git/test/performance/db/test_odb_dulwich.py
@@ -1,13 +1,13 @@
try:
- from git.db.dulwich.complex import DulwichCompatibilityGitDB
+ from git.db.dulwich.complex import DulwichCompatibilityGitDB
except ImportError:
- from git.db.complex import PureCompatibilityGitDB as DulwichCompatibilityGitDB
+ from git.db.complex import PureCompatibilityGitDB as DulwichCompatibilityGitDB
#END handle dulwich compatibility
from git.test.db.dulwich.lib import DulwichRequiredMetaMixin
from odb_impl import TestObjDBPerformanceBase
class TestDulwichDB(TestObjDBPerformanceBase):
- __metaclass__ = DulwichRequiredMetaMixin
- RepoCls = DulwichCompatibilityGitDB
-
+ __metaclass__ = DulwichRequiredMetaMixin
+ RepoCls = DulwichCompatibilityGitDB
+
diff --git a/git/test/performance/db/test_odb_pure.py b/git/test/performance/db/test_odb_pure.py
index 6ed3585d..93139c57 100644
--- a/git/test/performance/db/test_odb_pure.py
+++ b/git/test/performance/db/test_odb_pure.py
@@ -2,5 +2,5 @@ from git.db.complex import PureCompatibilityGitDB
from odb_impl import TestObjDBPerformanceBase
class TestPureDB(TestObjDBPerformanceBase):
- RepoCls = PureCompatibilityGitDB
-
+ RepoCls = PureCompatibilityGitDB
+
diff --git a/git/test/performance/db/test_odb_pygit2.py b/git/test/performance/db/test_odb_pygit2.py
index bb7ed8a9..c5911ae3 100644
--- a/git/test/performance/db/test_odb_pygit2.py
+++ b/git/test/performance/db/test_odb_pygit2.py
@@ -1,13 +1,13 @@
try:
- from git.db.pygit2.complex import Pygit2CompatibilityGitDB
+ from git.db.pygit2.complex import Pygit2CompatibilityGitDB
except ImportError:
- from git.db.complex import PureCompatibilityGitDB as Pygit2CompatibilityGitDB
+ from git.db.complex import PureCompatibilityGitDB as Pygit2CompatibilityGitDB
#END handle pygit2 compatibility
from git.test.db.pygit2.lib import Pygit2RequiredMetaMixin
from odb_impl import TestObjDBPerformanceBase
class TestPygit2DB(TestObjDBPerformanceBase):
- __metaclass__ = Pygit2RequiredMetaMixin
- RepoCls = Pygit2CompatibilityGitDB
-
+ __metaclass__ = Pygit2RequiredMetaMixin
+ RepoCls = Pygit2CompatibilityGitDB
+
diff --git a/git/test/performance/db/test_packedodb_pure.py b/git/test/performance/db/test_packedodb_pure.py
index 11497d9d..90e8381f 100644
--- a/git/test/performance/db/test_packedodb_pure.py
+++ b/git/test/performance/db/test_packedodb_pure.py
@@ -17,73 +17,73 @@ from nose import SkipTest
class CountedNullStream(NullStream):
- __slots__ = '_bw'
- def __init__(self):
- self._bw = 0
-
- def bytes_written(self):
- return self._bw
-
- def write(self, d):
- self._bw += NullStream.write(self, d)
-
+ __slots__ = '_bw'
+ def __init__(self):
+ self._bw = 0
+
+ def bytes_written(self):
+ return self._bw
+
+ def write(self, d):
+ self._bw += NullStream.write(self, d)
+
class TestPurePackedODB(TestPurePackedODBPerformanceBase):
- #{ Configuration
- PackedODBCls = PurePackedODB
- #} END configuration
-
- def test_pack_writing_note(self):
- sys.stderr.write("test_pack_writing should be adjusted to support different databases to read from - see test for more info")
- raise SkipTest()
-
- def test_pack_writing(self):
- # see how fast we can write a pack from object streams.
- # This will not be fast, as we take time for decompressing the streams as well
- # For now we test the fast streaming and slow streaming versions manually
- ostream = CountedNullStream()
- # NOTE: We use the same repo twice to see whether OS caching helps
- for rorepo in (self.rorepo, self.rorepo, self.ropdb):
-
- ni = 5000
- count = 0
- total_size = 0
- st = time()
- for sha in rorepo.sha_iter():
- count += 1
- rorepo.stream(sha)
- if count == ni:
- break
- #END gather objects for pack-writing
- elapsed = time() - st
- print >> sys.stderr, "PDB Streaming: Got %i streams from %s by sha in %f s ( %f streams/s )" % (count, rorepo.__class__.__name__, elapsed, count / elapsed)
-
- st = time()
- PackEntity.write_pack((rorepo.stream(sha) for sha in rorepo.sha_iter()), ostream.write, object_count=ni)
- elapsed = time() - st
- total_kb = ostream.bytes_written() / 1000
- print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed)
- #END for each rorepo
-
-
- def test_stream_reading(self):
- raise SkipTest("This test was only used for --with-profile runs")
- pdb = self.ropdb
-
- # streaming only, meant for --with-profile runs
- ni = 5000
- count = 0
- pdb_stream = pdb.stream
- total_size = 0
- st = time()
- for sha in pdb.sha_iter():
- if count == ni:
- break
- stream = pdb_stream(sha)
- stream.read()
- total_size += stream.size
- count += 1
- elapsed = time() - st
- total_kib = total_size / 1000
- print >> sys.stderr, "PDB Streaming: Got %i streams by sha and read all bytes totalling %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (ni, total_kib, total_kib/elapsed , elapsed, ni / elapsed)
-
+ #{ Configuration
+ PackedODBCls = PurePackedODB
+ #} END configuration
+
+ def test_pack_writing_note(self):
+ sys.stderr.write("test_pack_writing should be adjusted to support different databases to read from - see test for more info")
+ raise SkipTest()
+
+ def test_pack_writing(self):
+ # see how fast we can write a pack from object streams.
+ # This will not be fast, as we take time for decompressing the streams as well
+ # For now we test the fast streaming and slow streaming versions manually
+ ostream = CountedNullStream()
+ # NOTE: We use the same repo twice to see whether OS caching helps
+ for rorepo in (self.rorepo, self.rorepo, self.ropdb):
+
+ ni = 5000
+ count = 0
+ total_size = 0
+ st = time()
+ for sha in rorepo.sha_iter():
+ count += 1
+ rorepo.stream(sha)
+ if count == ni:
+ break
+ #END gather objects for pack-writing
+ elapsed = time() - st
+ print >> sys.stderr, "PDB Streaming: Got %i streams from %s by sha in %f s ( %f streams/s )" % (count, rorepo.__class__.__name__, elapsed, count / elapsed)
+
+ st = time()
+ PackEntity.write_pack((rorepo.stream(sha) for sha in rorepo.sha_iter()), ostream.write, object_count=ni)
+ elapsed = time() - st
+ total_kb = ostream.bytes_written() / 1000
+ print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed)
+ #END for each rorepo
+
+
+ def test_stream_reading(self):
+ raise SkipTest("This test was only used for --with-profile runs")
+ pdb = self.ropdb
+
+ # streaming only, meant for --with-profile runs
+ ni = 5000
+ count = 0
+ pdb_stream = pdb.stream
+ total_size = 0
+ st = time()
+ for sha in pdb.sha_iter():
+ if count == ni:
+ break
+ stream = pdb_stream(sha)
+ stream.read()
+ total_size += stream.size
+ count += 1
+ elapsed = time() - st
+ total_kib = total_size / 1000
+ print >> sys.stderr, "PDB Streaming: Got %i streams by sha and read all bytes totalling %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (ni, total_kib, total_kib/elapsed , elapsed, ni / elapsed)
+
diff --git a/git/test/performance/lib.py b/git/test/performance/lib.py
index 758d402d..2772fd7d 100644
--- a/git/test/performance/lib.py
+++ b/git/test/performance/lib.py
@@ -1,9 +1,9 @@
"""Contains library functions"""
import os
from git.test.lib import (
- TestBase,
- GlobalsItemDeletorMetaCls
- )
+ TestBase,
+ GlobalsItemDeletorMetaCls
+ )
import shutil
import tempfile
@@ -16,12 +16,12 @@ k_env_git_repo = "GIT_PYTHON_TEST_GIT_REPO_BASE"
#{ Utilities
def resolve_or_fail(env_var):
- """:return: resolved environment variable or raise EnvironmentError"""
- try:
- return os.environ[env_var]
- except KeyError:
- raise EnvironmentError("Please set the %r environment variable and retry" % env_var)
- # END exception handling
+ """:return: resolved environment variable or raise EnvironmentError"""
+ try:
+ return os.environ[env_var]
+ except KeyError:
+ raise EnvironmentError("Please set the %r environment variable and retry" % env_var)
+ # END exception handling
#} END utilities
@@ -29,46 +29,46 @@ def resolve_or_fail(env_var):
#{ Base Classes
class TestBigRepoR(TestBase):
- """TestCase providing access to readonly 'big' repositories using the following
- member variables:
-
- * gitrorepo
-
- * a big read-only git repository
+ """TestCase providing access to readonly 'big' repositories using the following
+ member variables:
+
+ * gitrorepo
+
+ * a big read-only git repository
"""
-
- #{ Invariants
- head_sha_2k = '235d521da60e4699e5bd59ac658b5b48bd76ddca'
- head_sha_50 = '32347c375250fd470973a5d76185cac718955fd5'
- #} END invariants
-
- #{ Configuration
- RepoCls = Repo
- #} END configuration
-
- @classmethod
- def setUpAll(cls):
- super(TestBigRepoR, cls).setUpAll()
- if cls.RepoCls is None:
- raise AssertionError("Require RepoCls in class %s to be set" % cls)
- #END assert configuration
- cls.rorepo = cls.RepoCls(resolve_or_fail(k_env_git_repo))
+
+ #{ Invariants
+ head_sha_2k = '235d521da60e4699e5bd59ac658b5b48bd76ddca'
+ head_sha_50 = '32347c375250fd470973a5d76185cac718955fd5'
+ #} END invariants
+
+ #{ Configuration
+ RepoCls = Repo
+ #} END configuration
+
+ @classmethod
+ def setUp(cls):
+ super(TestBigRepoR, cls).setUp()
+ if cls.RepoCls is None:
+ raise AssertionError("Require RepoCls in class %s to be set" % cls)
+ #END assert configuration
+ cls.rorepo = cls.RepoCls(resolve_or_fail(k_env_git_repo))
class TestBigRepoRW(TestBigRepoR):
- """As above, but provides a big repository that we can write to.
-
- Provides ``self.rwrepo``"""
-
- @classmethod
- def setUpAll(cls):
- super(TestBigRepoRW, cls).setUpAll()
- dirname = tempfile.mktemp()
- os.mkdir(dirname)
- cls.rwrepo = cls.rorepo.clone(dirname, shared=True, bare=True)
-
- @classmethod
- def tearDownAll(cls):
- shutil.rmtree(cls.rwrepo.working_dir)
-
+ """As above, but provides a big repository that we can write to.
+
+ Provides ``self.rwrepo``"""
+
+ @classmethod
+ def setUp(cls):
+ super(TestBigRepoRW, cls).setUp()
+ dirname = tempfile.mktemp()
+ os.mkdir(dirname)
+ cls.rwrepo = cls.rorepo.clone(dirname, shared=True, bare=True)
+
+ @classmethod
+ def tearDownAll(cls):
+ shutil.rmtree(cls.rwrepo.working_dir)
+
#} END base classes
diff --git a/git/test/performance/objects/test_commit.py b/git/test/performance/objects/test_commit.py
index 685fba2f..e342e6b3 100644
--- a/git/test/performance/objects/test_commit.py
+++ b/git/test/performance/objects/test_commit.py
@@ -13,88 +13,88 @@ from time import time
import sys
class TestPerformance(TestBigRepoRW):
-
- # ref with about 100 commits in its history
- ref_100 = 'v0.99'
+
+ # ref with about 100 commits in its history
+ ref_100 = 'v0.99'
- def _query_commit_info(self, c):
- c.author
- c.authored_date
- c.author_tz_offset
- c.committer
- c.committed_date
- c.committer_tz_offset
- c.message
- c.parents
-
- def test_iteration(self):
- no = 0
- nc = 0
-
- # find the first commit containing the given path - always do a full
- # iteration ( restricted to the path in question ), but in fact it should
- # return quite a lot of commits, we just take one and hence abort the operation
-
- st = time()
- for c in self.rorepo.iter_commits(self.ref_100):
- nc += 1
- self._query_commit_info(c)
- for obj in c.tree.traverse():
- obj.size
- no += 1
- # END for each object
- # END for each commit
- elapsed_time = time() - st
- assert no, "Should have traversed a few objects"
- print >> sys.stderr, "Traversed %i Trees and a total of %i uncached objects in %s [s] ( %f objs/s )" % (nc, no, elapsed_time, no/elapsed_time)
-
- def test_commit_traversal(self):
- # bound to cat-file parsing performance
- nc = 0
- st = time()
- for c in self.rorepo.commit(self.head_sha_2k).traverse(branch_first=False):
- nc += 1
- self._query_commit_info(c)
- # END for each traversed commit
- elapsed_time = time() - st
- print >> sys.stderr, "Traversed %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time)
-
- def test_commit_iteration(self):
- # bound to stream parsing performance
- nc = 0
- st = time()
- for c in Commit.iter_items(self.rorepo, self.head_sha_2k):
- nc += 1
- self._query_commit_info(c)
- # END for each traversed commit
- elapsed_time = time() - st
- print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time)
-
- def test_commit_serialization(self):
- assert_commit_serialization(self.rwrepo, self.head_sha_2k, True)
-
- rwrepo = self.rwrepo
- make_object = rwrepo.store
- # direct serialization - deserialization can be tested afterwards
- # serialization is probably limited by IO
- hc = rwrepo.commit(self.head_sha_2k)
-
- commits = list()
- nc = 5000
- st = time()
- for i in xrange(nc):
- cm = Commit( rwrepo, Commit.NULL_BIN_SHA, hc.tree,
- hc.author, hc.authored_date, hc.author_tz_offset,
- hc.committer, hc.committed_date, hc.committer_tz_offset,
- str(i), parents=hc.parents, encoding=hc.encoding)
-
- stream = StringIO()
- cm._serialize(stream)
- slen = stream.tell()
- stream.seek(0)
-
- cm.binsha = make_object(IStream(Commit.type, slen, stream)).binsha
- # END commit creation
- elapsed = time() - st
-
- print >> sys.stderr, "Serialized %i commits to loose objects in %f s ( %f commits / s )" % (nc, elapsed, nc / elapsed)
+ def _query_commit_info(self, c):
+ c.author
+ c.authored_date
+ c.author_tz_offset
+ c.committer
+ c.committed_date
+ c.committer_tz_offset
+ c.message
+ c.parents
+
+ def test_iteration(self):
+ no = 0
+ nc = 0
+
+ # find the first commit containing the given path - always do a full
+ # iteration ( restricted to the path in question ), but in fact it should
+ # return quite a lot of commits, we just take one and hence abort the operation
+
+ st = time()
+ for c in self.rorepo.iter_commits(self.ref_100):
+ nc += 1
+ self._query_commit_info(c)
+ for obj in c.tree.traverse():
+ obj.size
+ no += 1
+ # END for each object
+ # END for each commit
+ elapsed_time = time() - st
+ assert no, "Should have traversed a few objects"
+ print >> sys.stderr, "Traversed %i Trees and a total of %i uncached objects in %s [s] ( %f objs/s )" % (nc, no, elapsed_time, no/elapsed_time)
+
+ def test_commit_traversal(self):
+ # bound to cat-file parsing performance
+ nc = 0
+ st = time()
+ for c in self.rorepo.commit(self.head_sha_2k).traverse(branch_first=False):
+ nc += 1
+ self._query_commit_info(c)
+ # END for each traversed commit
+ elapsed_time = time() - st
+ print >> sys.stderr, "Traversed %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time)
+
+ def test_commit_iteration(self):
+ # bound to stream parsing performance
+ nc = 0
+ st = time()
+ for c in Commit.iter_items(self.rorepo, self.head_sha_2k):
+ nc += 1
+ self._query_commit_info(c)
+ # END for each traversed commit
+ elapsed_time = time() - st
+ print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time)
+
+ def test_commit_serialization(self):
+ assert_commit_serialization(self.rwrepo, self.head_sha_2k, True)
+
+ rwrepo = self.rwrepo
+ make_object = rwrepo.store
+ # direct serialization - deserialization can be tested afterwards
+ # serialization is probably limited by IO
+ hc = rwrepo.commit(self.head_sha_2k)
+
+ commits = list()
+ nc = 5000
+ st = time()
+ for i in xrange(nc):
+ cm = Commit( rwrepo, Commit.NULL_BIN_SHA, hc.tree,
+ hc.author, hc.authored_date, hc.author_tz_offset,
+ hc.committer, hc.committed_date, hc.committer_tz_offset,
+ str(i), parents=hc.parents, encoding=hc.encoding)
+
+ stream = StringIO()
+ cm._serialize(stream)
+ slen = stream.tell()
+ stream.seek(0)
+
+ cm.binsha = make_object(IStream(Commit.type, slen, stream)).binsha
+ # END commit creation
+ elapsed = time() - st
+
+ print >> sys.stderr, "Serialized %i commits to loose objects in %f s ( %f commits / s )" % (nc, elapsed, nc / elapsed)
diff --git a/git/test/performance/test_utils.py b/git/test/performance/test_utils.py
index 19c1e84a..8637af48 100644
--- a/git/test/performance/test_utils.py
+++ b/git/test/performance/test_utils.py
@@ -4,171 +4,171 @@ import sys
import stat
from lib import (
- TestBigRepoR
- )
+ TestBigRepoR
+ )
class TestUtilPerformance(TestBigRepoR):
-
- def test_access(self):
- # compare dict vs. slot access
- class Slotty(object):
- __slots__ = "attr"
- def __init__(self):
- self.attr = 1
-
- class Dicty(object):
- def __init__(self):
- self.attr = 1
-
- class BigSlotty(object):
- __slots__ = ('attr', ) + tuple('abcdefghijk')
- def __init__(self):
- for attr in self.__slots__:
- setattr(self, attr, 1)
-
- class BigDicty(object):
- def __init__(self):
- for attr in BigSlotty.__slots__:
- setattr(self, attr, 1)
-
- ni = 1000000
- for cls in (Slotty, Dicty, BigSlotty, BigDicty):
- cli = cls()
- st = time()
- for i in xrange(ni):
- cli.attr
- # END for each access
- elapsed = time() - st
- print >> sys.stderr, "Accessed %s.attr %i times in %s s ( %f acc / s)" % (cls.__name__, ni, elapsed, ni / elapsed)
- # END for each class type
-
- # check number of sequence accesses
- for cls in (list, tuple):
- x = 10
- st = time()
- s = cls(range(x))
- for i in xrange(ni):
- s[0]
- s[1]
- s[2]
- # END for
- elapsed = time() - st
- na = ni * 3
- print >> sys.stderr, "Accessed %s[x] %i times in %s s ( %f acc / s)" % (cls.__name__, na, elapsed, na / elapsed)
- # END for each sequence
-
- def test_instantiation(self):
- ni = 100000
- max_num_items = 4
- for mni in range(max_num_items+1):
- for cls in (tuple, list):
- st = time()
- for i in xrange(ni):
- if mni == 0:
- cls()
- elif mni == 1:
- cls((1,))
- elif mni == 2:
- cls((1,2))
- elif mni == 3:
- cls((1,2,3))
- elif mni == 4:
- cls((1,2,3,4))
- else:
- cls(x for x in xrange(mni))
- # END handle empty cls
- # END for each item
- elapsed = time() - st
- print >> sys.stderr, "Created %i %ss of size %i in %f s ( %f inst / s)" % (ni, cls.__name__, mni, elapsed, ni / elapsed)
- # END for each type
- # END for each item count
-
- # tuple and tuple direct
- st = time()
- for i in xrange(ni):
- t = (1,2,3,4)
- # END for each item
- elapsed = time() - st
- print >> sys.stderr, "Created %i tuples (1,2,3,4) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
-
- st = time()
- for i in xrange(ni):
- t = tuple((1,2,3,4))
- # END for each item
- elapsed = time() - st
- print >> sys.stderr, "Created %i tuples tuple((1,2,3,4)) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
-
- def test_unpacking_vs_indexing(self):
- ni = 1000000
- list_items = [1,2,3,4]
- tuple_items = (1,2,3,4)
-
- for sequence in (list_items, tuple_items):
- st = time()
- for i in xrange(ni):
- one, two, three, four = sequence
- # END for each iteration
- elapsed = time() - st
- print >> sys.stderr, "Unpacked %i %ss of size %i in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
-
- st = time()
- for i in xrange(ni):
- one, two, three, four = sequence[0], sequence[1], sequence[2], sequence[3]
- # END for each iteration
- elapsed = time() - st
- print >> sys.stderr, "Unpacked %i %ss of size %i individually in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
-
- st = time()
- for i in xrange(ni):
- one, two = sequence[0], sequence[1]
- # END for each iteration
- elapsed = time() - st
- print >> sys.stderr, "Unpacked %i %ss of size %i individually (2 of 4) in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
- # END for each sequence
-
- def test_large_list_vs_iteration(self):
- # what costs more: alloc/realloc of lists, or the cpu strain of iterators ?
- def slow_iter(ni):
- for i in xrange(ni):
- yield i
- # END slow iter - be closer to the real world
-
- # alloc doesn't play a role here it seems
- for ni in (500, 1000, 10000, 20000, 40000):
- st = time()
- for i in list(xrange(ni)):
- i
- # END for each item
- elapsed = time() - st
- print >> sys.stderr, "Iterated %i items from list in %f s ( %f acc / s)" % (ni, elapsed, ni / elapsed)
-
- st = time()
- for i in slow_iter(ni):
- i
- # END for each item
- elapsed = time() - st
- print >> sys.stderr, "Iterated %i items from iterator in %f s ( %f acc / s)" % (ni, elapsed, ni / elapsed)
- # END for each number of iterations
-
- def test_type_vs_inst_class(self):
- class NewType(object):
- pass
-
- # lets see which way is faster
- inst = NewType()
-
- ni = 1000000
- st = time()
- for i in xrange(ni):
- inst.__class__()
- # END for each item
- elapsed = time() - st
- print >> sys.stderr, "Created %i items using inst.__class__ in %f s ( %f items / s)" % (ni, elapsed, ni / elapsed)
-
- st = time()
- for i in xrange(ni):
- type(inst)()
- # END for each item
- elapsed = time() - st
- print >> sys.stderr, "Created %i items using type(inst)() in %f s ( %f items / s)" % (ni, elapsed, ni / elapsed)
+
+ def test_access(self):
+ # compare dict vs. slot access
+ class Slotty(object):
+ __slots__ = "attr"
+ def __init__(self):
+ self.attr = 1
+
+ class Dicty(object):
+ def __init__(self):
+ self.attr = 1
+
+ class BigSlotty(object):
+ __slots__ = ('attr', ) + tuple('abcdefghijk')
+ def __init__(self):
+ for attr in self.__slots__:
+ setattr(self, attr, 1)
+
+ class BigDicty(object):
+ def __init__(self):
+ for attr in BigSlotty.__slots__:
+ setattr(self, attr, 1)
+
+ ni = 1000000
+ for cls in (Slotty, Dicty, BigSlotty, BigDicty):
+ cli = cls()
+ st = time()
+ for i in xrange(ni):
+ cli.attr
+ # END for each access
+ elapsed = time() - st
+ print >> sys.stderr, "Accessed %s.attr %i times in %s s ( %f acc / s)" % (cls.__name__, ni, elapsed, ni / elapsed)
+ # END for each class type
+
+ # check number of sequence accesses
+ for cls in (list, tuple):
+ x = 10
+ st = time()
+ s = cls(range(x))
+ for i in xrange(ni):
+ s[0]
+ s[1]
+ s[2]
+ # END for
+ elapsed = time() - st
+ na = ni * 3
+ print >> sys.stderr, "Accessed %s[x] %i times in %s s ( %f acc / s)" % (cls.__name__, na, elapsed, na / elapsed)
+ # END for each sequence
+
+ def test_instantiation(self):
+ ni = 100000
+ max_num_items = 4
+ for mni in range(max_num_items+1):
+ for cls in (tuple, list):
+ st = time()
+ for i in xrange(ni):
+ if mni == 0:
+ cls()
+ elif mni == 1:
+ cls((1,))
+ elif mni == 2:
+ cls((1,2))
+ elif mni == 3:
+ cls((1,2,3))
+ elif mni == 4:
+ cls((1,2,3,4))
+ else:
+ cls(x for x in xrange(mni))
+ # END handle empty cls
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Created %i %ss of size %i in %f s ( %f inst / s)" % (ni, cls.__name__, mni, elapsed, ni / elapsed)
+ # END for each type
+ # END for each item count
+
+ # tuple and tuple direct
+ st = time()
+ for i in xrange(ni):
+ t = (1,2,3,4)
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Created %i tuples (1,2,3,4) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
+
+ st = time()
+ for i in xrange(ni):
+ t = tuple((1,2,3,4))
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Created %i tuples tuple((1,2,3,4)) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
+
+ def test_unpacking_vs_indexing(self):
+ ni = 1000000
+ list_items = [1,2,3,4]
+ tuple_items = (1,2,3,4)
+
+ for sequence in (list_items, tuple_items):
+ st = time()
+ for i in xrange(ni):
+ one, two, three, four = sequence
+ # END for each iteration
+ elapsed = time() - st
+ print >> sys.stderr, "Unpacked %i %ss of size %i in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+
+ st = time()
+ for i in xrange(ni):
+ one, two, three, four = sequence[0], sequence[1], sequence[2], sequence[3]
+ # END for each iteration
+ elapsed = time() - st
+ print >> sys.stderr, "Unpacked %i %ss of size %i individually in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+
+ st = time()
+ for i in xrange(ni):
+ one, two = sequence[0], sequence[1]
+ # END for each iteration
+ elapsed = time() - st
+ print >> sys.stderr, "Unpacked %i %ss of size %i individually (2 of 4) in %f s ( %f acc / s)" % (ni, type(sequence).__name__, len(sequence), elapsed, ni / elapsed)
+ # END for each sequence
+
+ def test_large_list_vs_iteration(self):
+ # what costs more: alloc/realloc of lists, or the cpu strain of iterators ?
+ def slow_iter(ni):
+ for i in xrange(ni):
+ yield i
+ # END slow iter - be closer to the real world
+
+ # alloc doesn't play a role here it seems
+ for ni in (500, 1000, 10000, 20000, 40000):
+ st = time()
+ for i in list(xrange(ni)):
+ i
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Iterated %i items from list in %f s ( %f acc / s)" % (ni, elapsed, ni / elapsed)
+
+ st = time()
+ for i in slow_iter(ni):
+ i
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Iterated %i items from iterator in %f s ( %f acc / s)" % (ni, elapsed, ni / elapsed)
+ # END for each number of iterations
+
+ def test_type_vs_inst_class(self):
+ class NewType(object):
+ pass
+
+ # lets see which way is faster
+ inst = NewType()
+
+ ni = 1000000
+ st = time()
+ for i in xrange(ni):
+ inst.__class__()
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Created %i items using inst.__class__ in %f s ( %f items / s)" % (ni, elapsed, ni / elapsed)
+
+ st = time()
+ for i in xrange(ni):
+ type(inst)()
+ # END for each item
+ elapsed = time() - st
+ print >> sys.stderr, "Created %i items using type(inst)() in %f s ( %f items / s)" % (ni, elapsed, ni / elapsed)