author		Dan McGee <dpmcgee@gmail.com>		2011-10-18 00:21:23 -0500
committer	Junio C Hamano <gitster@pobox.com>	2011-10-20 17:17:49 -0700
commit		38d4debb6d180ca53fcb12b8115e81fd4c5262d0 (patch)
tree		decc9cb8da6d4f9bd4abf102e3bf98e907ed3102 /builtin/pack-objects.c
parent		f380872f0abc7fe98022696996d346df99c53f1a (diff)
download	git-38d4debb6d180ca53fcb12b8115e81fd4c5262d0.tar.gz
pack-objects: don't traverse objects unnecessarily
This brings back some of the performance lost in optimizing recency
order inside pack objects. We were doing extreme amounts of object
re-traversal: for the 2.14 million objects in the Linux kernel
repository, we were calling add_to_write_order() over 1.03 billion
times (a 0.2% hit rate, making 99.8% of these calls extraneous).

Two optimizations take place here: we can start our objects array
iteration from a known point where we left off before we started
trying to find our tags, and we don't need to do the deep dives
required by add_family_to_write_order() if the object has already
been marked as filled.

These two optimizations bring some pretty spectacular results via
`perf stat`:

    task-clock:   83373 ms          --> 43800 ms           (50% faster)
    cycles:       221,633,461,676   --> 116,307,209,986    (47% fewer)
    instructions: 149,299,179,939   --> 122,998,800,184    (18% fewer)

Helped-by: Ramsay Jones (format string fix in "die" message)
Signed-off-by: Dan McGee <dpmcgee@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Diffstat (limited to 'builtin/pack-objects.c')
-rw-r--r--	builtin/pack-objects.c	| 18
1 file changed, 12 insertions, 6 deletions
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 5b544bf444..80ab6c39f9 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -520,7 +520,7 @@ static void add_family_to_write_order(struct object_entry **wo,
 static struct object_entry **compute_write_order(void)
 {
-	unsigned int i, wo_end;
+	unsigned int i, wo_end, last_untagged;
 	struct object_entry **wo = xmalloc(nr_objects * sizeof(*wo));
@@ -551,7 +551,7 @@ static struct object_entry **compute_write_order(void)
 	for_each_tag_ref(mark_tagged, NULL);
 	/*
-	 * Give the commits in the original recency order until
+	 * Give the objects in the original recency order until
 	 * we see a tagged tip.
 	 */
 	for (i = wo_end = 0; i < nr_objects; i++) {
@@ -559,6 +559,7 @@ static struct object_entry **compute_write_order(void)
 			break;
 		add_to_write_order(wo, &wo_end, &objects[i]);
 	}
+	last_untagged = i;
 	/*
 	 * Then fill all the tagged tips.
@@ -571,7 +572,7 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * And then all remaining commits and tags.
 	 */
-	for (i = 0; i < nr_objects; i++) {
+	for (i = last_untagged; i < nr_objects; i++) {
 		if (objects[i].type != OBJ_COMMIT &&
 		    objects[i].type != OBJ_TAG)
 			continue;
@@ -581,7 +582,7 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * And then all the trees.
 	 */
-	for (i = 0; i < nr_objects; i++) {
+	for (i = last_untagged; i < nr_objects; i++) {
 		if (objects[i].type != OBJ_TREE)
 			continue;
 		add_to_write_order(wo, &wo_end, &objects[i]);
@@ -590,8 +591,13 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * Finally all the rest in really tight order
 	 */
-	for (i = 0; i < nr_objects; i++)
-		add_family_to_write_order(wo, &wo_end, &objects[i]);
+	for (i = last_untagged; i < nr_objects; i++) {
+		if (!objects[i].filled)
+			add_family_to_write_order(wo, &wo_end, &objects[i]);
+	}
+
+	if (wo_end != nr_objects)
+		die("ordered %u objects, expected %"PRIu32, wo_end, nr_objects);
 	return wo;
 }
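
For readers skimming the diff, here is a minimal, self-contained sketch of the
same pattern in isolation. It is illustrative only: "struct entry", "fill()",
the array size and the tagged index are invented for the example and are not
git's data structures. The point is the shape of the change above: remember
where the first pass stopped (last_untagged) and skip entries already marked
filled, so later passes neither rescan the prefix nor redo finished work.

	/* Illustrative sketch; not git code. */
	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		int tagged;
		int filled;
	};

	static void fill(struct entry *e, unsigned int *done)
	{
		if (!e->filled) {
			e->filled = 1;
			(*done)++;
		}
	}

	int main(void)
	{
		unsigned int n = 8, i, done = 0, last_untagged;
		struct entry *objs = calloc(n, sizeof(*objs));

		if (!objs)
			return 1;
		objs[3].tagged = 1;	/* pretend entry 3 is a tagged tip */

		/* First pass: take entries in order until a tagged tip is seen... */
		for (i = 0; i < n; i++) {
			if (objs[i].tagged)
				break;
			fill(&objs[i], &done);
		}
		/* ...and remember where we stopped. */
		last_untagged = i;

		/* Later passes resume from last_untagged and skip filled entries. */
		for (i = last_untagged; i < n; i++) {
			if (!objs[i].filled)
				fill(&objs[i], &done);
		}

		/* Same sanity check as the patch: everything must be ordered once. */
		if (done != n) {
			fprintf(stderr, "ordered %u entries, expected %u\n", done, n);
			free(objs);
			return 1;
		}
		printf("ordered all %u entries\n", n);
		free(objs);
		return 0;
	}

Compiled with e.g. `cc -Wall sketch.c`, this prints "ordered all 8 entries".
Dropping the last_untagged resume point or the filled check does not change the
result, only the amount of redundant re-traversal, which is exactly the
overhead the patch removes.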