-rw-r--r--.github/workflows/main.yml15
-rw-r--r--.gitignore5
-rw-r--r--Documentation/CodingGuidelines51
-rw-r--r--Documentation/Makefile11
-rw-r--r--Documentation/MyFirstContribution.txt2
-rw-r--r--Documentation/RelNotes/2.38.2.txt60
-rw-r--r--Documentation/RelNotes/2.39.0.txt276
-rw-r--r--Documentation/SubmittingPatches4
-rwxr-xr-xDocumentation/build-docdep.perl5
-rw-r--r--Documentation/config.txt6
-rw-r--r--Documentation/config/bundle.txt24
-rw-r--r--Documentation/config/core.txt2
-rw-r--r--Documentation/config/extensions.txt76
-rw-r--r--Documentation/config/feature.txt3
-rw-r--r--Documentation/config/fsck.txt4
-rw-r--r--Documentation/config/fsmonitor--daemon.txt11
-rw-r--r--Documentation/config/index.txt8
-rw-r--r--Documentation/config/log.txt6
-rw-r--r--Documentation/config/mergetool.txt2
-rw-r--r--Documentation/config/push.txt14
-rw-r--r--Documentation/config/refs.txt13
-rw-r--r--Documentation/config/submodule.txt12
-rw-r--r--Documentation/fsck-msgids.txt161
-rw-r--r--Documentation/git-annotate.txt2
-rw-r--r--Documentation/git-clean.txt10
-rw-r--r--Documentation/git-clone.txt8
-rw-r--r--Documentation/git-commit-graph.txt5
-rw-r--r--Documentation/git-credential-cache--daemon.txt4
-rw-r--r--Documentation/git-credential-cache.txt4
-rw-r--r--Documentation/git-credential.txt2
-rw-r--r--Documentation/git-diff-files.txt2
-rw-r--r--Documentation/git-diff.txt8
-rw-r--r--Documentation/git-fast-export.txt2
-rw-r--r--Documentation/git-fsck.txt12
-rw-r--r--Documentation/git-fsmonitor--daemon.txt37
-rw-r--r--Documentation/git-hash-object.txt3
-rw-r--r--Documentation/git-interpret-trailers.txt5
-rw-r--r--Documentation/git-ls-files.txt4
-rw-r--r--Documentation/git-maintenance.txt20
-rw-r--r--Documentation/git-merge-base.txt4
-rw-r--r--Documentation/git-merge-tree.txt92
-rw-r--r--Documentation/git-multi-pack-index.txt7
-rw-r--r--Documentation/git-mv.txt4
-rw-r--r--Documentation/git-notes.txt10
-rw-r--r--Documentation/git-pack-redundant.txt4
-rw-r--r--Documentation/git-patch-id.txt24
-rw-r--r--Documentation/git-prune-packed.txt2
-rw-r--r--Documentation/git-push.txt6
-rw-r--r--Documentation/git-read-tree.txt2
-rw-r--r--Documentation/git-rebase.txt32
-rw-r--r--Documentation/git-receive-pack.txt4
-rw-r--r--Documentation/git-reflog.txt17
-rw-r--r--Documentation/git-repack.txt6
-rw-r--r--Documentation/git-rerere.txt2
-rw-r--r--Documentation/git-rev-list.txt2
-rw-r--r--Documentation/git-rev-parse.txt7
-rw-r--r--Documentation/git-send-email.txt15
-rw-r--r--Documentation/git-send-pack.txt5
-rw-r--r--Documentation/git-shortlog.txt8
-rw-r--r--Documentation/git-show-branch.txt4
-rw-r--r--Documentation/git-show-ref.txt4
-rw-r--r--Documentation/git-sparse-checkout.txt2
-rw-r--r--Documentation/git-stash.txt17
-rw-r--r--Documentation/git-status.txt61
-rw-r--r--Documentation/git-symbolic-ref.txt11
-rw-r--r--Documentation/git-tag.txt18
-rw-r--r--Documentation/git-update-server-info.txt8
-rw-r--r--Documentation/git-upload-archive.txt4
-rw-r--r--Documentation/git-var.txt2
-rw-r--r--Documentation/git-verify-commit.txt2
-rw-r--r--Documentation/git-verify-pack.txt2
-rw-r--r--Documentation/git-verify-tag.txt2
-rw-r--r--Documentation/git-worktree.txt15
-rw-r--r--Documentation/git.txt43
-rw-r--r--Documentation/gitcredentials.txt16
-rw-r--r--Documentation/gitformat-chunk.txt26
-rw-r--r--Documentation/gitformat-commit-graph.txt6
-rw-r--r--Documentation/glossary-content.txt27
-rw-r--r--Documentation/howto/coordinate-embargoed-releases.txt165
-rw-r--r--Documentation/howto/maintain-git.txt11
-rwxr-xr-xDocumentation/lint-fsck-msgids.perl70
-rw-r--r--Documentation/rev-list-options.txt7
-rw-r--r--Documentation/revisions.txt2
-rw-r--r--Documentation/technical/api-trace2.txt190
-rw-r--r--Documentation/technical/bundle-uri.txt8
-rw-r--r--Documentation/technical/commit-graph.txt8
-rw-r--r--Documentation/technical/parallel-checkout.txt2
-rw-r--r--Documentation/technical/repository-version.txt4
-rw-r--r--Documentation/technical/sparse-checkout.txt1103
-rwxr-xr-xGIT-VERSION-GEN2
-rw-r--r--INSTALL4
-rw-r--r--Makefile452
l---------RelNotes2
-rw-r--r--add-interactive.c9
-rw-r--r--apply.c30
-rw-r--r--archive-tar.c1
-rw-r--r--archive.c10
-rw-r--r--attr.c41
-rw-r--r--bisect.c12
-rw-r--r--builtin.h2
-rw-r--r--builtin/add.c21
-rw-r--r--builtin/am.c12
-rw-r--r--builtin/bisect.c (renamed from builtin/bisect--helper.c)358
-rw-r--r--builtin/blame.c22
-rw-r--r--builtin/branch.c119
-rw-r--r--builtin/bugreport.c12
-rw-r--r--builtin/bundle.c43
-rw-r--r--builtin/cat-file.c2
-rw-r--r--builtin/checkout.c2
-rw-r--r--builtin/clean.c2
-rw-r--r--builtin/clone.c56
-rw-r--r--builtin/commit-graph.c10
-rw-r--r--builtin/commit-tree.c5
-rw-r--r--builtin/commit.c28
-rw-r--r--builtin/config.c42
-rw-r--r--builtin/credential-cache--daemon.c4
-rw-r--r--builtin/describe.c5
-rw-r--r--builtin/diagnose.c3
-rw-r--r--builtin/diff-files.c1
-rw-r--r--builtin/diff-index.c3
-rw-r--r--builtin/diff-tree.c6
-rw-r--r--builtin/diff.c28
-rw-r--r--builtin/difftool.c24
-rw-r--r--builtin/fetch.c34
-rw-r--r--builtin/for-each-repo.c7
-rw-r--r--builtin/fsck.c11
-rw-r--r--builtin/fsmonitor--daemon.c15
-rw-r--r--builtin/gc.c190
-rw-r--r--builtin/grep.c48
-rw-r--r--builtin/hash-object.c5
-rw-r--r--builtin/help.c2
-rw-r--r--builtin/init-db.c5
-rw-r--r--builtin/interpret-trailers.c4
-rw-r--r--builtin/log.c3
-rw-r--r--builtin/ls-files.c1
-rw-r--r--builtin/ls-remote.c2
-rw-r--r--builtin/merge-base.c2
-rw-r--r--builtin/merge-index.c4
-rw-r--r--builtin/merge-tree.c103
-rw-r--r--builtin/merge.c54
-rw-r--r--builtin/multi-pack-index.c7
-rw-r--r--builtin/notes.c22
-rw-r--r--builtin/pack-objects.c4
-rw-r--r--builtin/pack-redundant.c2
-rw-r--r--builtin/pack-refs.c2
-rw-r--r--builtin/patch-id.c113
-rw-r--r--builtin/pull.c147
-rw-r--r--builtin/push.c16
-rw-r--r--builtin/read-tree.c8
-rw-r--r--builtin/rebase.c313
-rw-r--r--builtin/receive-pack.c10
-rw-r--r--builtin/reflog.c3
-rw-r--r--builtin/remote.c90
-rw-r--r--builtin/repack.c194
-rw-r--r--builtin/rerere.c2
-rw-r--r--builtin/reset.c4
-rw-r--r--builtin/rev-list.c4
-rw-r--r--builtin/rev-parse.c19
-rw-r--r--builtin/revert.c13
-rw-r--r--builtin/rm.c7
-rw-r--r--builtin/send-pack.c1
-rw-r--r--builtin/shortlog.c87
-rw-r--r--builtin/show-branch.c3
-rw-r--r--builtin/show-ref.c4
-rw-r--r--builtin/sparse-checkout.c2
-rw-r--r--builtin/stash.c75
-rw-r--r--builtin/submodule--helper.c282
-rw-r--r--builtin/symbolic-ref.c21
-rw-r--r--builtin/tag.c10
-rw-r--r--builtin/unpack-file.c3
-rw-r--r--builtin/update-index.c10
-rw-r--r--builtin/update-server-info.c2
-rw-r--r--builtin/upload-archive.c2
-rw-r--r--builtin/upload-pack.c3
-rw-r--r--builtin/verify-commit.c2
-rw-r--r--builtin/verify-pack.c2
-rw-r--r--builtin/verify-tag.c2
-rw-r--r--builtin/worktree.c179
-rw-r--r--bundle-uri.c458
-rw-r--r--bundle-uri.h93
-rw-r--r--bundle.c42
-rw-r--r--bundle.h15
-rw-r--r--cache.h3
-rw-r--r--chunk-format.c109
-rw-r--r--chunk-format.h18
-rwxr-xr-xci/lib.sh8
-rwxr-xr-xci/run-build-and-tests.sh14
-rw-r--r--combine-diff.c3
-rw-r--r--commit-graph.c2
-rw-r--r--commit.c8
-rw-r--r--commit.h13
-rw-r--r--common-main.c1
-rw-r--r--compat/fsmonitor/fsm-ipc-darwin.c52
-rw-r--r--compat/fsmonitor/fsm-ipc-win32.c9
-rw-r--r--compat/fsmonitor/fsm-listen-darwin.c16
-rw-r--r--compat/fsmonitor/fsm-path-utils-darwin.c135
-rw-r--r--compat/fsmonitor/fsm-path-utils-win32.c145
-rw-r--r--compat/fsmonitor/fsm-settings-darwin.c77
-rw-r--r--compat/fsmonitor/fsm-settings-win32.c174
-rw-r--r--compat/mingw.c11
-rw-r--r--compat/nonblock.c2
-rw-r--r--config.c32
-rw-r--r--config.h1
-rw-r--r--config.mak.dev25
-rw-r--r--connected.c9
-rw-r--r--connected.h7
-rw-r--r--contrib/buildsystems/CMakeLists.txt191
-rw-r--r--contrib/coccinelle/.gitignore2
-rw-r--r--contrib/coccinelle/README49
-rw-r--r--contrib/coccinelle/hashmap.cocci2
-rw-r--r--contrib/coccinelle/preincr.cocci2
-rwxr-xr-xcontrib/coccinelle/spatchcache304
-rw-r--r--contrib/coccinelle/strbuf.cocci2
-rw-r--r--contrib/coccinelle/swap.cocci2
-rw-r--r--contrib/coccinelle/the_repository.pending.cocci1
-rw-r--r--contrib/completion/git-completion.bash51
-rwxr-xr-xcontrib/credential/netrc/git-credential-netrc.perl5
-rw-r--r--contrib/credential/osxkeychain/git-credential-osxkeychain.c5
-rw-r--r--contrib/credential/wincred/git-credential-wincred.c7
-rwxr-xr-xcontrib/subtree/git-subtree.sh177
-rw-r--r--contrib/subtree/git-subtree.txt16
-rwxr-xr-xcontrib/subtree/t/t7900-subtree.sh60
-rw-r--r--convert.c4
-rw-r--r--csum-file.c14
-rw-r--r--csum-file.h7
-rw-r--r--date.c6
-rw-r--r--delta-islands.c71
-rw-r--r--diff-lib.c105
-rw-r--r--diff-merges.c40
-rw-r--r--diff.c147
-rw-r--r--diff.h2
-rw-r--r--diffcore-pickaxe.c4
-rw-r--r--dir.c16
-rw-r--r--dir.h1
-rw-r--r--exec-cmd.c2
-rw-r--r--fsck.h8
-rw-r--r--fsmonitor--daemon.h3
-rw-r--r--fsmonitor-ipc.c28
-rw-r--r--fsmonitor-ipc.h4
-rw-r--r--fsmonitor-path-utils.h60
-rw-r--r--fsmonitor-settings.c68
-rw-r--r--fsmonitor-settings.h4
-rw-r--r--fsmonitor.c9
-rw-r--r--gettext.c2
-rwxr-xr-xgit-bisect.sh84
-rw-r--r--git-compat-util.h22
-rwxr-xr-xgit-submodule.sh3
-rw-r--r--git.c33
-rw-r--r--gpg-interface.c7
-rw-r--r--grep.c15
-rw-r--r--grep.h1
-rw-r--r--help.c2
-rw-r--r--hook.c23
-rw-r--r--http.c47
-rw-r--r--ll-merge.c25
-rw-r--r--ls-refs.c13
-rw-r--r--mailinfo.c2
-rw-r--r--merge-ort.c132
-rw-r--r--merge.c18
-rw-r--r--midx.c48
-rw-r--r--negotiator/skipping.c29
-rw-r--r--object-file.c21
-rw-r--r--object.c8
-rw-r--r--oss-fuzz/.gitignore3
-rw-r--r--oss-fuzz/fuzz-commit-graph.c (renamed from fuzz-commit-graph.c)0
-rw-r--r--oss-fuzz/fuzz-pack-headers.c (renamed from fuzz-pack-headers.c)0
-rw-r--r--oss-fuzz/fuzz-pack-idx.c (renamed from fuzz-pack-idx.c)0
-rw-r--r--pack-bitmap-write.c8
-rw-r--r--pack-bitmap.c13
-rw-r--r--patch-ids.c10
-rw-r--r--patch-ids.h2
-rw-r--r--path.c8
-rw-r--r--perl/Git.pm36
-rw-r--r--promisor-remote.c23
-rw-r--r--promisor-remote.h11
-rw-r--r--read-cache.c92
-rw-r--r--ref-filter.c11
-rw-r--r--reflog-walk.c2
-rw-r--r--reflog.c13
-rw-r--r--refs.c126
-rw-r--r--refs.h43
-rw-r--r--refs/files-backend.c88
-rw-r--r--refs/packed-backend.c882
-rw-r--r--refs/packed-backend.h281
-rw-r--r--refs/packed-format-v1.c456
-rw-r--r--refs/packed-format-v2.c624
-rw-r--r--refs/refs-internal.h9
-rw-r--r--repo-settings.c14
-rw-r--r--repository.c2
-rw-r--r--repository.h9
-rw-r--r--reset.c1
-rw-r--r--revision.c161
-rw-r--r--revision.h61
-rw-r--r--run-command.c284
-rw-r--r--run-command.h129
-rw-r--r--scalar.c72
-rw-r--r--sequencer.c142
-rw-r--r--sequencer.h6
-rw-r--r--setup.c26
-rw-r--r--sha1dc_git.h1
-rw-r--r--shared.mak8
-rw-r--r--shell.c17
-rw-r--r--shortlog.h5
-rw-r--r--sparse-index.c30
-rw-r--r--string-list.c2
-rw-r--r--string-list.h7
-rw-r--r--submodule-config.c2
-rw-r--r--submodule.c310
-rw-r--r--submodule.h13
-rw-r--r--t/Makefile2
-rw-r--r--t/README3
-rwxr-xr-xt/chainlint.pl171
-rw-r--r--t/chainlint/block-comment.expect2
-rw-r--r--t/chainlint/case-comment.expect3
-rw-r--r--t/chainlint/close-subshell.expect3
-rw-r--r--t/chainlint/comment.expect4
-rw-r--r--t/chainlint/double-here-doc.expect14
-rw-r--r--t/chainlint/empty-here-doc.expect3
-rw-r--r--t/chainlint/for-loop.expect4
-rw-r--r--t/chainlint/here-doc-close-subshell.expect4
-rw-r--r--t/chainlint/here-doc-indent-operator.expect10
-rw-r--r--t/chainlint/here-doc-multi-line-command-subst.expect5
-rw-r--r--t/chainlint/here-doc-multi-line-string.expect4
-rw-r--r--t/chainlint/here-doc.expect24
-rw-r--r--t/chainlint/if-then-else.expect4
-rw-r--r--t/chainlint/incomplete-line.expect10
-rw-r--r--t/chainlint/inline-comment.expect4
-rw-r--r--t/chainlint/loop-detect-status.expect2
-rw-r--r--t/chainlint/nested-here-doc.expect27
-rw-r--r--t/chainlint/nested-subshell-comment.expect2
-rw-r--r--t/chainlint/subshell-here-doc.expect28
-rw-r--r--t/chainlint/t7900-subtree.expect4
-rw-r--r--t/chainlint/while-loop.expect4
-rwxr-xr-xt/check-non-portable-shell.pl1
-rw-r--r--t/helper/test-bundle-uri.c95
-rw-r--r--t/helper/test-cache-tree.c64
-rw-r--r--t/helper/test-fake-ssh.c8
-rw-r--r--t/helper/test-path-utils.c3
-rw-r--r--t/helper/test-proc-receive.c2
-rw-r--r--t/helper/test-run-command.c98
-rw-r--r--t/helper/test-sha1.c8
-rw-r--r--t/helper/test-submodule.c92
-rw-r--r--t/helper/test-tool.c3
-rw-r--r--t/helper/test-tool.h3
-rw-r--r--t/helper/test-trace2.c191
-rw-r--r--t/lib-gettext.sh2
-rw-r--r--t/lib-gitweb.sh2
-rw-r--r--t/lib-httpd.sh5
-rw-r--r--t/lib-httpd/apache.conf21
-rwxr-xr-xt/perf/p0006-read-tree-checkout.sh8
-rwxr-xr-xt/perf/p0090-cache-tree.sh36
-rwxr-xr-xt/perf/p1401-ref-operations.sh52
-rwxr-xr-xt/perf/p2000-sparse-operations.sh1
-rwxr-xr-xt/perf/p7102-reset.sh21
-rwxr-xr-xt/perf/run4
-rwxr-xr-xt/t0013-sha1dc.sh6
-rwxr-xr-xt/t0033-safe-directory.sh9
-rwxr-xr-xt/t0035-safe-bare-repository.sh9
-rwxr-xr-xt/t0061-run-command.sh54
-rwxr-xr-xt/t0068-for-each-repo.sh7
-rwxr-xr-xt/t0070-fundamental.sh1
-rwxr-xr-xt/t0211-trace2-perf.sh95
-rw-r--r--t/t0211/scrub_perf.perl6
-rwxr-xr-xt/t0410-partial-clone.sh14
-rwxr-xr-xt/t0450-txt-doc-vs-help.sh172
-rw-r--r--t/t0450/txt-help-mismatches58
-rwxr-xr-xt/t1002-read-tree-m-u-2way.sh16
-rwxr-xr-xt/t1011-read-tree-sparse-checkout.sh1
-rwxr-xr-xt/t1022-read-tree-partial-clone.sh4
-rwxr-xr-xt/t1092-sparse-checkout-compatibility.sh72
-rwxr-xr-xt/t1304-default-acl.sh4
-rwxr-xr-xt/t1401-symbolic-ref.sh14
-rwxr-xr-xt/t1404-update-ref-errors.sh2
-rwxr-xr-xt/t1409-avoid-packing-refs.sh23
-rwxr-xr-xt/t1413-reflog-detach.sh1
-rwxr-xr-xt/t1501-work-tree.sh2
-rwxr-xr-xt/t1600-index.sh8
-rwxr-xr-xt/t1800-hook.sh2
-rwxr-xr-xt/t2012-checkout-last.sh1
-rwxr-xr-xt/t2018-checkout-branch.sh1
-rwxr-xr-xt/t2025-checkout-no-overlay.sh1
-rwxr-xr-xt/t2400-worktree-add.sh45
-rwxr-xr-xt/t3009-ls-files-others-nonsubmodule.sh1
-rwxr-xr-xt/t3010-ls-files-killed-modified.sh2
-rwxr-xr-xt/t3050-subprojects-fetch.sh1
-rwxr-xr-xt/t3060-ls-files-with-tree.sh2
-rwxr-xr-xt/t3200-branch.sh54
-rwxr-xr-xt/t3202-show-branch.sh46
-rwxr-xr-xt/t3204-branch-name-interpretation.sh24
-rwxr-xr-xt/t3210-pack-refs.sh8
-rwxr-xr-xt/t3212-ref-formats.sh100
-rwxr-xr-xt/t3301-notes.sh18
-rwxr-xr-xt/t3305-notes-fanout.sh2
-rwxr-xr-xt/t3404-rebase-interactive.sh113
-rwxr-xr-xt/t3406-rebase-message.sh185
-rwxr-xr-xt/t3409-rebase-environ.sh1
-rwxr-xr-xt/t3413-rebase-hook.sh1
-rwxr-xr-xt/t3415-rebase-autosquash.sh13
-rwxr-xr-xt/t3416-rebase-onto-threedots.sh62
-rwxr-xr-xt/t3419-rebase-patch-id.sh63
-rwxr-xr-xt/t3428-rebase-signoff.sh1
-rwxr-xr-xt/t3429-rebase-edit-todo.sh1
-rwxr-xr-xt/t3430-rebase-merges.sh17
-rwxr-xr-xt/t3431-rebase-fork-point.sh2
-rwxr-xr-xt/t3433-rebase-across-mode-change.sh1
-rwxr-xr-xt/t3435-rebase-gpg-sign.sh8
-rwxr-xr-xt/t3438-rebase-broken-files.sh59
-rwxr-xr-xt/t3700-add.sh2
-rwxr-xr-xt/t3702-add-edit.sh2
-rwxr-xr-xt/t4012-diff-binary.sh14
-rwxr-xr-xt/t4014-format-patch.sh8
-rwxr-xr-xt/t4015-diff-whitespace.sh4
-rwxr-xr-xt/t4027-diff-submodule.sh19
-rwxr-xr-xt/t4038-diff-combined.sh10
-rwxr-xr-xt/t4045-diff-relative.sh2
-rwxr-xr-xt/t4052-stat-output.sh1
-rwxr-xr-xt/t4053-diff-no-index.sh1
-rwxr-xr-xt/t4067-diff-partial-clone.sh1
-rwxr-xr-xt/t4111-apply-subdir.sh1
-rwxr-xr-xt/t4135-apply-weird-filenames.sh1
-rwxr-xr-xt/t4141-apply-too-large.sh23
-rwxr-xr-xt/t4201-shortlog.sh39
-rwxr-xr-xt/t4202-log.sh9
-rwxr-xr-xt/t4204-patch-id.sh95
-rwxr-xr-xt/t4213-log-tabexpand.sh1
-rwxr-xr-xt/t4301-merge-tree-write-tree.sh112
-rwxr-xr-xt/t5000-tar-tree.sh7
-rwxr-xr-xt/t5100-mailinfo.sh4
-rwxr-xr-xt/t5310-pack-bitmaps.sh5
-rwxr-xr-xt/t5319-multi-pack-index.sh94
-rwxr-xr-xt/t5320-delta-islands.sh2
-rwxr-xr-xt/t5326-multi-pack-bitmaps.sh24
-rwxr-xr-xt/t5502-quickfetch.sh2
-rwxr-xr-xt/t5505-remote.sh11
-rwxr-xr-xt/t5516-fetch-push.sh31
-rwxr-xr-xt/t5526-fetch-submodules.sh16
-rwxr-xr-xt/t5531-deep-submodule-push.sh50
-rwxr-xr-xt/t5539-fetch-http-shallow.sh7
-rwxr-xr-xt/t5541-http-push-smart.sh7
-rwxr-xr-xt/t5542-push-http-shallow.sh7
-rwxr-xr-xt/t5544-pack-objects-hook.sh2
-rwxr-xr-xt/t5550-http-fetch-dumb.sh2
-rwxr-xr-xt/t5551-http-fetch-smart.sh97
-rwxr-xr-xt/t5554-noop-fetch-negotiator.sh2
-rwxr-xr-xt/t5558-clone-bundle-uri.sh282
-rwxr-xr-xt/t5559-http-fetch-smart-http2.sh4
-rwxr-xr-xt/t5601-clone.sh45
-rwxr-xr-xt/t5606-clone-options.sh9
-rwxr-xr-xt/t5610-clone-detached.sh1
-rwxr-xr-xt/t5611-clone-config.sh1
-rwxr-xr-xt/t5614-clone-submodules-shallow.sh1
-rwxr-xr-xt/t5617-clone-submodules.sh (renamed from t/t5617-clone-submodules-remote.sh)41
-rwxr-xr-xt/t5618-alternate-refs.sh2
-rwxr-xr-xt/t5702-protocol-v2.sh2
-rwxr-xr-xt/t5750-bundle-uri-parse.sh171
-rwxr-xr-xt/t6018-rev-list-glob.sh40
-rwxr-xr-xt/t6021-rev-list-exclude-hidden.sh163
-rwxr-xr-xt/t6030-bisect-porcelain.sh158
-rwxr-xr-xt/t6060-merge-index.sh2
-rwxr-xr-xt/t6102-rev-list-unexpected-objects.sh4
-rwxr-xr-xt/t6300-for-each-ref.sh40
-rwxr-xr-xt/t6301-for-each-ref-errors.sh1
-rwxr-xr-xt/t6401-merge-criss-cross.sh2
-rwxr-xr-xt/t6406-merge-attr.sh1
-rwxr-xr-xt/t6407-merge-binary.sh1
-rwxr-xr-xt/t6415-merge-dir-to-symlink.sh1
-rwxr-xr-xt/t6423-merge-rename-directories.sh56
-rwxr-xr-xt/t6435-merge-sparse.sh1
-rwxr-xr-xt/t6500-gc.sh96
-rwxr-xr-xt/t7001-mv.sh62
-rwxr-xr-xt/t7003-filter-branch.sh4
-rwxr-xr-xt/t7065-wtstatus-slow.sh70
-rwxr-xr-xt/t7103-reset-bare.sh2
-rwxr-xr-xt/t7400-submodule-basic.sh10
-rwxr-xr-xt/t7406-submodule-update.sh156
-rwxr-xr-xt/t7407-submodule-foreach.sh5
-rwxr-xr-xt/t7411-submodule-config.sh36
-rwxr-xr-xt/t7412-submodule-absorbgitdirs.sh36
-rwxr-xr-xt/t7418-submodule-sparse-gitmodules.sh4
-rwxr-xr-xt/t7422-submodule-output.sh170
-rwxr-xr-xt/t7504-commit-msg-hook.sh1
-rwxr-xr-xt/t7506-status-submodule.sh19
-rwxr-xr-xt/t7517-per-repo-email.sh1
-rwxr-xr-xt/t7520-ignored-hook-warning.sh1
-rwxr-xr-xt/t7527-builtin-fsmonitor.sh18
-rwxr-xr-xt/t7605-merge-resolve.sh1
-rwxr-xr-xt/t7609-mergetool--lib.sh2
-rwxr-xr-xt/t7610-mergetool.sh4
-rwxr-xr-xt/t7614-merge-signoff.sh1
-rwxr-xr-xt/t7700-repack.sh182
-rwxr-xr-xt/t7701-repack-unpack-unreachable.sh4
-rwxr-xr-xt/t7703-repack-geometric.sh6
-rwxr-xr-xt/t7810-grep.sh15
-rwxr-xr-xt/t7900-maintenance.sh30
-rwxr-xr-xt/t9001-send-email.sh8
-rwxr-xr-xt/t9003-help-autocorrect.sh2
-rwxr-xr-xt/t9115-git-svn-dcommit-funky-renames.sh1
-rwxr-xr-xt/t9133-git-svn-nested-git-repo.sh6
-rwxr-xr-xt/t9134-git-svn-ignore-paths.sh8
-rwxr-xr-xt/t9140-git-svn-reset.sh4
-rwxr-xr-xt/t9146-git-svn-empty-dirs.sh1
-rwxr-xr-xt/t9147-git-svn-include-paths.sh8
-rwxr-xr-xt/t9148-git-svn-propset.sh1
-rwxr-xr-xt/t9160-git-svn-preserve-empty-dirs.sh1
-rwxr-xr-xt/t9210-scalar.sh19
-rwxr-xr-xt/t9700-perl-git.sh4
-rwxr-xr-xt/t9700/test.pl12
-rwxr-xr-xt/t9814-git-p4-rename.sh2
-rwxr-xr-xt/t9815-git-p4-submit-fail.sh4
-rwxr-xr-xt/t9902-completion.sh46
-rwxr-xr-xt/t9903-bash-prompt.sh2
-rw-r--r--t/test-lib-functions.sh33
-rw-r--r--t/test-lib.sh41
-rw-r--r--tmp-objdir.c40
-rw-r--r--tmp-objdir.h6
-rw-r--r--trace2.c121
-rw-r--r--trace2.h101
-rw-r--r--trace2/tr2_ctr.c101
-rw-r--r--trace2/tr2_ctr.h104
-rw-r--r--trace2/tr2_tgt.h16
-rw-r--r--trace2/tr2_tgt_event.c47
-rw-r--r--trace2/tr2_tgt_normal.c39
-rw-r--r--trace2/tr2_tgt_perf.c43
-rw-r--r--trace2/tr2_tls.c34
-rw-r--r--trace2/tr2_tls.h55
-rw-r--r--trace2/tr2_tmr.c182
-rw-r--r--trace2/tr2_tmr.h140
-rw-r--r--transport.c2
-rw-r--r--unpack-trees.c6
-rw-r--r--unpack-trees.h3
-rw-r--r--upload-pack.c30
-rw-r--r--worktree.c59
-rw-r--r--worktree.h10
-rw-r--r--write-or-die.c1
-rw-r--r--wt-status.c28
535 files changed, 16183 insertions, 4406 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 831f4df56c..4928cab0f1 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -37,14 +37,14 @@ jobs:
echo "::set-output name=enabled::$enabled"
- name: skip if the commit or tree was already tested
id: skip-if-redundant
- uses: actions/github-script@v3
+ uses: actions/github-script@v6
if: steps.check-ref.outputs.enabled == 'yes'
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
try {
// Figure out workflow ID, commit and tree
- const { data: run } = await github.actions.getWorkflowRun({
+ const { data: run } = await github.rest.actions.getWorkflowRun({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.runId,
@@ -54,7 +54,7 @@ jobs:
const tree_id = run.head_commit.tree_id;
// See whether there is a successful run for that commit or tree
- const { data: runs } = await github.actions.listWorkflowRuns({
+ const { data: runs } = await github.rest.actions.listWorkflowRuns({
owner: context.repo.owner,
repo: context.repo.repo,
per_page: 500,
@@ -238,6 +238,9 @@ jobs:
os: ubuntu
cc_package: gcc-8
pool: ubuntu-latest
+ - jobname: linux-cmake-ctest
+ cc: gcc
+ pool: ubuntu-latest
- jobname: osx-clang
cc: clang
pool: macos-latest
@@ -251,6 +254,12 @@ jobs:
- jobname: linux-leaks
cc: gcc
pool: ubuntu-latest
+ - jobname: linux-asan
+ cc: gcc
+ pool: ubuntu-latest
+ - jobname: linux-ubsan
+ cc: gcc
+ pool: ubuntu-latest
env:
CC: ${{matrix.vector.cc}}
CC_PACKAGE: ${{matrix.vector.cc_package}}
diff --git a/.gitignore b/.gitignore
index b3dcafcb33..b9d91eaed1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,4 @@
-/fuzz-commit-graph
/fuzz_corpora
-/fuzz-pack-headers
-/fuzz-pack-idx
/GIT-BUILD-OPTIONS
/GIT-CFLAGS
/GIT-LDFLAGS
@@ -10,6 +7,7 @@
/GIT-PERL-HEADER
/GIT-PYTHON-VARS
/GIT-SCRIPT-DEFINES
+/GIT-SPATCH-DEFINES
/GIT-USER-AGENT
/GIT-VERSION-FILE
/bin-wrappers/
@@ -22,7 +20,6 @@
/git-archimport
/git-archive
/git-bisect
-/git-bisect--helper
/git-blame
/git-branch
/git-bugreport
diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines
index 9fca21cc5f..9d5c27807a 100644
--- a/Documentation/CodingGuidelines
+++ b/Documentation/CodingGuidelines
@@ -162,8 +162,6 @@ For shell scripts specifically (not exhaustive):
- We do not use \{m,n\};
- - We do not use -E;
-
- We do not use ? or + (which are \{0,1\} and \{1,\}
respectively in BRE) but that goes without saying as these
are ERE elements not BRE (note that \? and \+ are not even part
@@ -204,10 +202,19 @@ For C programs:
by e.g. "echo DEVELOPER=1 >>config.mak".
- We try to support a wide range of C compilers to compile Git with,
- including old ones. You should not use features from newer C
+ including old ones. As of Git v2.35.0 Git requires C99 (we check
+ "__STDC_VERSION__"). You should not use features from a newer C
standard, even if your compiler groks them.
- There are a few exceptions to this guideline:
+ New C99 features have been phased in gradually; if something is new
+ in C99 but not yet used here, don't assume that it is safe to use, as
+ some compilers we target have only partial support for it. These are
+ considered safe to use:
+
+ . since around 2007 with 2b6854c863a, we have been using
+ initializer elements which are not computable at load time. E.g.:
+
+ const char *args[] = {"constant", variable, NULL};
. since early 2012 with e1327023ea, we have been using an enum
definition whose last element is followed by a comma. This, like
@@ -223,18 +230,24 @@ For C programs:
. since early 2021 with 765dc168882, we have been using variadic
macros, mostly for printf-like trace and debug macros.
- These used to be forbidden, but we have not heard any breakage
- report, and they are assumed to be safe.
+ . since late 2021 with 44ba10d6, we have had variables declared in
+ the for loop "for (int i = 0; i < 10; i++)".
+
+ New C99 features that we cannot use yet:
+
+ . %z and %zu as a printf() argument for a size_t (the %z being for
+ the POSIX-specific ssize_t). Instead you should use
+ printf("%"PRIuMAX, (uintmax_t)v). These days the MSVC version we
+ rely on supports %z, but the C library used by MinGW does not.
+
+ . Shorthand like ".a.b = *c" in struct initializations is known to
+ trip up an older IBM XLC version, use ".a = { .b = *c }" instead.
+ See 33665d98 (reftable: make assignments portable to AIX xlc
+ v12.01, 2022-03-28).
- Variables have to be declared at the beginning of the block, before
the first statement (i.e. -Wdeclaration-after-statement).
- - Declaring a variable in the for loop "for (int i = 0; i < 10; i++)"
- is still not allowed in this codebase. We are in the process of
- allowing it by waiting to see that 44ba10d6 (revision: use C99
- declaration of variable in for() loop, 2021-11-14) does not get
- complaints. Let's revisit this around November 2022.
-
- NULL pointers shall be written as NULL, not as 0.
- When declaring pointers, the star sides with the variable
@@ -650,8 +663,8 @@ Writing Documentation:
(One or more of <file>.)
Optional parts are enclosed in square brackets:
- [<extra>]
- (Zero or one <extra>.)
+ [<file>...]
+ (Zero or more of <file>.)
--exec-path[=<path>]
(Option with an optional argument. Note that the "=" is inside the
@@ -665,6 +678,16 @@ Writing Documentation:
[-q | --quiet]
[--utf8 | --no-utf8]
+ Use spacing around "|" token(s), but not immediately after opening or
+ before closing a [] or () pair:
+ Do: [-q | --quiet]
+ Don't: [-q|--quiet]
+
+ Don't use spacing around "|" tokens when they're used to separate the
+ alternate arguments of an option:
+ Do: --track[=(direct|inherit)]
+ Don't: --track[=(direct | inherit)]
+
Parentheses are used for grouping:
[(<rev> | <range>)...]
(Any number of either <rev> or <range>. Parens are needed to make
diff --git a/Documentation/Makefile b/Documentation/Makefile
index d47acb2e25..5e1a7f655c 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -476,8 +476,19 @@ $(LINT_DOCS_MAN_SECTION_ORDER): .build/lint-docs/man-section-order/%.ok: %.txt
.PHONY: lint-docs-man-section-order
lint-docs-man-section-order: $(LINT_DOCS_MAN_SECTION_ORDER)
+.PHONY: lint-docs-fsck-msgids
+LINT_DOCS_FSCK_MSGIDS = .build/lint-docs/fsck-msgids.ok
+$(LINT_DOCS_FSCK_MSGIDS): lint-fsck-msgids.perl
+$(LINT_DOCS_FSCK_MSGIDS): ../fsck.h fsck-msgids.txt
+ $(call mkdir_p_parent_template)
+ $(QUIET_GEN)$(PERL_PATH) lint-fsck-msgids.perl \
+ ../fsck.h fsck-msgids.txt $@
+
+lint-docs-fsck-msgids: $(LINT_DOCS_FSCK_MSGIDS)
+
## Lint: list of targets above
.PHONY: lint-docs
+lint-docs: lint-docs-fsck-msgids
lint-docs: lint-docs-gitlink
lint-docs: lint-docs-man-end-blurb
lint-docs: lint-docs-man-section-order
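
For reference, the new check added above can also be invoked on its own from
the top of the tree:

    $ make -C Documentation lint-docs-fsck-msgids
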
diff --git a/Documentation/MyFirstContribution.txt b/Documentation/MyFirstContribution.txt
index 1a4be8ee0a..ccfd0cb5f3 100644
--- a/Documentation/MyFirstContribution.txt
+++ b/Documentation/MyFirstContribution.txt
@@ -736,7 +736,7 @@ the {lore}[Git mailing list archive]:
2022-02-21 1:43 ` John Cai
2022-02-21 1:50 ` Taylor Blau
2022-02-23 19:50 ` John Cai
-2022-02-18 20:00 ` // other replies ellided
+2022-02-18 20:00 ` // other replies elided
2022-02-18 18:40 ` [PATCH 2/3] reflog: call reflog_delete from reflog.c John Cai via GitGitGadget
2022-02-18 19:15 ` Ævar Arnfjörð Bjarmason
2022-02-18 20:26 ` Junio C Hamano
diff --git a/Documentation/RelNotes/2.38.2.txt b/Documentation/RelNotes/2.38.2.txt
new file mode 100644
index 0000000000..086b900f6c
--- /dev/null
+++ b/Documentation/RelNotes/2.38.2.txt
@@ -0,0 +1,60 @@
+Git 2.38.2 Release Notes
+========================
+
+This is to backport various fixes accumulated during the development
+towards Git 2.39, the next feature release.
+
+
+Fixes since v2.38.1
+-------------------
+
+ * Update CodingGuidelines to clarify what features to use and avoid
+ in C99.
+
+ * The codepath that reads from the index v4 had unaligned memory
+ accesses, which has been corrected.
+
+ * "git remote rename" failed to rename a remote without fetch
+ refspec, which has been corrected.
+
+ * "git clone" did not like to see the "--bare" and the "--origin"
+ options used together without a good reason.
+
+ * Fix messages incorrectly marked for translation.
+
+ * "git fsck" failed to release contents of tree objects already used
+ from the memory, which has been fixed.
+
+ * "git rebase -i" can mistakenly attempt to apply a fixup to a commit
+ itself, which has been corrected.
+
+ * In read-only repositories, "git merge-tree" tried to come up with a
+ merge result tree object, which it failed to do (which is not wrong) and
+ led to a segfault (which is bad), which has been corrected.
+
+ * Force C locale while running tests around httpd to make sure we can
+ find expected error messages in the log.
+
+ * Fix a logic in "mailinfo -b" that miscomputed the length of a
+ substring, which led to an out-of-bounds access.
+
+ * The codepath to sign learned to report errors when it fails to read
+ from "ssh-keygen".
+
+ * "GIT_EDITOR=: git branch --edit-description" resulted in failure,
+ which has been corrected.
+
+ * Documentation on various Boolean GIT_* environment variables have
+ been clarified.
+
+ * "git multi-pack-index repack/expire" used to repack unreachable
+ cruft into a new pack, which has been corrected.
+
+ * The code to clean temporary object directories (used for
+ quarantine) tried to remove them inside its signal handler, which
+ was a no-no.
+
+ * "git branch --edit-description" on an unborn branch misleadingly
+ said that no such branch exists, which has been corrected.
+
+Also contains various documentation updates and code clean-ups.
diff --git a/Documentation/RelNotes/2.39.0.txt b/Documentation/RelNotes/2.39.0.txt
new file mode 100644
index 0000000000..153bf6d89b
--- /dev/null
+++ b/Documentation/RelNotes/2.39.0.txt
@@ -0,0 +1,276 @@
+Git v2.39 Release Notes
+=======================
+
+UI, Workflows & Features
+------------------------
+
+ * "git grep" learned to expand the sparse-index more lazily and on
+ demand in a sparse checkout.
+
+ * By default, use of fsmonitor on a repository on networked
+ filesystem is disabled. Add knobs to make it workable on macOS.
+
+ * After checking out a "branch" that is a symbolic-ref that points at
+ another branch, "git symbolic-ref HEAD" reports the underlying
+ branch, not the symbolic-ref the user gave checkout as argument.
+ The command learned the "--no-recurse" option to stop after
+ dereferencing a symbolic-ref only once.
+
+ * "git branch --edit-description @{-1}" is now a way to edit branch
+ description of the branch you were on before switching to the
+ current branch.
+
+ * "git merge-tree --stdin" is a new way to request a series of merges
+ and report the merge results.
+
+ * "git shortlog" learned to group by the "format" string.
+
+ * A new "--include-whitespace" option is added to "git patch-id", and
+ existing bugs in the internal patch-id logic that did not match
+ what "git patch-id" produces have been corrected.
+
+ * Enable gc.cruftpacks by default for those who opt into
+ feature.experimental setting.
+
+ * "git repack" learns to send cruft objects out of the way into
+ packfiles outside the repository.
+
+Performance, Internal Implementation, Development Support etc.
+--------------------------------------------------------------
+
+ * With a bit of header twiddling, use the native regexp library on
+ macOS instead of the compat/ one.
+
+ * Prepare for GNU [ef]grep versions that warn about their use.
+
+ * Sources related to fuzz testing have been moved down to their own
+ directory.
+
+ * Most credential helpers ignored unknown entries in a credential
+ description, but a few died upon seeing them. The latter were
+ taught to ignore them, too.
+
+ * "scalar unregister" in a repository that has already been
+ unregistered reported an error.
+
+ * Remove error detection from a function that fetches from promisor
+ remotes, and make it die when such a fetch fails to bring all the
+ requested objects, to give an early failure to various operations.
+
+ * Update CodingGuidelines to clarify what features to use and avoid
+ in C99.
+
+ * Avoid false-positive from LSan whose assumption may be broken with
+ higher optimization levels.
+
+ * Enable address and undefined sanitizer tasks at GitHub Actions CI.
+
+ * More UNUSED annotations to help use the -Wunused option with the
+ compiler.
+ (merge 4b992f0a24 jk/unused-anno-more later to maint).
+
+ * Rewrite a deep recursion in the skipping negotiator to use a loop
+ with on-heap prio queue to avoid stack wastage.
+
+ * Add documentation for message IDs in fsck error messages.
+
+ * Define the logical elements of a "bundle list", data structure to
+ store them in-core, format to transfer them, and code to parse
+ them.
+
+ * The role the security mailing list plays in an embargoed release
+ has been documented.
+
+ * Two new facilities, "timer" and "counter", are introduced to the
+ trace2 API.
+
+ * Code simplification by using strvec_pushf() instead of building an
+ argument in a separate strbuf.
+
+ * Make sure generated dependency file is stably sorted to help
+ developers debugging their build issues.
+
+ * The glossary entries for "commit-graph file" and "reachability
+ bitmap" have been added.
+
+ * Various tests exercising the transfer.credentialsInUrl
+ configuration are taught to avoid making requests which require
+ resolving localhost to reduce CI-flakiness.
+
+ * A redundant diagnostic message is dropped from test_path_is_missing().
+
+ * Simplify the run-command API.
+
+ * Update the actions/github-script dependency in CI to avoid a
+ deprecation warning.
+
+ * Progress on being able to initialize a rev_info struct with a
+ macro.
+
+ * Add trace2 counters to the region to clear skip worktree bits in a
+ sparse checkout.
+
+ * Modernize test script to avoid "test -f" and friends.
+
+ * Avoid calling 'cache_tree_update()' when doing so would be
+ redundant.
+
+ * Update the credential-cache documentation to provide a more
+ realistic example.
+
+ * Makefile comments updates and reordering to clarify knobs used to
+ choose SHA implementations.
+
+ * A design document for sparse-checkout's future directions has been
+ added.
+
+Fixes since v2.38
+-----------------
+
+ * The codepath that reads from the index v4 had unaligned memory
+ accesses, which has been corrected.
+
+ * Fix messages incorrectly marked for translation.
+
+ * "git fsck" failed to release contents of tree objects already used
+ from the memory, which has been fixed.
+
+ * "git clone" did not like to see the "--bare" and the "--origin"
+ options used together without a good reason.
+
+ * "git remote rename" failed to rename a remote without fetch
+ refspec, which has been corrected.
+
+ * Documentation on various Boolean GIT_* environment variables have
+ been clarified.
+
+ * "git rebase -i" can mistakenly attempt to apply a fixup to a commit
+ itself, which has been corrected.
+
+ * "git multi-pack-index repack/expire" used to repack unreachable
+ cruft into a new pack, which has been corrected.
+
+ * In read-only repositories, "git merge-tree" tried to come up with a
+ merge result tree object, which it failed to do (which is not wrong) and
+ led to a segfault (which is bad), which has been corrected.
+
+ * Force C locale while running tests around httpd to make sure we can
+ find expected error messages in the log.
+
+ * Fix a logic in "mailinfo -b" that miscomputed the length of a
+ substring, which led to an out-of-bounds access.
+
+ * The codepath to sign learned to report errors when it fails to read
+ from "ssh-keygen".
+
+ * Code clean-up that results in plugging a leak.
+
+ * "GIT_EDITOR=: git branch --edit-description" resulted in failure,
+ which has been corrected.
+
+ * The code to clean temporary object directories (used for
+ quarantine) tried to remove them inside its signal handler, which
+ was a no-no.
+
+ * Update comment in the Makefile about the RUNTIME_PREFIX config knob.
+
+ * Clarify that "the sentence after <area>: prefix does not begin with
+ a capital letter" rule applies only to the commit title.
+
+ * "git branch --edit-description" on an unborn branch misleadingly
+ said that no such branch exists, which has been corrected.
+
+ * Work around older clang that warns against C99 zero initialization
+ syntax for struct.
+
+ * Giving "--invert-grep" and "--all-match" without "--grep" to the
+ "git log" command resulted in an attempt to access grep pattern
+ expression structure that has not been allocated, which has been
+ corrected.
+ (merge db84376f98 ab/grep-simplify-extended-expression later to maint).
+
+ * "git diff rev^!" did not show combined diff to go to the rev from
+ its parents.
+ (merge a79c6b6081 rs/diff-caret-bang-with-parents later to maint).
+
+ * Allow configuration files in "protected" scopes to include other
+ configuration files.
+ (merge ecec57b3c9 gc/bare-repo-discovery later to maint).
+
+ * Give a bit more diversity to macOS CI by using sha1dc in one of the
+ jobs (the other one tests Apple Common Crypto).
+ (merge 1ad5c3df35 jc/ci-osx-with-sha1dc later to maint).
+
+ * A bugfix with tracing support in the midx codepath.
+ (merge e9c3839944 tb/midx-bitmap-selection-fix later to maint).
+
+ * When geometric repacking feature is in use together with the
+ --pack-kept-objects option, we lost packs marked with .keep files.
+ (merge 197443e80a tb/save-keep-pack-during-geometric-repack later to maint).
+
+ * Move a global variable added as a hack during regression fixes to
+ its proper place in the API.
+ (merge 0b0ab95f17 ab/run-hook-api-cleanup later to maint).
+
+ * Update to build procedure with VS using CMake/CTest.
+ (merge c858750b41 js/cmake-updates later to maint).
+
+ * The short-help text shown by "git cmd -h" and the synopsis text
+ shown at the beginning of "git help cmd" have been made more
+ consistent.
+
+ * When creating a multi-pack bitmap, remove per-pack bitmap files
+ unconditionally as they will never be consulted.
+ (merge 55d902cd61 tb/remove-unused-pack-bitmap later to maint).
+
+ * Fix a longstanding syntax error in Git.pm error codepath.
+
+ * "git diff --stat" etc. were invented back when everything was ASCII
+ and strlen() was a way to measure the display width of a string;
+ adjust them to compute the display width assuming UTF-8 pathnames.
+ (merge ce8529b2bb tb/diffstat-with-utf8-strwidth later to maint).
+
+ * "git branch --edit-description" can exit with status -1 which is
+ not a good practice; it learned to use 1 as everybody else instead.
+
+ * "git apply" limits its input to a bit less than 1 GiB.
+
+ * Merging a branch with directory renames into a branch that changes
+ the directory to a symlink was mishandled by the ort merge
+ strategy, which has been corrected.
+
+ * A bugfix to "git subtree" in its split and merge features.
+
+ * Fix some bugs in the reflog messages when rebasing, and change the
+ reflog messages of "rebase --apply" to match "rebase --merge" with
+ the aim of making the reflog easier to parse.
+
+ * "git rebase --keep-base" used to discard the commits that are
+ already cherry-picked to the upstream, even when "keep-base" meant
+ that the base, on top of which the history is being rebuilt, does
+ not yet include these cherry-picked commits. The --keep-base
+ option now implies --reapply-cherry-picks and --no-fork-point
+ options.
+
+ * The way "git repack" created temporary files when it received a
+ signal was prone to deadlocking, which has been corrected.
+
+ * Various tests exercising the transfer.credentialsInUrl
+ configuration are taught to avoid making requests which require
+ resolving localhost to reduce CI-flakiness.
+
+ * The adjust_shared_perm() helper function learned to refrain from
+ setting the "g+s" bit on directories when it is not necessary.
+
+ * "git archive" mistakenly complained twice about a missing
+ executable, which has been corrected.
+
+ * Fix a bug where `git branch -d` did not work on an orphaned HEAD.
+
+ * `git rebase --update-refs` would delete references when all
+ `update-ref` commands in the sequencer were removed, which has been
+ corrected.
+
+ * Other code cleanup, docfix, build fix, etc.
+ (merge 413bc6d20a ds/cmd-main-reorder later to maint).
+ (merge 8d2863e4ed nw/t1002-cleanup later to maint).
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 5bd795e5db..927f7329a5 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -153,7 +153,9 @@ files you are modifying to see the current conventions.
[[summary-section]]
The title sentence after the "area:" prefix omits the full stop at the
-end, and its first word is not capitalized unless there is a reason to
+end, and its first word is not capitalized (the omission
+of capitalization applies only to the word after the "area:"
+prefix of the title) unless there is a reason to
capitalize it other than because it is the first word in the sentence.
E.g. "doc: clarify...", not "doc: Clarify...", or "githooks.txt:
improve...", not "githooks.txt: Improve...". But "refs: HEAD is also
diff --git a/Documentation/build-docdep.perl b/Documentation/build-docdep.perl
index ba4205e030..1b3ac8fdd9 100755
--- a/Documentation/build-docdep.perl
+++ b/Documentation/build-docdep.perl
@@ -38,9 +38,10 @@ while ($changed) {
}
}
-while (my ($text, $included) = each %include) {
+foreach my $text (sort keys %include) {
+ my $included = $include{$text};
if (! exists $included{$text} &&
(my $base = $text) =~ s/\.txt$//) {
- print "$base.html $base.xml : ", join(" ", keys %$included), "\n";
+ print "$base.html $base.xml : ", join(" ", sort keys %$included), "\n";
}
}
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 5b5b976569..e480f99c3e 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -387,6 +387,8 @@ include::config/branch.txt[]
include::config/browser.txt[]
+include::config/bundle.txt[]
+
include::config/checkout.txt[]
include::config/clean.txt[]
@@ -423,6 +425,8 @@ include::config/filter.txt[]
include::config/fsck.txt[]
+include::config/fsmonitor--daemon.txt[]
+
include::config/gc.txt[]
include::config/gitcvs.txt[]
@@ -489,6 +493,8 @@ include::config/rebase.txt[]
include::config/receive.txt[]
+include::config/refs.txt[]
+
include::config/remote.txt[]
include::config/remotes.txt[]
diff --git a/Documentation/config/bundle.txt b/Documentation/config/bundle.txt
new file mode 100644
index 0000000000..daa21eb674
--- /dev/null
+++ b/Documentation/config/bundle.txt
@@ -0,0 +1,24 @@
+bundle.*::
+ The `bundle.*` keys may appear in a bundle list file found via the
+ `git clone --bundle-uri` option. These keys currently have no effect
+ if placed in a repository config file, though this will change in the
+ future. See link:technical/bundle-uri.html[the bundle URI design
+ document] for more details.
+
+bundle.version::
+ This integer value advertises the version of the bundle list format
+ used by the bundle list. Currently, the only accepted value is `1`.
+
+bundle.mode::
+ This string value should be either `all` or `any`. This value describes
+ whether all of the advertised bundles are required to unbundle a
+ complete understanding of the bundled information (`all`) or if any one
+ of the listed bundle URIs is sufficient (`any`).
+
+bundle.<id>.*::
+ The `bundle.<id>.*` keys are used to describe a single item in the
+ bundle list, grouped under `<id>` for identification purposes.
+
+bundle.<id>.uri::
+ This string value defines the URI by which Git can reach the contents
+ of this `<id>`. This URI may be a bundle file or another bundle list.
diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index 37afbaf5a4..dfbdaf00b8 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -618,7 +618,7 @@ but risks losing recent work in the event of an unclean system shutdown.
* `loose-object` hardens objects added to the repo in loose-object form.
* `pack` hardens objects added to the repo in packfile form.
* `pack-metadata` hardens packfile bitmaps and indexes.
-* `commit-graph` hardens the commit graph file.
+* `commit-graph` hardens the commit-graph file.
* `index` hardens the index when it is modified.
* `objects` is an aggregate option that is equivalent to
`loose-object,pack`.
diff --git a/Documentation/config/extensions.txt b/Documentation/config/extensions.txt
index bccaec7a96..05abb821e0 100644
--- a/Documentation/config/extensions.txt
+++ b/Documentation/config/extensions.txt
@@ -7,6 +7,69 @@ Note that this setting should only be set by linkgit:git-init[1] or
linkgit:git-clone[1]. Trying to change it after initialization will not
work and will produce hard-to-diagnose issues.
+extensions.refFormat::
+ Specify the reference storage mechanisms used by the repository as a
+ multi-valued list. The acceptable values are `files` and `packed`.
+ If not specified, the list of `files` and `packed` is assumed. It
+ is an error to specify this key unless `core.repositoryFormatVersion`
+ is 1.
++
+As new ref formats are added, Git commands may modify this list before and
+after upgrading the on-disk reference storage files. The specific values
+indicate the existence of different layers:
++
+--
+`files`;;
+ When present, references may be stored as "loose" reference files
+ in the `$GIT_DIR/refs/` directory. The name of the reference
+ corresponds to the filename after `$GIT_DIR` and the file contains
+ an object ID as a hexadecimal string. If a loose reference file
+ exists, then its value takes precedence over all other formats.
+
+`packed`;;
+ When present, references may be stored as a group in a
+ `packed-refs` file in its version 1 format. When grouped with
+ `"files"` or provided on its own, this file is located at
+ `$GIT_DIR/packed-refs`. This file contains a list of distinct
+ reference names, paired with their object IDs. When combined with
+ `files`, the `packed` format will only be used to group multiple
+ loose reference files upon request via the `git pack-refs` command or
+ via the `pack-refs` maintenance task.
+
+`packed-v2`;;
+ When present, references may be stored as a group in a
+ `packed-refs` file in its version 2 format. This file is in the
+ same position and interacts with loose refs the same as when the
+ `packed` value exists. Both `packed` and `packed-v2` must exist to
+ upgrade an existing `packed-refs` file from version 1 to version 2
+ or to downgrade from version 2 to version 1. When both are
+ present, the `refs.packedRefsVersion` config value indicates which
+ file format version is used during writes, but both versions are
+ understood when reading the file.
+--
++
+The following combinations are supported by this version of Git:
++
+--
+`files` and (`packed` and/or `packed-v2`);;
+ This set of values indicates that references are stored both as
+ loose reference files and in the `packed-refs` file. Loose
+ references are preferred, and the `packed-refs` file is updated
+ only when deleting a reference that is stored in the `packed-refs`
+ file or during a `git pack-refs` command.
++
+The presence of `packed` and `packed-v2` specifies whether the `packed-refs`
+file is allowed to be in its v1 or v2 formats, respectively. When only one
+is present, Git will refuse to read a `packed-refs` file that does not
+match the expected format. When both are present, the `refs.packedRefsVersion`
+config option indicates which file format is used during writes.
+
+`files`;;
+ When only this value is present, Git will ignore the `packed-refs`
+ file and refuse to write one during `git pack-refs`. All references
+ will be read from and written to loose reference files.
+--
+
extensions.worktreeConfig::
If enabled, then worktrees will load config settings from the
`$GIT_DIR/config.worktree` file in addition to the
@@ -21,10 +84,15 @@ When enabling `extensions.worktreeConfig`, you must be careful to move
certain values from the common config file to the main working tree's
`config.worktree` file, if present:
+
-* `core.worktree` must be moved from `$GIT_COMMON_DIR/config` to
- `$GIT_COMMON_DIR/config.worktree`.
-* If `core.bare` is true, then it must be moved from `$GIT_COMMON_DIR/config`
- to `$GIT_COMMON_DIR/config.worktree`.
+--
+`core.worktree`;;
+ This config value must be moved from `$GIT_COMMON_DIR/config` to
+ `$GIT_COMMON_DIR/config.worktree`.
+
+`core.bare`;;
+ If true, then this value must be moved from
+ `$GIT_COMMON_DIR/config` to `$GIT_COMMON_DIR/config.worktree`.
+--
+
It may also be beneficial to adjust the locations of `core.sparseCheckout`
and `core.sparseCheckoutCone` depending on your desire for customizable
diff --git a/Documentation/config/feature.txt b/Documentation/config/feature.txt
index cdecd04e5b..95975e5091 100644
--- a/Documentation/config/feature.txt
+++ b/Documentation/config/feature.txt
@@ -14,6 +14,9 @@ feature.experimental::
+
* `fetch.negotiationAlgorithm=skipping` may improve fetch negotiation times by
skipping more commits at a time, reducing the number of round trips.
++
+* `gc.cruftPacks=true` reduces disk space used by unreachable objects during
+garbage collection, preventing loose object explosions.
feature.manyFiles::
Enable config options that optimize for repos with many files in the
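
As a rough sketch of the new default (ordinary Git commands, nothing specific
to this patch), opting into the experimental features makes the next garbage
collection write a cruft pack, recognizable by its `.mtimes` file:

    $ git config feature.experimental true
    $ git gc
    $ ls .git/objects/pack/*.mtimes
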
diff --git a/Documentation/config/fsck.txt b/Documentation/config/fsck.txt
index 450e8c38e3..a3c865df56 100644
--- a/Documentation/config/fsck.txt
+++ b/Documentation/config/fsck.txt
@@ -35,6 +35,10 @@ allow new instances of the same breakages go unnoticed.
Setting an unknown `fsck.<msg-id>` value will cause fsck to die, but
doing the same for `receive.fsck.<msg-id>` and `fetch.fsck.<msg-id>`
will only cause git to warn.
++
+See `Fsck Messages` section of linkgit:git-fsck[1] for supported
+values of `<msg-id>`.
+
fsck.skipList::
The path to a list of object names (i.e. one unabbreviated SHA-1 per
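
For example, one of the message IDs documented in the new fsck-msgids.txt
(further below) can be downgraded per repository:

    $ git config fsck.zeroPaddedFilemode ignore
    $ git fsck
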
diff --git a/Documentation/config/fsmonitor--daemon.txt b/Documentation/config/fsmonitor--daemon.txt
new file mode 100644
index 0000000000..c225c6c9e7
--- /dev/null
+++ b/Documentation/config/fsmonitor--daemon.txt
@@ -0,0 +1,11 @@
+fsmonitor.allowRemote::
+ By default, the fsmonitor daemon refuses to work against network-mounted
+ repositories. Setting `fsmonitor.allowRemote` to `true` overrides this
+ behavior. Only respected when `core.fsmonitor` is set to `true`.
+
+fsmonitor.socketDir::
+ This Mac OS-specific option, if set, specifies the directory in
+ which to create the Unix domain socket used for communication
+ between the fsmonitor daemon and various Git commands. The directory must
+ reside on a native Mac OS filesystem. Only respected when `core.fsmonitor`
+ is set to `true`.
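
A minimal sketch of enabling the daemon with these options on macOS (the
socket directory path is hypothetical and must be on a native Mac OS
filesystem, as noted above):

    $ git config fsmonitor.socketDir ~/fsmonitor-sockets
    $ git config fsmonitor.allowRemote true
    $ git config core.fsmonitor true
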
diff --git a/Documentation/config/index.txt b/Documentation/config/index.txt
index 75f3a2d105..709ba72f62 100644
--- a/Documentation/config/index.txt
+++ b/Documentation/config/index.txt
@@ -30,3 +30,11 @@ index.version::
Specify the version with which new index files should be
initialized. This does not affect existing repositories.
If `feature.manyFiles` is enabled, then the default is 4.
+
+index.computeHash::
+ When enabled, compute the hash of the index file as it is written
+ and store the hash at the end of the content. This is enabled by
+ default.
++
+If you disable `index.computeHash`, then older Git clients may report that
+your index is corrupt during `git fsck`.
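
A sketch of the trade-off described above; the setting only affects index
files written after it is changed:

    $ git config index.computeHash false
    $ git status    # subsequent index writes omit the trailing hash
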
diff --git a/Documentation/config/log.txt b/Documentation/config/log.txt
index bc63bc3939..5f96cf87fb 100644
--- a/Documentation/config/log.txt
+++ b/Documentation/config/log.txt
@@ -34,9 +34,9 @@ log.excludeDecoration::
option.
log.diffMerges::
- Set default diff format to be used for merge commits. See
- `--diff-merges` in linkgit:git-log[1] for details.
- Defaults to `separate`.
+ Set diff format to be used when `--diff-merges=on` is
+ specified; see `--diff-merges` in linkgit:git-log[1] for
+ details. Defaults to `separate`.
log.follow::
If `true`, `git log` will act as if the `--follow` option was used when
diff --git a/Documentation/config/mergetool.txt b/Documentation/config/mergetool.txt
index 90b3809700..e779a122d8 100644
--- a/Documentation/config/mergetool.txt
+++ b/Documentation/config/mergetool.txt
@@ -59,7 +59,7 @@ mergetool.hideResolved::
possible and write the 'MERGED' file containing conflict markers around
any conflicts that it cannot resolve; 'LOCAL' and 'REMOTE' normally
represent the versions of the file from before Git's conflict
- resolution. This flag causes 'LOCAL' and 'REMOTE' to be overwriten so
+ resolution. This flag causes 'LOCAL' and 'REMOTE' to be overwritten so
that only the unresolved conflicts are presented to the merge tool. Can
be configured per-tool via the `mergetool.<tool>.hideResolved`
configuration variable. Defaults to `false`.
diff --git a/Documentation/config/push.txt b/Documentation/config/push.txt
index 7386fea225..43338b65e8 100644
--- a/Documentation/config/push.txt
+++ b/Documentation/config/push.txt
@@ -110,18 +110,8 @@ This will result in only b (a and c are cleared).
----
push.recurseSubmodules::
- Make sure all submodule commits used by the revisions to be pushed
- are available on a remote-tracking branch. If the value is 'check'
- then Git will verify that all submodule commits that changed in the
- revisions to be pushed are available on at least one remote of the
- submodule. If any commits are missing, the push will be aborted and
- exit with non-zero status. If the value is 'on-demand' then all
- submodules that changed in the revisions to be pushed will be
- pushed. If on-demand was not able to push all necessary revisions
- it will also be aborted and exit with non-zero status. If the value
- is 'no' then default behavior of ignoring submodules when pushing
- is retained. You may override this configuration at time of push by
- specifying '--recurse-submodules=check|on-demand|no'.
+ May be "check", "on-demand", "only", or "no", with the same behavior
+ as that of "push --recurse-submodules".
If not set, 'no' is used by default, unless 'submodule.recurse' is
set (in which case a 'true' value means 'on-demand').
diff --git a/Documentation/config/refs.txt b/Documentation/config/refs.txt
new file mode 100644
index 0000000000..b2fdb2923f
--- /dev/null
+++ b/Documentation/config/refs.txt
@@ -0,0 +1,13 @@
+refs.packedRefsVersion::
+ Specifies the file format version to use when writing a `packed-refs`
+ file. Defaults to `1`.
++
+The only other value currently allowed is `2`, which uses a structured file
+format to result in a smaller `packed-refs` file. In order to write this
+file format version, the repository must also have the `packed-v2` extension
+enabled. The most typical setup will include the
+`core.repositoryFormatVersion=1` config value and the `extensions.refFormat`
+key will have three values: `files`, `packed`, and `packed-v2`.
++
+If `extensions.refFormat` has the value `packed-v2` and not `packed`, then
+`refs.packedRefsVersion` defaults to `2`.
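
A sketch of the "most typical setup" mentioned above, expressed as plain
config commands (the exact upgrade procedure may differ; `git pack-refs`
performs the actual rewrite):

    $ git config core.repositoryFormatVersion 1
    $ git config --add extensions.refFormat files
    $ git config --add extensions.refFormat packed
    $ git config --add extensions.refFormat packed-v2
    $ git config refs.packedRefsVersion 2
    $ git pack-refs --all
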
diff --git a/Documentation/config/submodule.txt b/Documentation/config/submodule.txt
index 6490527b45..1144a5ad74 100644
--- a/Documentation/config/submodule.txt
+++ b/Documentation/config/submodule.txt
@@ -93,6 +93,18 @@ submodule.fetchJobs::
in parallel. A value of 0 will give some reasonable default.
If unset, it defaults to 1.
+submodule.diffJobs::
+ Specifies how many submodules are diffed at the same time. A
+ positive integer allows up to that number of submodules diffed
+ in parallel. A value of 0 will use the number of logical cores.
+ If unset, it defaults to 1. The diff operation is used by many
+ other git commands such as add, merge, diff, status, stash and
+ more. Note that the expensive part of the diff operation is
+ reading the index from cache or memory. Therefore multiple jobs
+ may be detrimental to performance if your hardware does not
+ support parallel reads or if the number of jobs greatly exceeds
+ the number of supported parallel reads.
+
submodule.alternateLocation::
Specifies how the submodules obtain alternates when submodules are
cloned. Possible values are `no`, `superproject`.
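
For instance (the value is arbitrary), in a superproject with many
submodules:

    $ git config submodule.diffJobs 4
    $ git status    # submodule diffs now run up to four at a time
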
diff --git a/Documentation/fsck-msgids.txt b/Documentation/fsck-msgids.txt
new file mode 100644
index 0000000000..7af76ff99a
--- /dev/null
+++ b/Documentation/fsck-msgids.txt
@@ -0,0 +1,161 @@
+`badDate`::
+ (ERROR) Invalid date format in an author/committer line.
+
+`badDateOverflow`::
+ (ERROR) Invalid date value in an author/committer line.
+
+`badEmail`::
+ (ERROR) Invalid email format in an author/committer line.
+
+`badFilemode`::
+ (INFO) A tree contains a bad filemode entry.
+
+`badName`::
+ (ERROR) An author/committer name is empty.
+
+`badObjectSha1`::
+ (ERROR) An object has a bad sha1.
+
+`badParentSha1`::
+ (ERROR) A commit object has a bad parent sha1.
+
+`badTagName`::
+ (INFO) A tag has an invalid format.
+
+`badTimezone`::
+ (ERROR) Found an invalid time zone in an author/committer line.
+
+`badTree`::
+ (ERROR) A tree cannot be parsed.
+
+`badTreeSha1`::
+ (ERROR) A tree has an invalid format.
+
+`badType`::
+ (ERROR) Found an invalid object type.
+
+`duplicateEntries`::
+ (ERROR) A tree contains duplicate file entries.
+
+`emptyName`::
+ (WARN) A path contains an empty name.
+
+`extraHeaderEntry`::
+ (IGNORE) Extra headers found after `tagger`.
+
+`fullPathname`::
+ (WARN) A path contains the full path starting with "/".
+
+`gitattributesSymlink`::
+ (INFO) `.gitattributes` is a symlink.
+
+`gitignoreSymlink`::
+ (INFO) `.gitignore` is a symlink.
+
+`gitmodulesBlob`::
+ (ERROR) A non-blob found at `.gitmodules`.
+
+`gitmodulesLarge`::
+ (ERROR) The `.gitmodules` file is too large to parse.
+
+`gitmodulesMissing`::
+ (ERROR) Unable to read `.gitmodules` blob.
+
+`gitmodulesName`::
+ (ERROR) A submodule name is invalid.
+
+`gitmodulesParse`::
+ (INFO) Could not parse `.gitmodules` blob.
+
+`gitmodulesPath`::
+ (ERROR) `.gitmodules` path is invalid.
+
+`gitmodulesSymlink`::
+ (ERROR) `.gitmodules` is a symlink.
+
+`gitmodulesUpdate`::
+ (ERROR) Found an invalid submodule update setting.
+
+`gitmodulesUrl`::
+ (ERROR) Found an invalid submodule url.
+
+`hasDot`::
+ (WARN) A tree contains an entry named `.`.
+
+`hasDotdot`::
+ (WARN) A tree contains an entry named `..`.
+
+`hasDotgit`::
+ (WARN) A tree contains an entry named `.git`.
+
+`mailmapSymlink`::
+ (INFO) `.mailmap` is a symlink.
+
+`missingAuthor`::
+ (ERROR) Author is missing.
+
+`missingCommitter`::
+ (ERROR) Committer is missing.
+
+`missingEmail`::
+ (ERROR) Email is missing in an author/committer line.
+
+`missingNameBeforeEmail`::
+ (ERROR) Missing name before an email in an author/committer line.
+
+`missingObject`::
+ (ERROR) Missing `object` line in tag object.
+
+`missingSpaceBeforeDate`::
+ (ERROR) Missing space before date in an author/committer line.
+
+`missingSpaceBeforeEmail`::
+ (ERROR) Missing space before the email in author/committer line.
+
+`missingTag`::
+ (ERROR) Unexpected end after `type` line in a tag object.
+
+`missingTagEntry`::
+ (ERROR) Missing `tag` line in a tag object.
+
+`missingTaggerEntry`::
+ (INFO) Missing `tagger` line in a tag object.
+
+`missingTree`::
+ (ERROR) Missing `tree` line in a commit object.
+
+`missingType`::
+ (ERROR) Invalid type value on the `type` line in a tag object.
+
+`missingTypeEntry`::
+ (ERROR) Missing `type` line in a tag object.
+
+`multipleAuthors`::
+ (ERROR) Multiple author lines found in a commit.
+
+`nulInCommit`::
+ (WARN) Found a NUL byte in the commit object body.
+
+`nulInHeader`::
+ (FATAL) NUL byte exists in the object header.
+
+`nullSha1`::
+ (WARN) Tree contains entries pointing to a null sha1.
+
+`treeNotSorted`::
+ (ERROR) A tree is not properly sorted.
+
+`unknownType`::
+ (ERROR) Found an unknown object type.
+
+`unterminatedHeader`::
+ (FATAL) Missing end-of-line in the object header.
+
+`zeroPaddedDate`::
+ (ERROR) Found a zero padded date in an author/committer line.
+
+`zeroPaddedFilemode`::
+ (WARN) Found a zero padded filemode in a tree.
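
As a usage sketch (the message ID and severity are arbitrary examples), the
severity of any non-FATAL message listed above can be tweaked through the
`fsck.<msg-id>` configuration described in linkgit:git-fsck[1]:

-------------------------------------
$ git config fsck.zeroPaddedFilemode ignore
$ git fsck
-------------------------------------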
diff --git a/Documentation/git-annotate.txt b/Documentation/git-annotate.txt
index e44a831339..5ae8aabe0f 100644
--- a/Documentation/git-annotate.txt
+++ b/Documentation/git-annotate.txt
@@ -8,7 +8,7 @@ git-annotate - Annotate file lines with commit information
SYNOPSIS
--------
[verse]
-'git annotate' [<options>] <file> [<revision>]
+'git annotate' [<options>] [<rev-opts>] [<rev>] [--] <file>
DESCRIPTION
-----------
diff --git a/Documentation/git-clean.txt b/Documentation/git-clean.txt
index 91742633fa..160d08b86b 100644
--- a/Documentation/git-clean.txt
+++ b/Documentation/git-clean.txt
@@ -8,7 +8,7 @@ git-clean - Remove untracked files from the working tree
SYNOPSIS
--------
[verse]
-'git clean' [-d] [-f] [-i] [-n] [-q] [-e <pattern>] [-x | -X] [--] <path>...
+'git clean' [-d] [-f] [-i] [-n] [-q] [-e <pattern>] [-x | -X] [--] [<pathspec>...]
DESCRIPTION
-----------
@@ -20,16 +20,16 @@ Normally, only files unknown to Git are removed, but if the `-x`
option is specified, ignored files are also removed. This can, for
example, be useful to remove all build products.
-If any optional `<path>...` arguments are given, only those paths
-are affected.
+If any optional `<pathspec>...` arguments are given, only those paths
+that match the pathspec are affected.
OPTIONS
-------
-d::
- Normally, when no <path> is specified, git clean will not
+ Normally, when no <pathspec> is specified, git clean will not
recurse into untracked directories to avoid removing too much.
Specify -d to have it recurse into such directories as well.
- If any paths are specified, -d is irrelevant; all untracked
+ If a <pathspec> is specified, -d is irrelevant; all untracked
files matching the specified paths (with exceptions for nested
git directories mentioned under `--force`) will be removed.
diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt
index d6434d262d..6a4e5d31b4 100644
--- a/Documentation/git-clone.txt
+++ b/Documentation/git-clone.txt
@@ -16,7 +16,7 @@ SYNOPSIS
[--depth <depth>] [--[no-]single-branch] [--no-tags]
[--recurse-submodules[=<pathspec>]] [--[no-]shallow-submodules]
[--[no-]remote-submodules] [--jobs <n>] [--sparse] [--[no-]reject-shallow]
- [--filter=<filter> [--also-filter-submodules]] [--] <repository>
+ [--filter=<filter> [--also-filter-submodules]] [--detach] [--] <repository>
[<directory>]
DESCRIPTION
@@ -210,6 +210,12 @@ objects from the source repository into a pack in the cloned repository.
`--branch` can also take tags and detaches the HEAD at that commit
in the resulting repository.
+--detach::
+ If the cloned repository's HEAD points to a branch, point the newly
+ created HEAD to the branch's commit instead of the branch itself.
+ Additionally, in a non-bare repository, the corresponding local branch
+ will not be created.
+
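
For illustration (a sketch of the `--detach` option described above; the URL
is hypothetical):

-------------------------------------
$ git clone --detach https://example.com/project.git
-------------------------------------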
-u <upload-pack>::
--upload-pack <upload-pack>::
When given, and the repository to clone from is accessed
diff --git a/Documentation/git-commit-graph.txt b/Documentation/git-commit-graph.txt
index 36fe56c2c7..c8dbceba01 100644
--- a/Documentation/git-commit-graph.txt
+++ b/Documentation/git-commit-graph.txt
@@ -10,7 +10,10 @@ SYNOPSIS
--------
[verse]
'git commit-graph verify' [--object-dir <dir>] [--shallow] [--[no-]progress]
-'git commit-graph write' <options> [--object-dir <dir>] [--[no-]progress]
+'git commit-graph write' [--object-dir <dir>] [--append]
+ [--split[=<strategy>]] [--reachable | --stdin-packs | --stdin-commits]
+ [--changed-paths] [--[no-]max-new-filters <n>] [--[no-]progress]
+ <split options>
DESCRIPTION
diff --git a/Documentation/git-credential-cache--daemon.txt b/Documentation/git-credential-cache--daemon.txt
index 01e1c214dd..650a15a7ed 100644
--- a/Documentation/git-credential-cache--daemon.txt
+++ b/Documentation/git-credential-cache--daemon.txt
@@ -8,7 +8,7 @@ git-credential-cache--daemon - Temporarily store user credentials in memory
SYNOPSIS
--------
[verse]
-'git credential-cache{litdd}daemon' [--debug] <socket>
+'git credential-cache{litdd}daemon' [--debug] <socket-path>
DESCRIPTION
-----------
@@ -16,7 +16,7 @@ DESCRIPTION
NOTE: You probably don't want to invoke this command yourself; it is
started automatically when you use linkgit:git-credential-cache[1].
-This command listens on the Unix domain socket specified by `<socket>`
+This command listens on the Unix domain socket specified by `<socket-path>`
for `git-credential-cache` clients. Clients may store and retrieve
credentials. Each credential is held for a timeout specified by the
client; once no credentials are held, the daemon exits.
diff --git a/Documentation/git-credential-cache.txt b/Documentation/git-credential-cache.txt
index 0216c18ef8..432e159d95 100644
--- a/Documentation/git-credential-cache.txt
+++ b/Documentation/git-credential-cache.txt
@@ -69,10 +69,10 @@ $ git push http://example.com/repo.git
------------------------------------
You can provide options via the credential.helper configuration
-variable (this example drops the cache time to 5 minutes):
+variable (this example increases the cache time to 1 hour):
-------------------------------------------------------
-$ git config credential.helper 'cache --timeout=300'
+$ git config credential.helper 'cache --timeout=3600'
-------------------------------------------------------
GIT
diff --git a/Documentation/git-credential.txt b/Documentation/git-credential.txt
index f18673017f..ac2818b9f6 100644
--- a/Documentation/git-credential.txt
+++ b/Documentation/git-credential.txt
@@ -160,6 +160,8 @@ empty string.
Components which are missing from the URL (e.g., there is no
username in the example above) will be left unset.
+Unrecognised attributes are silently discarded.
+
GIT
---
Part of the linkgit:git[1] suite
diff --git a/Documentation/git-diff-files.txt b/Documentation/git-diff-files.txt
index bf1febb9ae..591e3801b7 100644
--- a/Documentation/git-diff-files.txt
+++ b/Documentation/git-diff-files.txt
@@ -9,7 +9,7 @@ git-diff-files - Compares files in the working tree and the index
SYNOPSIS
--------
[verse]
-'git diff-files' [-q] [-0|-1|-2|-3|-c|--cc] [<common-diff-options>] [<path>...]
+'git diff-files' [-q] [-0 | -1 | -2 | -3 | -c | --cc] [<common-diff-options>] [<path>...]
DESCRIPTION
-----------
diff --git a/Documentation/git-diff.txt b/Documentation/git-diff.txt
index 85ae6d6d08..52b679256c 100644
--- a/Documentation/git-diff.txt
+++ b/Documentation/git-diff.txt
@@ -79,10 +79,10 @@ If --merge-base is given, use the merge base of the two commits for the
This form is to view the results of a merge commit. The first
listed <commit> must be the merge itself; the remaining two or
- more commits should be its parents. A convenient way to produce
- the desired set of revisions is to use the `^@` suffix.
- For instance, if `master` names a merge commit, `git diff master
- master^@` gives the same combined diff as `git show master`.
+ more commits should be its parents. Convenient ways to produce
+ the desired set of revisions are to use the suffixes `^@` and
+ `^!`. If A is a merge commit, then `git diff A A^@`,
+ `git diff A^!` and `git show A` all give the same combined diff.
'git diff' [<options>] <commit>..<commit> [--] [<path>...]::
diff --git a/Documentation/git-fast-export.txt b/Documentation/git-fast-export.txt
index 1978dbdc6a..4643ddbe68 100644
--- a/Documentation/git-fast-export.txt
+++ b/Documentation/git-fast-export.txt
@@ -9,7 +9,7 @@ git-fast-export - Git data exporter
SYNOPSIS
--------
[verse]
-'git fast-export [<options>]' | 'git fast-import'
+'git fast-export' [<options>] | 'git fast-import'
DESCRIPTION
-----------
diff --git a/Documentation/git-fsck.txt b/Documentation/git-fsck.txt
index 29318ea957..b6a0f8a085 100644
--- a/Documentation/git-fsck.txt
+++ b/Documentation/git-fsck.txt
@@ -152,6 +152,18 @@ hash mismatch <object>::
object database value.
This indicates a serious data integrity problem.
+
+FSCK MESSAGES
+-------------
+
+The following lists the types of errors `git fsck` detects, what each
+error means, and their default severity. The severity of an error,
+other than those marked as "(FATAL)", can be tweaked by setting the
+corresponding `fsck.<msg-id>` configuration variable.
+
+include::fsck-msgids.txt[]
+
+
Environment Variables
---------------------
diff --git a/Documentation/git-fsmonitor--daemon.txt b/Documentation/git-fsmonitor--daemon.txt
index cc142fb861..8238eadb0e 100644
--- a/Documentation/git-fsmonitor--daemon.txt
+++ b/Documentation/git-fsmonitor--daemon.txt
@@ -3,7 +3,7 @@ git-fsmonitor{litdd}daemon(1)
NAME
----
-git-fsmonitor--daemon - A Built-in File System Monitor
+git-fsmonitor--daemon - A Built-in Filesystem Monitor
SYNOPSIS
--------
@@ -17,7 +17,7 @@ DESCRIPTION
-----------
A daemon to watch the working directory for file and directory
-changes using platform-specific file system notification facilities.
+changes using platform-specific filesystem notification facilities.
This daemon communicates directly with commands like `git status`
using the link:technical/api-simple-ipc.html[simple IPC] interface
@@ -63,13 +63,44 @@ CAVEATS
-------
The fsmonitor daemon does not currently know about submodules and does
-not know to filter out file system events that happen within a
+not know to filter out filesystem events that happen within a
submodule. If fsmonitor daemon is watching a super repo and a file is
modified within the working directory of a submodule, it will report
the change (as happening against the super repo). However, the client
will properly ignore these extra events, so performance may be affected
but it will not cause an incorrect result.
+By default, the fsmonitor daemon refuses to work against network-mounted
+repositories; this may be overridden by setting `fsmonitor.allowRemote` to
+`true`. Note, however, that the fsmonitor daemon is not guaranteed to work
+correctly with all network-mounted repositories and such use is considered
+experimental.
+
+On Mac OS, the inter-process communication (IPC) between various Git
+commands and the fsmonitor daemon is done via a Unix domain socket (UDS) -- a
+special type of file -- which is supported by native Mac OS filesystems,
+but not by network-mounted filesystems, NTFS, or FAT32. Other filesystems
+may or may not have the needed support; the fsmonitor daemon is not guaranteed
+to work with these filesystems and such use is considered experimental.
+
+By default, the socket is created in the `.git` directory. However, if the
+`.git` directory is on a network-mounted filesystem, it will instead be
+created at `$HOME/.git-fsmonitor-*`, unless `$HOME` itself is on a
+network-mounted filesystem, in which case you must set the configuration
+variable `fsmonitor.socketDir` to the path of a directory on a Mac OS native
+filesystem in which to create the socket file.
+
+If none of the above directories (`.git`, `$HOME`, or `fsmonitor.socketDir`)
+is on a native Mac OS filesystem, the fsmonitor daemon will report an
+error that will cause the daemon and the currently running command to exit.
+
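
For illustration (a sketch; the directory path is hypothetical and must live
on a native Mac OS filesystem, as described above):

-------------------------------------
$ git config fsmonitor.socketDir ~/fsmonitor-sockets
$ git config core.fsmonitor true
-------------------------------------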
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.txt[]
+
+include::config/fsmonitor--daemon.txt[]
+
GIT
---
Part of the linkgit:git[1] suite
diff --git a/Documentation/git-hash-object.txt b/Documentation/git-hash-object.txt
index df9e2c58bd..472b5bb995 100644
--- a/Documentation/git-hash-object.txt
+++ b/Documentation/git-hash-object.txt
@@ -9,7 +9,8 @@ git-hash-object - Compute object ID and optionally creates a blob from a file
SYNOPSIS
--------
[verse]
-'git hash-object' [-t <type>] [-w] [--path=<file>|--no-filters] [--stdin [--literally]] [--] <file>...
+'git hash-object' [-t <type>] [-w] [--path=<file> | --no-filters]
+ [--stdin [--literally]] [--] <file>...
'git hash-object' [-t <type>] [-w] --stdin-paths [--no-filters]
DESCRIPTION
diff --git a/Documentation/git-interpret-trailers.txt b/Documentation/git-interpret-trailers.txt
index 6d6197cd0a..22ff3a603e 100644
--- a/Documentation/git-interpret-trailers.txt
+++ b/Documentation/git-interpret-trailers.txt
@@ -8,8 +8,9 @@ git-interpret-trailers - Add or parse structured information in commit messages
SYNOPSIS
--------
[verse]
-'git interpret-trailers' [<options>] [(--trailer <token>[(=|:)<value>])...] [<file>...]
-'git interpret-trailers' [<options>] [--parse] [<file>...]
+'git interpret-trailers' [--in-place] [--trim-empty]
+ [(--trailer <token>[(=|:)<value>])...]
+ [--parse] [<file>...]
DESCRIPTION
-----------
diff --git a/Documentation/git-ls-files.txt b/Documentation/git-ls-files.txt
index d7986419c2..440043cdb8 100644
--- a/Documentation/git-ls-files.txt
+++ b/Documentation/git-ls-files.txt
@@ -10,8 +10,8 @@ SYNOPSIS
--------
[verse]
'git ls-files' [-z] [-t] [-v] [-f]
- [-c|--cached] [-d|--deleted] [-o|--others] [-i|--|ignored]
- [-s|--stage] [-u|--unmerged] [-k|--|killed] [-m|--modified]
+ [-c|--cached] [-d|--deleted] [-o|--others] [-i|--ignored]
+ [-s|--stage] [-u|--unmerged] [-k|--killed] [-m|--modified]
[--directory [--no-empty-directory]] [--eol]
[--deduplicate]
[-x <pattern>|--exclude=<pattern>]
diff --git a/Documentation/git-maintenance.txt b/Documentation/git-maintenance.txt
index 9c630efe19..805e5a2e3a 100644
--- a/Documentation/git-maintenance.txt
+++ b/Documentation/git-maintenance.txt
@@ -11,7 +11,7 @@ SYNOPSIS
[verse]
'git maintenance' run [<options>]
'git maintenance' start [--scheduler=<scheduler>]
-'git maintenance' (stop|register|unregister)
+'git maintenance' (stop|register|unregister) [<options>]
DESCRIPTION
@@ -50,13 +50,13 @@ stop::
the background maintenance is restarted later.
register::
- Initialize Git config values so any scheduled maintenance will
- start running on this repository. This adds the repository to the
- `maintenance.repo` config variable in the current user's global
- config and enables some recommended configuration values for
- `maintenance.<task>.schedule`. The tasks that are enabled are safe
- for running in the background without disrupting foreground
- processes.
+ Initialize Git config values so any scheduled maintenance will start
+ running on this repository. This adds the repository to the
+ `maintenance.repo` config variable in the current user's global config,
+ or the config specified by the `--config-file` option, and enables some
+ recommended configuration values for `maintenance.<task>.schedule`. The
+ tasks that are enabled are safe for running in the background without
+ disrupting foreground processes.
+
The `register` subcommand will also set the `maintenance.strategy` config
value to `incremental`, if this value is not previously set. The
@@ -79,6 +79,10 @@ unregister::
Remove the current repository from background maintenance. This
only removes the repository from the configured list. It does not
stop the background maintenance processes from running.
++
+The `unregister` subcommand will report an error if the current repository
+is not already registered. Use the `--force` option to return success even
+when the current repository is not registered.
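
For example (a sketch), a script that must not fail when the repository was
never registered can run:

-------------------------------------
$ git maintenance unregister --force
-------------------------------------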
TASKS
-----
diff --git a/Documentation/git-merge-base.txt b/Documentation/git-merge-base.txt
index 2d944e0851..b01ba3d356 100644
--- a/Documentation/git-merge-base.txt
+++ b/Documentation/git-merge-base.txt
@@ -9,8 +9,8 @@ git-merge-base - Find as good common ancestors as possible for a merge
SYNOPSIS
--------
[verse]
-'git merge-base' [-a|--all] <commit> <commit>...
-'git merge-base' [-a|--all] --octopus <commit>...
+'git merge-base' [-a | --all] <commit> <commit>...
+'git merge-base' [-a | --all] --octopus <commit>...
'git merge-base' --is-ancestor <commit> <commit>
'git merge-base' --independent <commit>...
'git merge-base' --fork-point <ref> [<commit>]
diff --git a/Documentation/git-merge-tree.txt b/Documentation/git-merge-tree.txt
index d6c356740e..298c133fdb 100644
--- a/Documentation/git-merge-tree.txt
+++ b/Documentation/git-merge-tree.txt
@@ -64,6 +64,11 @@ OPTIONS
share no common history. This flag can be given to override that
check and make the merge proceed anyway.
+--merge-base=<commit>::
+ Instead of finding the merge-bases for <branch1> and <branch2>,
+ specify a merge-base for the merge. This option is incompatible
+ with `--stdin`.
+
[[OUTPUT]]
OUTPUT
------
@@ -81,6 +86,31 @@ Whereas for a conflicted merge, the output is by default of the form:
These are discussed individually below.
+However, there is an exception. If `--stdin` is passed, then there is
+an extra section at the beginning, a NUL character at the end, and then
+all the sections repeat for each line of input. Thus, if the first merge
+is conflicted and the second is clean, the output would be of the form:
+
+ <Merge status>
+ <OID of toplevel tree>
+ <Conflicted file info>
+ <Informational messages>
+ NUL
+ <Merge status>
+ <OID of toplevel tree>
+ NUL
+
+[[MS]]
+Merge status
+~~~~~~~~~~~~
+
+This is an integer status followed by a NUL character. The integer status is:
+
+ 0: merge had conflicts
+ 1: merge was clean
+ <0: something prevented the merge from running (e.g. access to repository
+ objects denied by filesystem)
+
[[OIDTLT]]
OID of toplevel tree
~~~~~~~~~~~~~~~~~~~~
@@ -108,18 +138,50 @@ character instead of a newline character.
Informational messages
~~~~~~~~~~~~~~~~~~~~~~
-This always starts with a blank line (or NUL if `-z` is passed) to
-separate it from the previous sections, and then has free-form
-messages about the merge, such as:
+This section provides informational messages, typically about
+conflicts. The format of the section varies significantly depending
+on whether `-z` is passed.
+
+If `-z` is passed:
+
+The output format is zero or more conflict informational records, each
+of the form:
+
+ <list-of-paths><conflict-type>NUL<conflict-message>NUL
+
+where <list-of-paths> is of the form
+
+ <number-of-paths>NUL<path1>NUL<path2>NUL...<pathN>NUL
+
+and includes paths (or branch names) affected by the conflict or
+informational message in <conflict-message>. Also, <conflict-type> is a
+stable string explaining the type of conflict, such as
+
+ * "Auto-merging"
+ * "CONFLICT (rename/delete)"
+ * "CONFLICT (submodule lacks merge base)"
+ * "CONFLICT (binary)"
+
+and <conflict-message> is a more detailed message about the conflict which often
+(but not always) embeds the <conflict-type> within it. These
+strings may change in future Git versions. Some examples:
* "Auto-merging <file>"
* "CONFLICT (rename/delete): <oldfile> renamed...but deleted in..."
- * "Failed to merge submodule <submodule> (<reason>)"
+ * "Failed to merge submodule <submodule> (no merge base)"
* "Warning: cannot merge binary files: <filename>"
-Note that these free-form messages will never have a NUL character
-in or between them, even if -z is passed. It is simply a large block
-of text taking up the remainder of the output.
+If `-z` is NOT passed:
+
+This section starts with a blank line to separate it from the previous
+sections, and then only contains the <conflict-message> information
+from the previous section (separated by newlines). These are
+non-stable strings that should not be parsed by scripts, and are just
+meant for human consumption. Also, note that while <conflict-message>
+strings usually do not contain embedded newlines, they sometimes do.
+(However, the free-form messages will never have an embedded NUL
+character). So, the entire block of information is meant for human
+readers as an agglomeration of all conflict messages.
EXIT STATUS
-----------
@@ -127,7 +189,10 @@ EXIT STATUS
For a successful, non-conflicted merge, the exit status is 0. When the
merge has conflicts, the exit status is 1. If the merge is not able to
complete (or start) due to some kind of error, the exit status is
-something other than 0 or 1 (and the output is unspecified).
+something other than 0 or 1 (and the output is unspecified). When
+--stdin is passed, the return status is 0 for both successful and
+conflicted merges, and something other than 0 or 1 if it cannot complete
+all the requested merges.
USAGE NOTES
-----------
@@ -156,6 +221,17 @@ with linkgit:git-merge[1]:
* any messages that would have been printed to stdout (the
<<IM,Informational messages>>)
+INPUT FORMAT
+------------
+The input format of 'git merge-tree --stdin' is fully text based. Each
+line has this format:
+
+ [<base-commit> -- ]<branch1> <branch2>
+
+If a line contains the `--` separator, the string before the separator
+specifies a merge base for the merge, and the string after the separator
+names the branches to be merged.
+
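
As a sketch of this input format (branch and base names are hypothetical),
two merges can be requested in one invocation:

-------------------------------------
$ printf '%s\n' 'topic1 topic2' 'base -- topic3 topic4' |
	git merge-tree --stdin
-------------------------------------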
MISTAKES TO AVOID
-----------------
diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt
index a48c3d5ea6..3696506eb3 100644
--- a/Documentation/git-multi-pack-index.txt
+++ b/Documentation/git-multi-pack-index.txt
@@ -70,9 +70,10 @@ verify::
Verify the contents of the MIDX file.
expire::
- Delete the pack-files that are tracked by the MIDX file, but
- have no objects referenced by the MIDX. Rewrite the MIDX file
- afterward to remove all references to these pack-files.
+ Delete the pack-files that are tracked by the MIDX file, but
+ have no objects referenced by the MIDX (with the exception of
+ `.keep` packs and cruft packs). Rewrite the MIDX file afterward
+ to remove all references to these pack-files.
repack::
Create a new pack-file containing objects in small pack-files
diff --git a/Documentation/git-mv.txt b/Documentation/git-mv.txt
index 79449bf98f..fb0220fd18 100644
--- a/Documentation/git-mv.txt
+++ b/Documentation/git-mv.txt
@@ -9,7 +9,7 @@ git-mv - Move or rename a file, a directory, or a symlink
SYNOPSIS
--------
[verse]
-'git mv' <options>... <args>...
+'git mv' [<options>] <source>... <destination>
DESCRIPTION
-----------
@@ -30,7 +30,7 @@ OPTIONS
-------
-f::
--force::
- Force renaming or moving of a file even if the target exists
+ Force renaming or moving of a file even if the <destination> exists.
-k::
Skip move or rename actions which would lead to an error
condition. An error happens when a source is neither existing nor
diff --git a/Documentation/git-notes.txt b/Documentation/git-notes.txt
index efbc10f0f5..50b198c2b2 100644
--- a/Documentation/git-notes.txt
+++ b/Documentation/git-notes.txt
@@ -11,7 +11,7 @@ SYNOPSIS
'git notes' [list [<object>]]
'git notes' add [-f] [--allow-empty] [-F <file> | -m <msg> | (-c | -C) <object>] [<object>]
'git notes' copy [-f] ( --stdin | <from-object> [<to-object>] )
-'git notes' append [--allow-empty] [-F <file> | -m <msg> | (-c | -C) <object>] [<object>]
+'git notes' append [--allow-empty] [--no-blank-line] [-F <file> | -m <msg> | (-c | -C) <object>] [<object>]
'git notes' edit [--allow-empty] [<object>]
'git notes' show [<object>]
'git notes' merge [-v | -q] [-s <strategy> ] <notes-ref>
@@ -86,7 +86,9 @@ the command can read the input given to the `post-rewrite` hook.)
append::
Append to the notes of an existing object (defaults to HEAD).
- Creates a new notes object if needed.
+ Creates a new notes object if needed. If the note of the given
+ object and the note to be appended are not empty, a blank line
+ will be inserted between them.
edit::
Edit the notes for a given object (defaults to HEAD).
@@ -159,6 +161,10 @@ OPTIONS
Allow an empty note object to be stored. The default behavior is
to automatically remove empty notes.
+--no-blank-line::
+ Do not insert a blank line before the inserted notes (inserting
+ a blank line is the default).
+
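
For instance (a sketch; the message text is arbitrary):

-------------------------------------
$ git notes append --no-blank-line -m "second half of the note"
-------------------------------------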
--ref <ref>::
Manipulate the notes tree in <ref>. This overrides
`GIT_NOTES_REF` and the "core.notesRef" configuration. The ref
diff --git a/Documentation/git-pack-redundant.txt b/Documentation/git-pack-redundant.txt
index ee7034b5e5..99ef13839d 100644
--- a/Documentation/git-pack-redundant.txt
+++ b/Documentation/git-pack-redundant.txt
@@ -9,7 +9,7 @@ git-pack-redundant - Find redundant pack files
SYNOPSIS
--------
[verse]
-'git pack-redundant' [ --verbose ] [ --alt-odb ] ( --all | <pack-filename>... )
+'git pack-redundant' [--verbose] [--alt-odb] (--all | <pack-filename>...)
DESCRIPTION
-----------
@@ -34,7 +34,7 @@ OPTIONS
--alt-odb::
Don't require objects present in packs from alternate object
- directories to be present in local packs.
+ database (odb) directories to be present in local packs.
--verbose::
Outputs some statistics to stderr. Has a small performance penalty.
diff --git a/Documentation/git-patch-id.txt b/Documentation/git-patch-id.txt
index 442caff8a9..1d15fa45d5 100644
--- a/Documentation/git-patch-id.txt
+++ b/Documentation/git-patch-id.txt
@@ -8,18 +8,18 @@ git-patch-id - Compute unique ID for a patch
SYNOPSIS
--------
[verse]
-'git patch-id' [--stable | --unstable]
+'git patch-id' [--stable | --unstable | --verbatim]
DESCRIPTION
-----------
Read a patch from the standard input and compute the patch ID for it.
A "patch ID" is nothing but a sum of SHA-1 of the file diffs associated with a
-patch, with whitespace and line numbers ignored. As such, it's "reasonably
-stable", but at the same time also reasonably unique, i.e., two patches that
-have the same "patch ID" are almost guaranteed to be the same thing.
+patch, with line numbers ignored. As such, it's "reasonably stable", but at
+the same time also reasonably unique, i.e., two patches that have the same
+"patch ID" are almost guaranteed to be the same thing.
-IOW, you can use this thing to look for likely duplicate commits.
+The main usecase for this command is to look for likely duplicate commits.
When dealing with 'git diff-tree' output, it takes advantage of
the fact that the patch is prefixed with the object name of the
@@ -30,6 +30,12 @@ This can be used to make a mapping from patch ID to commit ID.
OPTIONS
-------
+--verbatim::
+ Calculate the patch-id of the input as it is given; do not strip
+ any whitespace.
+
+ This is the default if patchid.verbatim is true.
+
--stable::
Use a "stable" sum of hashes as the patch ID. With this option:
- Reordering file diffs that make up a patch does not affect the ID.
@@ -45,14 +51,16 @@ OPTIONS
of "-O<orderfile>", thereby making existing databases storing such
"unstable" or historical patch-ids unusable.
+ - All whitespace within the patch is ignored and does not affect the id.
+
This is the default if patchid.stable is set to true.
--unstable::
Use an "unstable" hash as the patch ID. With this option,
the result produced is compatible with the patch-id value produced
- by git 1.9 and older. Users with pre-existing databases storing
- patch-ids produced by git 1.9 and older (who do not deal with reordered
- patches) may want to use this option.
+ by git 1.9 and older, and whitespace is ignored. Users with pre-existing
+ databases storing patch-ids produced by git 1.9 and older (who do not deal
+ with reordered patches) may want to use this option.
This is the default.
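
As a usage sketch of the options above, the patch ID of the latest commit can
be computed without stripping whitespace:

-------------------------------------
$ git show HEAD | git patch-id --verbatim
-------------------------------------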
diff --git a/Documentation/git-prune-packed.txt b/Documentation/git-prune-packed.txt
index 9fed59a317..844d6f808a 100644
--- a/Documentation/git-prune-packed.txt
+++ b/Documentation/git-prune-packed.txt
@@ -9,7 +9,7 @@ git-prune-packed - Remove extra objects that are already in pack files
SYNOPSIS
--------
[verse]
-'git prune-packed' [-n|--dry-run] [-q|--quiet]
+'git prune-packed' [-n | --dry-run] [-q | --quiet]
DESCRIPTION
diff --git a/Documentation/git-push.txt b/Documentation/git-push.txt
index def7657ef9..5bb1d5aae2 100644
--- a/Documentation/git-push.txt
+++ b/Documentation/git-push.txt
@@ -409,10 +409,14 @@ Specifying `--no-force-if-includes` disables this behavior.
all submodules that changed in the revisions to be pushed will be
pushed. If on-demand was not able to push all necessary revisions it will
also be aborted and exit with non-zero status. If 'only' is used all
- submodules will be recursively pushed while the superproject is left
+ submodules will be pushed while the superproject is left
unpushed. A value of 'no' or using `--no-recurse-submodules` can be used
to override the push.recurseSubmodules configuration variable when no
submodule recursion is required.
++
+When using 'on-demand' or 'only', if a submodule has a
+"push.recurseSubmodules={on-demand,only}" or "submodule.recurse" configuration,
+further recursion will occur. In this case, "only" is treated as "on-demand".
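
For example (a sketch; remote and refspec are omitted), submodules that
changed in the pushed revisions can be pushed on demand:

-------------------------------------
$ git push --recurse-submodules=on-demand
-------------------------------------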
--[no-]verify::
Toggle the pre-push hook (see linkgit:githooks[5]). The
diff --git a/Documentation/git-read-tree.txt b/Documentation/git-read-tree.txt
index b9bfdc0a31..7567955bad 100644
--- a/Documentation/git-read-tree.txt
+++ b/Documentation/git-read-tree.txt
@@ -9,7 +9,7 @@ git-read-tree - Reads tree information into the index
SYNOPSIS
--------
[verse]
-'git read-tree' [[-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>]
+'git read-tree' [(-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>)
[-u | -i]] [--index-output=<file>] [--no-sparse-checkout]
(--empty | <tree-ish1> [<tree-ish2> [<tree-ish3>]])
diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt
index 9cb8931c7a..f9675bd24e 100644
--- a/Documentation/git-rebase.txt
+++ b/Documentation/git-rebase.txt
@@ -218,12 +218,14 @@ leave out at most one of A and B, in which case it defaults to HEAD.
merge base of `<upstream>` and `<branch>`. Running
`git rebase --keep-base <upstream> <branch>` is equivalent to
running
- `git rebase --onto <upstream>...<branch> <upstream> <branch>`.
+ `git rebase --reapply-cherry-picks --no-fork-point --onto <upstream>...<branch> <upstream> <branch>`.
+
This option is useful in the case where one is developing a feature on
top of an upstream branch. While the feature is being worked on, the
upstream branch may advance and it may not be the best idea to keep
-rebasing on top of the upstream but to keep the base commit as-is.
+rebasing on top of the upstream but to keep the base commit as-is. As
+the base commit is unchanged this option implies `--reapply-cherry-picks`
+to avoid losing commits.
+
Although both this option and `--fork-point` find the merge base between
`<upstream>` and `<branch>`, this option uses the merge base as the _starting
@@ -278,7 +280,8 @@ See also INCOMPATIBLE OPTIONS below.
Note that commits which start empty are kept (unless `--no-keep-empty`
is specified), and commits which are clean cherry-picks (as determined
by `git log --cherry-mark ...`) are detected and dropped as a
-preliminary step (unless `--reapply-cherry-picks` is passed).
+preliminary step (unless `--reapply-cherry-picks` or `--keep-base` is
+passed).
+
See also INCOMPATIBLE OPTIONS below.
@@ -311,13 +314,16 @@ See also INCOMPATIBLE OPTIONS below.
upstream changes, the behavior towards them is controlled by
the `--empty` flag.)
+
-By default (or if `--no-reapply-cherry-picks` is given), these commits
-will be automatically dropped. Because this necessitates reading all
-upstream commits, this can be expensive in repos with a large number
-of upstream commits that need to be read. When using the 'merge'
-backend, warnings will be issued for each dropped commit (unless
-`--quiet` is given). Advice will also be issued unless
-`advice.skippedCherryPicks` is set to false (see linkgit:git-config[1]).
+
+In the absence of `--keep-base` (or if `--no-reapply-cherry-picks` is
+given), these commits will be automatically dropped. Because this
+necessitates reading all upstream commits, this can be expensive in
+repositories with a large number of upstream commits that need to be
+read. When using the 'merge' backend, warnings will be issued for each
+dropped commit (unless `--quiet` is given). Advice will also be issued
+unless `advice.skippedCherryPicks` is set to false (see
+linkgit:git-config[1]).
+
+
`--reapply-cherry-picks` allows rebase to forgo reading all upstream
commits, potentially improving performance.
@@ -443,9 +449,9 @@ When `--fork-point` is active, 'fork_point' will be used instead of
<branch>` command (see linkgit:git-merge-base[1]). If 'fork_point'
ends up being empty, the `<upstream>` will be used as a fallback.
+
-If `<upstream>` is given on the command line, then the default is
-`--no-fork-point`, otherwise the default is `--fork-point`. See also
-`rebase.forkpoint` in linkgit:git-config[1].
+If `<upstream>` or `--keep-base` is given on the command line, then
+the default is `--no-fork-point`, otherwise the default is
+`--fork-point`. See also `rebase.forkpoint` in linkgit:git-config[1].
+
If your branch was based on `<upstream>` but `<upstream>` was rewound and
your branch contains commits which were dropped, this option can be used
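
Tying together the `--keep-base` behavior described earlier, a sketch
(upstream and branch names are hypothetical):

-------------------------------------
$ git rebase --keep-base origin/main my-topic
-------------------------------------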
diff --git a/Documentation/git-receive-pack.txt b/Documentation/git-receive-pack.txt
index 014a78409b..65ff518ccf 100644
--- a/Documentation/git-receive-pack.txt
+++ b/Documentation/git-receive-pack.txt
@@ -9,7 +9,7 @@ git-receive-pack - Receive what is pushed into the repository
SYNOPSIS
--------
[verse]
-'git-receive-pack' <directory>
+'git receive-pack' <git-dir>
DESCRIPTION
-----------
@@ -38,7 +38,7 @@ its behavior, see linkgit:git-config[1].
OPTIONS
-------
-<directory>::
+<git-dir>::
The repository to sync into.
--http-backend-info-refs::
diff --git a/Documentation/git-reflog.txt b/Documentation/git-reflog.txt
index db9d46edfa..ec64cbff4c 100644
--- a/Documentation/git-reflog.txt
+++ b/Documentation/git-reflog.txt
@@ -9,15 +9,7 @@ git-reflog - Manage reflog information
SYNOPSIS
--------
[verse]
-'git reflog' <subcommand> <options>
-
-DESCRIPTION
------------
-The command takes various subcommands, and different options
-depending on the subcommand:
-
-[verse]
-'git reflog' ['show'] [<log-options>] [<ref>]
+'git reflog' [show] [<log-options>] [<ref>]
'git reflog expire' [--expire=<time>] [--expire-unreachable=<time>]
[--rewrite] [--updateref] [--stale-fix]
[--dry-run | -n] [--verbose] [--all [--single-worktree] | <refs>...]
@@ -25,6 +17,10 @@ depending on the subcommand:
[--dry-run | -n] [--verbose] <ref>@{<specifier>}...
'git reflog exists' <ref>
+DESCRIPTION
+-----------
+This command manages the information recorded in the reflogs.
+
Reference logs, or "reflogs", record when the tips of branches and
other references were updated in the local repository. Reflogs are
useful in various Git commands, to specify the old value of a
@@ -33,7 +29,8 @@ moves ago", `master@{one.week.ago}` means "where master used to point
to one week ago in this local repository", and so on. See
linkgit:gitrevisions[7] for more details.
-This command manages the information recorded in the reflogs.
+The command takes various subcommands, and different options
+depending on the subcommand:
The "show" subcommand (which is also the default, in the absence of
any subcommands) shows the log of the reference provided in the
diff --git a/Documentation/git-repack.txt b/Documentation/git-repack.txt
index 0bf13893d8..4017157949 100644
--- a/Documentation/git-repack.txt
+++ b/Documentation/git-repack.txt
@@ -74,6 +74,12 @@ to the new separate pack will be written.
immediately instead of waiting for the next `git gc` invocation.
Only useful with `--cruft -d`.
+--expire-to=<dir>::
+ Write a cruft pack containing pruned objects (if any) to the
+ directory `<dir>`. This option is useful for keeping a copy of
+ any pruned objects in a separate directory as a backup. Only
+ useful with `--cruft -d`.
+
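
For example (a sketch; the backup directory is hypothetical):

-------------------------------------
$ git repack --cruft -d --expire-to=/path/to/backup
-------------------------------------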
-l::
Pass the `--local` option to 'git pack-objects'. See
linkgit:git-pack-objects[1].
diff --git a/Documentation/git-rerere.txt b/Documentation/git-rerere.txt
index 4cfc883378..992b469270 100644
--- a/Documentation/git-rerere.txt
+++ b/Documentation/git-rerere.txt
@@ -8,7 +8,7 @@ git-rerere - Reuse recorded resolution of conflicted merges
SYNOPSIS
--------
[verse]
-'git rerere' ['clear'|'forget' <pathspec>|'diff'|'remaining'|'status'|'gc']
+'git rerere' [clear | forget <pathspec>... | diff | status | remaining | gc]
DESCRIPTION
-----------
diff --git a/Documentation/git-rev-list.txt b/Documentation/git-rev-list.txt
index 20bb8e8217..51029a2271 100644
--- a/Documentation/git-rev-list.txt
+++ b/Documentation/git-rev-list.txt
@@ -9,7 +9,7 @@ git-rev-list - Lists commit objects in reverse chronological order
SYNOPSIS
--------
[verse]
-'git rev-list' [<options>] <commit>... [[--] <path>...]
+'git rev-list' [<options>] <commit>... [--] [<path>...]
DESCRIPTION
-----------
diff --git a/Documentation/git-rev-parse.txt b/Documentation/git-rev-parse.txt
index 6b8ca085aa..bcd8069287 100644
--- a/Documentation/git-rev-parse.txt
+++ b/Documentation/git-rev-parse.txt
@@ -197,6 +197,13 @@ respectively, and they must begin with `refs/` when applied to `--glob`
or `--all`. If a trailing '/{asterisk}' is intended, it must be given
explicitly.
+--exclude-hidden=[receive|uploadpack]::
+ Do not include refs that would be hidden by `git-receive-pack` or
+ `git-upload-pack` by consulting the appropriate `receive.hideRefs` or
+ `uploadpack.hideRefs` configuration along with `transfer.hideRefs` (see
+ linkgit:git-config[1]). This option affects the next pseudo-ref option
+ `--all` or `--glob` and is cleared after processing them.
+
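
As a sketch of the option described above, refs hidden from
`git-upload-pack` can be excluded while listing all refs:

-------------------------------------
$ git rev-parse --exclude-hidden=uploadpack --all
-------------------------------------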
--disambiguate=<prefix>::
Show every object whose name begins with the given prefix.
The <prefix> must be at least 4 hexadecimal digits long to
diff --git a/Documentation/git-send-email.txt b/Documentation/git-send-email.txt
index 3290043053..765b2df853 100644
--- a/Documentation/git-send-email.txt
+++ b/Documentation/git-send-email.txt
@@ -178,9 +178,18 @@ Sending
for `sendmail` in `/usr/sbin`, `/usr/lib` and $PATH.
--smtp-encryption=<encryption>::
- Specify the encryption to use, either 'ssl' or 'tls'. Any other
- value reverts to plain SMTP. Default is the value of
- `sendemail.smtpEncryption`.
+ Specify how encryption is set up for the SMTP connection.
+ Valid values are 'ssl' and 'tls'. Any other value reverts to plain
+ (unencrypted) SMTP, which defaults to port 25.
+ Despite the names, both values use the same, current version of TLS;
+ the names are kept for historical reasons. 'ssl' refers to "implicit"
+ encryption (sometimes called SMTPS), which uses port 465 by default.
+ 'tls' refers to "explicit" encryption (often known as STARTTLS),
+ which uses port 25 by default. The SMTP server may also use other,
+ non-default ports; a commonly found alternative port for 'tls' and
+ unencrypted SMTP is 587. Check your provider's documentation or your
+ server configuration to be sure for your own case. Default is the
+ value of `sendemail.smtpEncryption`.
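
For illustration (a sketch; the server, port, and patch file are
hypothetical), explicit STARTTLS on the commonly used alternative port:

-------------------------------------
$ git send-email --smtp-server=smtp.example.com --smtp-server-port=587 \
	--smtp-encryption=tls 0001-example.patch
-------------------------------------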
--smtp-domain=<FQDN>::
Specifies the Fully Qualified Domain Name (FQDN) used in the
diff --git a/Documentation/git-send-pack.txt b/Documentation/git-send-pack.txt
index be41f11974..595b002152 100644
--- a/Documentation/git-send-pack.txt
+++ b/Documentation/git-send-pack.txt
@@ -9,9 +9,10 @@ git-send-pack - Push objects over Git protocol to another repository
SYNOPSIS
--------
[verse]
-'git send-pack' [--dry-run] [--force] [--receive-pack=<git-receive-pack>]
+'git send-pack' [--mirror] [--dry-run] [--force]
+ [--receive-pack=<git-receive-pack>]
[--verbose] [--thin] [--atomic]
- [--[no-]signed|--signed=(true|false|if-asked)]
+ [--[no-]signed | --signed=(true|false|if-asked)]
[<host>:]<directory> (--all | <ref>...)
DESCRIPTION
diff --git a/Documentation/git-shortlog.txt b/Documentation/git-shortlog.txt
index f64e77047b..7d0277d033 100644
--- a/Documentation/git-shortlog.txt
+++ b/Documentation/git-shortlog.txt
@@ -47,6 +47,11 @@ OPTIONS
Each pretty-printed commit will be rewrapped before it is shown.
+--date=<format>::
+ Show dates formatted according to the given date string. (See
+ the `--date` option in the "Commit Formatting" section of
+ linkgit:git-log[1]). Useful with `--group=format:<format>`.
+
--group=<type>::
Group commits based on `<type>`. If no `--group` option is
specified, the default is `author`. `<type>` is one of:
@@ -59,6 +64,9 @@ OPTIONS
example, if your project uses `Reviewed-by` trailers, you might want
to see who has been reviewing with
`git shortlog -ns --group=trailer:reviewed-by`.
+ - `format:<format>`, any string accepted by the `--format` option of
+ 'git log'. (See the "PRETTY FORMATS" section of
+ linkgit:git-log[1].)
+
Note that commits that do not include the trailer will not be counted.
Likewise, commits with multiple trailers (e.g., multiple signoffs) may
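
As a sketch combining the two additions above (the format strings are
arbitrary), commits can be counted per author and year:

-------------------------------------
$ git shortlog -ns --date='format:%Y' --group='format:%aN (%ad)'
-------------------------------------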
diff --git a/Documentation/git-show-branch.txt b/Documentation/git-show-branch.txt
index e5ec6b467f..71f608b1ff 100644
--- a/Documentation/git-show-branch.txt
+++ b/Documentation/git-show-branch.txt
@@ -8,12 +8,12 @@ git-show-branch - Show branches and their commits
SYNOPSIS
--------
[verse]
-'git show-branch' [-a|--all] [-r|--remotes] [--topo-order | --date-order]
+'git show-branch' [-a | --all] [-r | --remotes] [--topo-order | --date-order]
[--current] [--color[=<when>] | --no-color] [--sparse]
[--more=<n> | --list | --independent | --merge-base]
[--no-name | --sha1-name] [--topics]
[(<rev> | <glob>)...]
-'git show-branch' (-g|--reflog)[=<n>[,<base>]] [--list] [<ref>]
+'git show-branch' (-g | --reflog)[=<n>[,<base>]] [--list] [<ref>]
DESCRIPTION
-----------
diff --git a/Documentation/git-show-ref.txt b/Documentation/git-show-ref.txt
index ab4d271925..d1d56f68b4 100644
--- a/Documentation/git-show-ref.txt
+++ b/Documentation/git-show-ref.txt
@@ -8,8 +8,8 @@ git-show-ref - List references in a local repository
SYNOPSIS
--------
[verse]
-'git show-ref' [-q|--quiet] [--verify] [--head] [-d|--dereference]
- [-s|--hash[=<n>]] [--abbrev[=<n>]] [--tags]
+'git show-ref' [-q | --quiet] [--verify] [--head] [-d | --dereference]
+ [-s | --hash[=<n>]] [--abbrev[=<n>]] [--tags]
[--heads] [--] [<pattern>...]
'git show-ref' --exclude-existing[=<pattern>]
diff --git a/Documentation/git-sparse-checkout.txt b/Documentation/git-sparse-checkout.txt
index 3776705bf5..68392d2a56 100644
--- a/Documentation/git-sparse-checkout.txt
+++ b/Documentation/git-sparse-checkout.txt
@@ -9,7 +9,7 @@ git-sparse-checkout - Reduce your working tree to a subset of tracked files
SYNOPSIS
--------
[verse]
-'git sparse-checkout <subcommand> [<options>]'
+'git sparse-checkout' (init | list | set | add | reapply | disable) [<options>]
DESCRIPTION
diff --git a/Documentation/git-stash.txt b/Documentation/git-stash.txt
index c5d7091828..f4bb6114d9 100644
--- a/Documentation/git-stash.txt
+++ b/Documentation/git-stash.txt
@@ -9,17 +9,20 @@ SYNOPSIS
--------
[verse]
'git stash' list [<log-options>]
-'git stash' show [-u|--include-untracked|--only-untracked] [<diff-options>] [<stash>]
-'git stash' drop [-q|--quiet] [<stash>]
-'git stash' ( pop | apply ) [--index] [-q|--quiet] [<stash>]
+'git stash' show [-u | --include-untracked | --only-untracked] [<diff-options>] [<stash>]
+'git stash' drop [-q | --quiet] [<stash>]
+'git stash' pop [--index] [-q | --quiet] [<stash>]
+'git stash' apply [--index] [-q | --quiet] [<stash>]
'git stash' branch <branchname> [<stash>]
-'git stash' [push [-p|--patch] [-S|--staged] [-k|--[no-]keep-index] [-q|--quiet]
- [-u|--include-untracked] [-a|--all] [-m|--message <message>]
+'git stash' [push [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]
+ [-u | --include-untracked] [-a | --all] [(-m | --message) <message>]
[--pathspec-from-file=<file> [--pathspec-file-nul]]
[--] [<pathspec>...]]
+'git stash' save [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]
+ [-u | --include-untracked] [-a | --all] [<message>]
'git stash' clear
'git stash' create [<message>]
-'git stash' store [-m|--message <message>] [-q|--quiet] <commit>
+'git stash' store [(-m | --message) <message>] [-q | --quiet] <commit>
DESCRIPTION
-----------
@@ -47,7 +50,7 @@ stash index (e.g. the integer `n` is equivalent to `stash@{n}`).
COMMANDS
--------
-push [-p|--patch] [-S|--staged] [-k|--[no-]keep-index] [-u|--include-untracked] [-a|--all] [-q|--quiet] [-m|--message <message>] [--pathspec-from-file=<file> [--pathspec-file-nul]] [--] [<pathspec>...]::
+push [-p|--patch] [-S|--staged] [-k|--[no-]keep-index] [-u|--include-untracked] [-a|--all] [-q|--quiet] [(-m|--message) <message>] [--pathspec-from-file=<file> [--pathspec-file-nul]] [--] [<pathspec>...]::
Save your local modifications to a new 'stash entry' and roll them
back to HEAD (in the working tree and in the index).
diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt
index 54a4b29b47..570c36e07c 100644
--- a/Documentation/git-status.txt
+++ b/Documentation/git-status.txt
@@ -9,7 +9,7 @@ git-status - Show the working tree status
SYNOPSIS
--------
[verse]
-'git status' [<options>...] [--] [<pathspec>...]
+'git status' [<options>] [--] [<pathspec>...]
DESCRIPTION
-----------
@@ -457,6 +457,65 @@ during the write may conflict with other simultaneous processes, causing
them to fail. Scripts running `status` in the background should consider
using `git --no-optional-locks status` (see linkgit:git[1] for details).
+UNTRACKED FILES AND STATUS SPEED
+--------------------------------
+
+`git status` can be very slow in large worktrees if/when it
+needs to search for untracked files and directories. There are
+many configuration options available to speed this up by either
+avoiding the work or making use of cached results from previous
+Git commands. There is no single optimum set of settings right
+for everyone. Here is a brief summary of the relevant options
+to help you choose which is right for you.
+
+* First, you may want to run `git status` again. Your current
+ configuration may already be caching `git status` results,
+ so it could be faster on subsequent runs.
+
+* The `--untracked-files=no` flag or the
+ `status.showUntrackedFiles=false` config (see above for both) :
+ indicate that `git status` should not report untracked
+ files. This is the fastest option. `git status` will not list
+ the untracked files, so you need to be careful to remember if
+ you create any new files and manually `git add` them.
+
+* `advice.statusUoption=false` (see linkgit:git-config[1]) :
+ this config option disables a warning message when the search
+ for untracked files takes longer than desired. In some large
+ repositories, this message may appear frequently and not be a
+ helpful signal.
+
+* `core.untrackedCache=true` (see linkgit:git-update-index[1]) :
+ enable the untracked cache feature and only search directories
+ that have been modified since the previous `git status` command.
+ Git remembers the set of untracked files within each directory
+ and assumes that if a directory has not been modified, then
+ the set of untracked files within has not changed. This is much
+ faster than enumerating the contents of every directory, but still
+ not without cost, because Git still has to search for the set of
+ modified directories. The untracked cache is stored in the
+ `.git/index` file. The reduced cost of searching for untracked
+ files is offset slightly by the increased size of the index and
+ the cost of keeping it up-to-date. That reduced search time is
+ usually worth the additional size.
+
+* `core.untrackedCache=true` and `core.fsmonitor=true` or
+ `core.fsmonitor=<hook_command_pathname>` (see
+ linkgit:git-update-index[1]) : enable both the untracked cache
+ and FSMonitor features and only search directories that have
+ been modified since the previous `git status` command. This
+ is faster than using just the untracked cache alone because
+ Git can also avoid searching for modified directories. Git
+ only has to enumerate the exact set of directories that have
+ changed recently. While the FSMonitor feature can be enabled
+ without the untracked cache, the benefits are greatly reduced
+ in that case.
+
+Note that after you turn on the untracked cache and/or FSMonitor
+features it may take a few `git status` commands for the various
+caches to warm up before you see improved command times. This is
+normal.
+
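
As a sketch of the cache-based options above (whether they help depends on
the repository and platform):

-------------------------------------
$ git config core.untrackedCache true
$ git config core.fsmonitor true
$ git status
-------------------------------------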
SEE ALSO
--------
linkgit:gitignore[5]
diff --git a/Documentation/git-symbolic-ref.txt b/Documentation/git-symbolic-ref.txt
index ef68ad2b71..102c83eb19 100644
--- a/Documentation/git-symbolic-ref.txt
+++ b/Documentation/git-symbolic-ref.txt
@@ -9,7 +9,7 @@ SYNOPSIS
--------
[verse]
'git symbolic-ref' [-m <reason>] <name> <ref>
-'git symbolic-ref' [-q] [--short] <name>
+'git symbolic-ref' [-q] [--short] [--no-recurse] <name>
'git symbolic-ref' --delete [-q] <name>
DESCRIPTION
@@ -46,6 +46,15 @@ OPTIONS
When showing the value of <name> as a symbolic ref, try to shorten the
value, e.g. from `refs/heads/master` to `master`.
+--recurse::
+--no-recurse::
+ When showing the value of <name> as a symbolic ref, if
+ <name> refers to another symbolic ref, follow such a chain
+ of symbolic refs until the result no longer points at a
+ symbolic ref (`--recurse`, which is the default).
+ `--no-recurse` stops after dereferencing only a single level
+ of symbolic ref.
+
-m::
Update the reflog for <name> with <reason>. This is valid only
when creating or updating a symbolic ref.
diff --git a/Documentation/git-tag.txt b/Documentation/git-tag.txt
index 31a97a1b6c..fdc72b5875 100644
--- a/Documentation/git-tag.txt
+++ b/Documentation/git-tag.txt
@@ -9,7 +9,7 @@ git-tag - Create, list, delete or verify a tag object signed with GPG
SYNOPSIS
--------
[verse]
-'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] [-e]
+'git tag' [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>] [-e]
<tagname> [<commit> | <object>]
'git tag' -d <tagname>...
'git tag' [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>]
@@ -26,19 +26,19 @@ to delete, list or verify tags.
Unless `-f` is given, the named tag must not yet exist.
-If one of `-a`, `-s`, or `-u <keyid>` is passed, the command
+If one of `-a`, `-s`, or `-u <key-id>` is passed, the command
creates a 'tag' object, and requires a tag message. Unless
`-m <msg>` or `-F <file>` is given, an editor is started for the user to type
in the tag message.
-If `-m <msg>` or `-F <file>` is given and `-a`, `-s`, and `-u <keyid>`
+If `-m <msg>` or `-F <file>` is given and `-a`, `-s`, and `-u <key-id>`
are absent, `-a` is implied.
Otherwise, a tag reference that points directly at the given object
(i.e., a lightweight tag) is created.
A GnuPG signed tag object will be created when `-s` or `-u
-<keyid>` is used. When `-u <keyid>` is not used, the
+<key-id>` is used. When `-u <key-id>` is not used, the
committer identity for the current user is used to find the
GnuPG key for signing. The configuration variable `gpg.program`
is used to specify custom GnuPG binary.
@@ -72,8 +72,8 @@ OPTIONS
Override `tag.gpgSign` configuration variable that is
set to force each and every tag to be signed.
--u <keyid>::
---local-user=<keyid>::
+-u <key-id>::
+--local-user=<key-id>::
Make a GPG-signed tag, using the given key.
-f::
@@ -164,14 +164,14 @@ This option is only applicable when listing tags without annotation lines.
Use the given tag message (instead of prompting).
If multiple `-m` options are given, their values are
concatenated as separate paragraphs.
- Implies `-a` if none of `-a`, `-s`, or `-u <keyid>`
+ Implies `-a` if none of `-a`, `-s`, or `-u <key-id>`
is given.
-F <file>::
--file=<file>::
Take the tag message from the given file. Use '-' to
read the message from the standard input.
- Implies `-a` if none of `-a`, `-s`, or `-u <keyid>`
+ Implies `-a` if none of `-a`, `-s`, or `-u <key-id>`
is given.
-e::
@@ -220,7 +220,7 @@ it in the repository configuration as follows:
-------------------------------------
[user]
- signingKey = <gpg-keyid>
+ signingKey = <gpg-key-id>
-------------------------------------
`pager.tag` is only respected when listing tags, i.e., when `-l` is
diff --git a/Documentation/git-update-server-info.txt b/Documentation/git-update-server-info.txt
index 969bb2e15f..17e429dbd0 100644
--- a/Documentation/git-update-server-info.txt
+++ b/Documentation/git-update-server-info.txt
@@ -9,7 +9,7 @@ git-update-server-info - Update auxiliary info file to help dumb servers
SYNOPSIS
--------
[verse]
-'git update-server-info'
+'git update-server-info' [-f | --force]
DESCRIPTION
-----------
@@ -19,6 +19,12 @@ $GIT_OBJECT_DIRECTORY/info directories to help clients discover
what references and packs the server has. This command
generates such auxiliary files.
+OPTIONS
+-------
+-f::
+--force::
+ Update the info files from scratch.
+
OUTPUT
------
diff --git a/Documentation/git-upload-archive.txt b/Documentation/git-upload-archive.txt
index fba0f1c1b2..e8eb10baad 100644
--- a/Documentation/git-upload-archive.txt
+++ b/Documentation/git-upload-archive.txt
@@ -9,7 +9,7 @@ git-upload-archive - Send archive back to git-archive
SYNOPSIS
--------
[verse]
-'git upload-archive' <directory>
+'git upload-archive' <repository>
DESCRIPTION
-----------
@@ -54,7 +54,7 @@ access via non-smart-http.
OPTIONS
-------
-<directory>::
+<repository>::
The repository to get a tar archive from.
GIT
diff --git a/Documentation/git-var.txt b/Documentation/git-var.txt
index 387cc1b914..6aa521fab2 100644
--- a/Documentation/git-var.txt
+++ b/Documentation/git-var.txt
@@ -9,7 +9,7 @@ git-var - Show a Git logical variable
SYNOPSIS
--------
[verse]
-'git var' ( -l | <variable> )
+'git var' (-l | <variable>)
DESCRIPTION
-----------
diff --git a/Documentation/git-verify-commit.txt b/Documentation/git-verify-commit.txt
index 92097f6673..aee4c40eac 100644
--- a/Documentation/git-verify-commit.txt
+++ b/Documentation/git-verify-commit.txt
@@ -8,7 +8,7 @@ git-verify-commit - Check the GPG signature of commits
SYNOPSIS
--------
[verse]
-'git verify-commit' <commit>...
+'git verify-commit' [-v | --verbose] [--raw] <commit>...
DESCRIPTION
-----------
diff --git a/Documentation/git-verify-pack.txt b/Documentation/git-verify-pack.txt
index 61ca6d04c2..b8720dce8a 100644
--- a/Documentation/git-verify-pack.txt
+++ b/Documentation/git-verify-pack.txt
@@ -9,7 +9,7 @@ git-verify-pack - Validate packed Git archive files
SYNOPSIS
--------
[verse]
-'git verify-pack' [-v|--verbose] [-s|--stat-only] [--] <pack>.idx ...
+'git verify-pack' [-v | --verbose] [-s | --stat-only] [--] <pack>.idx...
DESCRIPTION
diff --git a/Documentation/git-verify-tag.txt b/Documentation/git-verify-tag.txt
index 0b8075dad9..81d50ecc4c 100644
--- a/Documentation/git-verify-tag.txt
+++ b/Documentation/git-verify-tag.txt
@@ -8,7 +8,7 @@ git-verify-tag - Check the GPG signature of tags
SYNOPSIS
--------
[verse]
-'git verify-tag' [--format=<format>] <tag>...
+'git verify-tag' [-v | --verbose] [--format=<format>] [--raw] <tag>...
DESCRIPTION
-----------
diff --git a/Documentation/git-worktree.txt b/Documentation/git-worktree.txt
index ada30c86a7..1310bfb564 100644
--- a/Documentation/git-worktree.txt
+++ b/Documentation/git-worktree.txt
@@ -9,7 +9,8 @@ git-worktree - Manage multiple working trees
SYNOPSIS
--------
[verse]
-'git worktree add' [-f] [--detach] [--checkout] [--lock [--reason <string>]] [-b <new-branch>] <path> [<commit-ish>]
+'git worktree add' [-f] [--detach] [--checkout] [--lock [--reason <string>]]
+ [[-b | -B | --orphan] <new-branch>] <path> [<commit-ish>]
'git worktree list' [-v | --porcelain [-z]]
'git worktree lock' [--reason <string>] <worktree>
'git worktree move' <worktree> <new-path>
@@ -94,6 +95,14 @@ exist, a new branch based on `HEAD` is automatically created as if
`-b <branch>` was given. If `<branch>` does exist, it will be checked out
in the new worktree, if it's not checked out anywhere else, otherwise the
command will refuse to create the worktree (unless `--force` is used).
++
+------------
+$ git worktree add --orphan <branch> <path>
+------------
++
+Create a worktree containing an orphan branch named `<branch>` with a
+clean working directory. See `--orphan` in linkgit:git-switch[1] for
+more details.
list::
@@ -221,6 +230,10 @@ This can also be set up as the default behaviour by using the
With `prune`, do not remove anything; just report what it would
remove.
+--orphan <new-branch>::
+ With `add`, create a new orphan branch named `<new-branch>` in the new
+ worktree. See `--orphan` in linkgit:git-switch[1] for details.
+
--porcelain::
With `list`, output in an easy-to-parse format for scripts.
This format will remain stable across Git versions and regardless of user
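
A concrete (hypothetical) instance of the `--orphan` form added to the
synopsis, with made-up branch and path names:

------------
$ git worktree add --orphan new-history ../new-history
------------
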
diff --git a/Documentation/git.txt b/Documentation/git.txt
index 0c15ef3a8e..1d33e083ab 100644
--- a/Documentation/git.txt
+++ b/Documentation/git.txt
@@ -458,7 +458,12 @@ Please see linkgit:gitglossary[7].
Environment Variables
---------------------
-Various Git commands use the following environment variables:
+Various Git commands pay attention to environment variables and change
+their behavior. The environment variables marked as "Boolean" take
+their values the same way as Boolean valued configuration variables, e.g.
+"true", "yes", "on" and positive numbers are taken as "yes".
+
+Here are the variables:
The Git Repository
~~~~~~~~~~~~~~~~~~
@@ -467,13 +472,13 @@ is worth noting that they may be used/overridden by SCMS sitting above
Git so take care if using a foreign front-end.
`GIT_INDEX_FILE`::
- This environment allows the specification of an alternate
+ This environment variable specifies an alternate
index file. If not specified, the default of `$GIT_DIR/index`
is used.
`GIT_INDEX_VERSION`::
- This environment variable allows the specification of an index
- version for new repositories. It won't affect existing index
+ This environment variable specifies what index version is used
+ when writing the index file out. It won't affect existing index
files. By default index file version 2 or 3 is used. See
linkgit:git-update-index[1] for more information.
@@ -530,7 +535,7 @@ double-quotes and respecting backslash escapes. E.g., the value
When run in a directory that does not have ".git" repository
directory, Git tries to find such a directory in the parent
directories to find the top of the working tree, but by default it
- does not cross filesystem boundaries. This environment variable
+ does not cross filesystem boundaries. This Boolean environment variable
can be set to true to tell Git not to stop at filesystem
boundaries. Like `GIT_CEILING_DIRECTORIES`, this will not affect
an explicit repository directory set via `GIT_DIR` or on the
@@ -682,6 +687,11 @@ for further details.
plink or tortoiseplink. This variable overrides the config setting
`ssh.variant` that serves the same purpose.
+`GIT_SSL_NO_VERIFY`::
+ Setting and exporting this environment variable to any value
+ tells Git not to verify the SSL certificate when fetching or
+ pushing over HTTPS.
+
`GIT_ASKPASS`::
If this environment variable is set, then Git commands which need to
acquire passwords or passphrases (e.g. for HTTP or IMAP authentication)
@@ -690,7 +700,7 @@ for further details.
option in linkgit:git-config[1].
`GIT_TERMINAL_PROMPT`::
- If this environment variable is set to `0`, git will not prompt
+ If this Boolean environment variable is set to false, git will not prompt
on the terminal (e.g., when asking for HTTP authentication).
`GIT_CONFIG_GLOBAL`::
@@ -705,13 +715,14 @@ for further details.
`GIT_CONFIG_NOSYSTEM`::
Whether to skip reading settings from the system-wide
- `$(prefix)/etc/gitconfig` file. This environment variable can
+ `$(prefix)/etc/gitconfig` file. This Boolean environment variable can
be used along with `$HOME` and `$XDG_CONFIG_HOME` to create a
predictable environment for a picky script, or you can set it
- temporarily to avoid using a buggy `/etc/gitconfig` file while
+ to true to temporarily avoid using a buggy `/etc/gitconfig` file while
waiting for someone with sufficient permissions to fix it.
`GIT_FLUSH`::
+// NEEDSWORK: make it into a usual Boolean environment variable
If this environment variable is set to "1", then commands such
as 'git blame' (in incremental mode), 'git rev-list', 'git log',
'git check-attr' and 'git check-ignore' will
@@ -852,11 +863,11 @@ for full details.
`GIT_TRACE_REDACT`::
By default, when tracing is activated, Git redacts the values of
cookies, the "Authorization:" header, the "Proxy-Authorization:"
- header and packfile URIs. Set this variable to `0` to prevent this
+ header and packfile URIs. Set this Boolean environment variable to false to prevent this
redaction.
`GIT_LITERAL_PATHSPECS`::
- Setting this variable to `1` will cause Git to treat all
+ Setting this Boolean environment variable to true will cause Git to treat all
pathspecs literally, rather than as glob patterns. For example,
running `GIT_LITERAL_PATHSPECS=1 git log -- '*.c'` will search
for commits that touch the path `*.c`, not any paths that the
@@ -865,15 +876,15 @@ for full details.
`git ls-tree`, `--raw` diff output, etc).
`GIT_GLOB_PATHSPECS`::
- Setting this variable to `1` will cause Git to treat all
+ Setting this Boolean environment variable to true will cause Git to treat all
pathspecs as glob patterns (aka "glob" magic).
`GIT_NOGLOB_PATHSPECS`::
- Setting this variable to `1` will cause Git to treat all
+ Setting this Boolean environment variable to true will cause Git to treat all
pathspecs as literal (aka "literal" magic).
`GIT_ICASE_PATHSPECS`::
- Setting this variable to `1` will cause Git to treat all
+ Setting this Boolean environment variable to true will cause Git to treat all
pathspecs as case-insensitive.
`GIT_REFLOG_ACTION`::
@@ -887,7 +898,7 @@ for full details.
end user, to be recorded in the body of the reflog.
`GIT_REF_PARANOIA`::
- If set to `0`, ignore broken or badly named refs when iterating
+ If this Boolean environment variable is set to false, ignore broken or badly named refs when iterating
over lists of refs. Normally Git will try to include any such
refs, which may cause some operations to fail. This is usually
preferable, as potentially destructive operations (e.g.,
@@ -906,7 +917,7 @@ for full details.
`protocol.allow` in linkgit:git-config[1] for more details.
`GIT_PROTOCOL_FROM_USER`::
- Set to 0 to prevent protocols used by fetch/push/clone which are
+ Set this Boolean environment variable to false to prevent protocols used by fetch/push/clone which are
configured to the `user` state. This is useful to restrict recursive
submodule initialization from an untrusted repository or for programs
which feed potentially-untrusted URLS to git commands. See
@@ -934,7 +945,7 @@ only affects clones and fetches; it is not yet used for pushes (but may
be in the future).
`GIT_OPTIONAL_LOCKS`::
- If set to `0`, Git will complete any requested operation without
+ If this Boolean environment variable is set to false, Git will complete any requested operation without
performing any optional sub-operations that require taking a lock.
For example, this will prevent `git status` from refreshing the
index as a side effect. This is useful for processes running in
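
To illustrate the "Boolean" wording introduced above, the usual Boolean
spellings can be used in place of the bare `0`/`1` that the old text
suggested; a small sketch:

------------
$ GIT_OPTIONAL_LOCKS=false git status          # skip the optional index refresh
$ GIT_TERMINAL_PROMPT=no git fetch origin      # fail rather than prompt for credentials
$ GIT_LITERAL_PATHSPECS=yes git log -- '*.c'   # treat '*.c' literally, not as a glob
------------
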
diff --git a/Documentation/gitcredentials.txt b/Documentation/gitcredentials.txt
index 80517b4eb2..4522471c33 100644
--- a/Documentation/gitcredentials.txt
+++ b/Documentation/gitcredentials.txt
@@ -17,9 +17,10 @@ DESCRIPTION
Git will sometimes need credentials from the user in order to perform
operations; for example, it may need to ask for a username and password
-in order to access a remote repository over HTTP. This manual describes
-the mechanisms Git uses to request these credentials, as well as some
-features to avoid inputting these credentials repeatedly.
+in order to access a remote repository over HTTP. Some remotes accept
+a personal access token or OAuth access token as a password. This
+manual describes the mechanisms Git uses to request these credentials,
+as well as some features to avoid inputting these credentials repeatedly.
REQUESTING CREDENTIALS
----------------------
@@ -61,7 +62,9 @@ for a password. It is generally configured by adding this to your config:
Credential helpers, on the other hand, are external programs from which Git can
request both usernames and passwords; they typically interface with secure
-storage provided by the OS or other programs.
+storage provided by the OS or other programs. Alternatively, a
+credential-generating helper might generate credentials for certain servers via
+some API.
To use a helper, you must first select one to use. Git currently
includes the following helpers:
@@ -269,6 +272,7 @@ stdout in the same format (see linkgit:git-credential[1] for common
attributes). A helper is free to produce a subset, or even no values at
all if it has nothing useful to provide. Any provided attributes will
overwrite those already known about by Git's credential subsystem.
+Unrecognised attributes are silently discarded.
While it is possible to override all attributes, well behaving helpers
should refrain from doing so for any attribute other than username and
@@ -286,8 +290,8 @@ For a `store` or `erase` operation, the helper's output is ignored.
If a helper fails to perform the requested operation or needs to notify
the user of a potential issue, it may write to stderr.
-If it does not support the requested operation (e.g., a read-only store),
-it should silently ignore the request.
+If it does not support the requested operation (e.g., a read-only store
+or generator), it should silently ignore the request.
If a helper receives any other operation, it should silently ignore the
request. This leaves room for future operations to be added (older
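
The attribute stream exchanged with helpers (in which unrecognised attributes
are now documented to be discarded) can be inspected directly with
`git credential`; the host below is a placeholder:

------------
$ printf 'protocol=https\nhost=example.com\n\n' | git credential fill
------------

This prints the attributes (e.g. `username` and `password`) produced by the
configured helpers, prompting if none of them can supply the credential.
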
diff --git a/Documentation/gitformat-chunk.txt b/Documentation/gitformat-chunk.txt
index 57202ede27..ee3718c430 100644
--- a/Documentation/gitformat-chunk.txt
+++ b/Documentation/gitformat-chunk.txt
@@ -24,8 +24,9 @@ how they use the chunks to describe structured data.
A chunk-based file format begins with some header information custom to
that format. That header should include enough information to identify
-the file type, format version, and number of chunks in the file. From this
-information, that file can determine the start of the chunk-based region.
+the file type, format version, and (optionally) the number of chunks in
+the file. From this information, a reader of the file can determine the
+start of the chunk-based region.
The chunk-based region starts with a table of contents describing where
each chunk starts and ends. This consists of (C+1) rows of 12 bytes each,
@@ -51,8 +52,27 @@ The final entry in the table of contents must be four zero bytes. This
confirms that the table of contents is ending and provides the offset for
the end of the chunk-based data.
+The default chunk format assumes the table of contents appears at the
+beginning of the file (after the header information) and the chunks are
+ordered by increasing offset. Alternatively, the chunk format allows the
+table of contents to be placed at the end of the file (before the
+trailing hash), with the offsets in descending order. In this trailing
+table of contents case, the data in order looks instead like the following
+table:
+
+ | Chunk ID (4 bytes) | Chunk Offset (8 bytes) |
+ |--------------------|------------------------|
+ | 0x0000 | OFFSET[C+1] |
+ | ID[C] | OFFSET[C] |
+ | ... | ... |
+ | ID[0] | OFFSET[0] |
+
+A concrete file format that uses the chunk format will state explicitly
+whether it uses a trailing table of contents. By default, the table of
+contents is in ascending order before all chunk data.
+
Note: The chunk-based format expects that the file contains _at least_ a
-trailing hash after `OFFSET[C+1]`.
+trailing hash after either `OFFSET[C+1]` or the trailing table of contents.
Functions for working with chunk-based file formats are declared in
`chunk-format.h`. Using these methods provide extra checks that assist
diff --git a/Documentation/gitformat-commit-graph.txt b/Documentation/gitformat-commit-graph.txt
index 7324665716..31cad585e2 100644
--- a/Documentation/gitformat-commit-graph.txt
+++ b/Documentation/gitformat-commit-graph.txt
@@ -3,7 +3,7 @@ gitformat-commit-graph(5)
NAME
----
-gitformat-commit-graph - Git commit graph format
+gitformat-commit-graph - Git commit-graph format
SYNOPSIS
--------
@@ -14,7 +14,7 @@ $GIT_DIR/objects/info/commit-graphs/*
DESCRIPTION
-----------
-The Git commit graph stores a list of commit OIDs and some associated
+The Git commit-graph stores a list of commit OIDs and some associated
metadata, including:
- The generation number of the commit.
@@ -34,7 +34,7 @@ corresponding to the array position within the list of commit OIDs. Due
to some special constants we use to track parents, we can store at most
(1 << 30) + (1 << 29) + (1 << 28) - 1 (around 1.8 billion) commits.
-== Commit graph files have the following format:
+== Commit-graph files have the following format:
In order to allow extensions that add extra data to the graph, we organize
the body into "chunks" and provide a binary lookup table at the beginning
diff --git a/Documentation/glossary-content.txt b/Documentation/glossary-content.txt
index aa2f41f5e7..5a537268e2 100644
--- a/Documentation/glossary-content.txt
+++ b/Documentation/glossary-content.txt
@@ -20,7 +20,7 @@
[[def_branch]]branch::
A "branch" is a line of development. The most recent
<<def_commit,commit>> on a branch is referred to as the tip of
- that branch. The tip of the branch is referenced by a branch
+ that branch. The tip of the branch is <<def_ref,referenced>> by a branch
<<def_head,head>>, which moves forward as additional development
is done on the branch. A single Git
<<def_repository,repository>> can track an arbitrary number of
@@ -75,6 +75,21 @@ state in the Git history, by creating a new commit representing the current
state of the <<def_index,index>> and advancing <<def_HEAD,HEAD>>
to point at the new commit.
+[[def_commit_graph_general]]commit graph concept, representations and usage::
+ A synonym for the <<def_DAG,DAG>> structure formed by the commits
+ in the object database, <<def_ref,referenced>> by branch tips,
+ using their <<def_chain,chain>> of linked commits.
+ This structure is the definitive commit graph. The
+ graph can be represented in other ways, e.g. the
+ <<def_commit_graph_file,"commit-graph" file>>.
+
+[[def_commit_graph_file]]commit-graph file::
+ The "commit-graph" (normally hyphenated) file is a supplemental
+ representation of the <<def_commit_graph_general,commit graph>>
+ which accelerates commit graph walks. The "commit-graph" file is
+ stored either in the .git/objects/info directory or in the info
+ directory of an alternate object database.
+
[[def_commit_object]]commit object::
An <<def_object,object>> which contains the information about a
particular <<def_revision,revision>>, such as <<def_parent,parents>>, committer,
@@ -262,7 +277,7 @@ This commit is referred to as a "merge commit", or sometimes just a
identified by its <<def_object_name,object name>>. The objects usually
live in `$GIT_DIR/objects/`.
-[[def_object_identifier]]object identifier::
+[[def_object_identifier]]object identifier (oid)::
Synonym for <<def_object_name,object name>>.
[[def_object_name]]object name::
@@ -493,6 +508,14 @@ exclude;;
<<def_tree_object,trees>> to the trees or <<def_blob_object,blobs>>
that they contain.
+[[def_reachability_bitmap]]reachability bitmaps::
+ Reachability bitmaps store information about the
+ <<def_reachable,reachability>> of a selected set of commits in
+ a packfile, or a multi-pack index (MIDX), to speed up object search.
+ The bitmaps are stored in a ".bitmap" file. A repository may have at
+ most one bitmap file in use. The bitmap file may belong to either one
+ pack, or the repository's multi-pack index (if it exists).
+
[[def_rebase]]rebase::
To reapply a series of changes from a <<def_branch,branch>> to a
different base, and reset the <<def_head,head>> of that branch
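
For readers wondering how the structures in the two new glossary entries come
into existence, they are typically written by commands like these (a sketch;
exact flags depend on repository setup):

------------
$ git commit-graph write --reachable    # .git/objects/info/commit-graph
$ git repack -Adb                       # pack-*.bitmap next to the new pack
$ git multi-pack-index write --bitmap   # bitmap attached to the multi-pack index
------------
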
diff --git a/Documentation/howto/coordinate-embargoed-releases.txt b/Documentation/howto/coordinate-embargoed-releases.txt
index 601aae88e9..e653775bab 100644
--- a/Documentation/howto/coordinate-embargoed-releases.txt
+++ b/Documentation/howto/coordinate-embargoed-releases.txt
@@ -1,9 +1,10 @@
Content-type: text/asciidoc
-Abstract: When a critical vulnerability is discovered and fixed, we follow this
- script to coordinate a public release.
+Abstract: When a vulnerability is reported, we follow these guidelines to
+ assess the vulnerability, create and review a fix, and coordinate embargoed
+ security releases.
How we coordinate embargoed releases
-====================================
+------------------------------------
To protect Git users from critical vulnerabilities, we do not just release
fixed versions like regular maintenance releases. Instead, we coordinate
@@ -11,33 +12,147 @@ releases with packagers, keeping the fixes under an embargo until the release
date. That way, users will have a chance to upgrade on that date, no matter
what Operating System or distribution they run.
-Open a Security Advisory draft
-------------------------------
+The `git-security` mailing list
+-------------------------------
+
+Responsible disclosures of vulnerabilities, analysis, proposed fixes as
+well as the orchestration of coordinated embargoed releases all happen on the
+`git-security` mailing list at <git-security@googlegroups.com>.
+
+In this context, the term "embargo" refers to the time period that information
+about a vulnerability is kept under wraps and only shared on a need-to-know
+basis. This is necessary to protect Git's users from bad actors who would
+otherwise be made aware of attack vectors that could be exploited. "Lifting the
+embargo" refers to publishing the version that fixes the vulnerabilities.
+
+Audience of the `git-security` mailing list
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Anybody may contact the `git-security` mailing list by sending an email
+to <git-security@googlegroups.com>, though the archive is closed to the
+public and only accessible to subscribed members.
+
+There are a few dozen subscribed members: core Git developers who are trusted
+with addressing vulnerabilities, and stakeholders (i.e. owners of products
+affected by security vulnerabilities in Git).
+
+Most of the discussions revolve around assessing the severity of the reported
+issue (including the decision whether the report is security-relevant or can be
+redirected to the public mailing list), how to remediate the issue, determining
+the timeline of the disclosure as well as aligning priorities and
+requirements.
-The first step is to https://github.com/git/git/security/advisories/new[open an
-advisory]. Technically, it is not necessary, but it is convenient and saves a
-bit of hassle. This advisory can also be used to obtain the CVE number and it
-will give us a private fork associated with it that can be used to collaborate
-on a fix.
+Communications
+~~~~~~~~~~~~~~
-Release date of the embargoed version
--------------------------------------
+If you are a stakeholder, it is a good idea to pay close attention to the
+discussions, as pertinent information may be buried in the middle of a lively
+conversation that might not look relevant to your interests. For example, the
+tentative timeline might be agreed upon in the middle of discussing code
+comment formatting in one of the patches and whether or not to combine fixes
+for multiple, separate vulnerabilities into the same embargoed release. Most
+mail threads are not usually structured specifically to communicate
+agreements, assessments or timelines.
-If the vulnerability affects Windows users, we want to have our friends over at
-Visual Studio on board. This means we need to target a "Patch Tuesday" (i.e. a
-second Tuesday of the month), at the minimum three weeks from heads-up to
-coordinated release.
+Typical timeline
+----------------
-If the vulnerability affects the server side, or can benefit from scans on the
-server side (i.e. if `git fsck` can detect an attack), it is important to give
-all involved Git repository hosting sites enough time to scan all of those
-repositories.
+- A potential vulnerability is reported to the `git-security` mailing list.
+
+- The members of the git-security list start a discussion to give an initial
+ assessment of the severity of the reported potential vulnerability.
+ We aspire to do so within a few days.
+
+- After discussion, if consensus is reached that it is not critical enough
+ to warrant any embargo, the reporter is redirected to the public Git mailing
+ list. This ends the reporter's interaction with the `git-security` list.
+
+- If it is deemed critical enough for an embargo, ideas are presented on how to
+ address the vulnerability.
+
+- Usually around that time, the Git maintainer or their delegate(s) open a draft
+ security advisory in the `git/git` repository on GitHub (see below for more
+ details).
+
+- Code review can take place in a variety of different locations,
+ depending on context. These are: patches sent inline on the git-security list,
+ a private fork on GitHub associated with the draft security advisory, or the
+ git/cabal repository.
+
+- Contributors working on a fix should consider beginning by sending
+ patches to the git-security list (inline with the original thread), since they
+ are accessible to all subscribers, along with the original reporter.
+
+- Once the review has settled and everyone involved in the review agrees that
+ the patches are nearing the finish line, the Git maintainer, and others
+ determine a release date as well as the release trains that are serviced. The
+ decision regarding which versions need a backported fix is based on input from
+ the reporter, the contributor who worked on the patches, and from
+ stakeholders. Operators of hosting sites who may want to analyze whether the
+ given issue is exploited via any of the repositories they host, and binary
+ packagers who want to make sure their product gets patched adequately against
+ the vulnerability, for example, may want to give their input at this stage.
+
+- While the Git community does its best to accommodate the specific timeline
+ requests of the various binary packagers, the nature of the issue may preclude
+ a prolonged release schedule. For fixes deemed urgent, it may be in the best
+ interest of the Git users community to shorten the disclosure and release
+ timeline, and packagers may need to adapt accordingly.
+
+- Subsequently, branches with the fixes are pushed to the git/cabal repository.
+
+- The tags are created by the Git maintainer and pushed to the same repository.
+
+- The Git for Windows, Git for macOS, BSD, Debian, etc. maintainers prepare the
+  corresponding release artifacts, based on the tags prepared and pushed by
+  the Git maintainer.
+
+- The release artifacts prepared by various binary packagers can be
+ made available to stakeholders under embargo via a mail to the
+ `git-security` list.
+
+- Less than a week before the release, a mail with the relevant information is
+ sent to <distros@vs.openwall.org> (see below), a list used to pre-announce
+ embargoed releases of open source projects to the stakeholders of all major
+ distributions of Linux as well as other OSes.
+
+- Public communication is then prepared in advance of the release date. This
+ includes blog posts and mails to the Git and Git for Windows mailing lists.
+
+- On the day of the release, at around 10am Pacific Time, the Git maintainer
+ pushes the tag and the `master` branch to the public repository, then sends
+ out an announcement mail.
+
+- Once the tag is pushed, the Git for Windows maintainer publishes the
+ corresponding tag and creates a GitHub Release with the associated release
+ artifacts (Git for Windows installer, Portable Git, MinGit, etc).
+
+- The Git for Windows release is then announced via a mail to the public Git
+  and Git for Windows mailing lists, as well as via a tweet.
+
+- Ditto for distribution packagers for Linux and other platforms:
+ their releases are announced via their preferred channels.
+
+- A mail to <oss-security@lists.openwall.org> (see below for details) is sent
+ as a follow-up to the <distros@vs.openwall.org> one, describing the
+ vulnerability in detail, often including a proof of concept of an exploit.
+
+Note: The Git project makes no guarantees about timelines, but aims to keep
+embargoes reasonably short in the interest of keeping Git's users safe.
+
+Opening a Security Advisory draft
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The first step is to https://github.com/git/git/security/advisories/new[open
+an advisory]. Technically, this is not necessary. However, it is the most
+convenient way to obtain the CVE number and it gives us a private repository
+associated with it that can be used to collaborate on a fix.
Notifying the Linux distributions
----------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
At most two weeks before release date, we need to send a notification to
-distros@vs.openwall.org, preferably less than 7 days before the release date.
+<distros@vs.openwall.org>, preferably less than 7 days before the release date.
This will reach most (all?) Linux distributions. See an example below, and the
guidelines for this mailing list at
https://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists[here].
@@ -65,7 +180,7 @@ created using a command like this:
tar cJvf cve-xxx.bundle.tar.xz cve-xxx.bundle
Example mail to distros@vs.openwall.org
----------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
....
To: distros@vs.openwall.org
@@ -101,7 +216,7 @@ Thanks,
....
Example mail to oss-security@lists.openwall.com
------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
....
To: oss-security@lists.openwall.com
@@ -128,4 +243,4 @@ it goes to <developer>.
Thanks,
<name>
-....
+.... \ No newline at end of file
diff --git a/Documentation/howto/maintain-git.txt b/Documentation/howto/maintain-git.txt
index a67130debb..d07c6d44e5 100644
--- a/Documentation/howto/maintain-git.txt
+++ b/Documentation/howto/maintain-git.txt
@@ -231,7 +231,7 @@ by doing the following:
- Prepare 'jch' branch, which is used to represent somewhere
between 'master' and 'seen' and often is slightly ahead of 'next'.
- $ Meta/Reintegrate master..seen >Meta/redo-jch.sh
+ $ Meta/Reintegrate master..jch >Meta/redo-jch.sh
The result is a script that lists topics to be merged in order to
rebuild 'seen' as the input to Meta/Reintegrate script. Remove
@@ -256,7 +256,7 @@ by doing the following:
merged to 'next', add it at the end of the list. Then:
$ git checkout -B jch master
- $ Meta/redo-jch.sh -c1
+ $ sh Meta/redo-jch.sh -c1
to rebuild the 'jch' branch from scratch. "-c1" tells the script
to stop merging at the first line that begins with '###'
@@ -283,6 +283,11 @@ by doing the following:
$ git diff jch next
+ Then build the rest of 'jch':
+
+ $ git checkout jch
+ $ sh Meta/redo-jch.sh
+
When all is well, clean up the redo-jch.sh script with
$ sh Meta/redo-jch.sh -u
@@ -293,7 +298,7 @@ by doing the following:
- Rebuild 'seen'.
- $ Meta/Reintegrate master..seen >Meta/redo-seen.sh
+ $ Meta/Reintegrate jch..seen >Meta/redo-seen.sh
Edit the result by adding new topics that are not still in 'seen'
in the script. Then
diff --git a/Documentation/lint-fsck-msgids.perl b/Documentation/lint-fsck-msgids.perl
new file mode 100755
index 0000000000..1233ffe815
--- /dev/null
+++ b/Documentation/lint-fsck-msgids.perl
@@ -0,0 +1,70 @@
+#!/usr/bin/perl
+
+my ($fsck_h, $fsck_msgids_txt, $okfile) = @ARGV;
+
+my (%in_fsck_h, $fh, $bad);
+
+open($fh, "<", "$fsck_h") or die;
+while (<$fh>) {
+ if (/^\s+FUNC\(([0-9A-Z_]+), ([A-Z]+)\)/) {
+ my ($name, $severity) = ($1, $2);
+ my ($first) = 1;
+ $name = join('',
+ map {
+ y/A-Z/a-z/;
+ if (!$first) {
+ s/^(.)/uc($1)/e;
+ } else {
+ $first = 0;
+ }
+ $_;
+ }
+ split(/_/, $name));
+ $in_fsck_h{$name} = $severity;
+ }
+}
+close($fh);
+
+open($fh, "<", "$fsck_msgids_txt") or die;
+my ($previous, $current);
+while (<$fh>) {
+ if (!defined $current) {
+ if (/^\`([a-zA-Z0-9]*)\`::/) {
+ $current = $1;
+ if ((defined $previous) &&
+ ($current le $previous)) {
+ print STDERR "$previous >= $current in doc\n";
+ $bad = 1;
+ }
+ }
+ } elsif (/^\s+\(([A-Z]+)\) /) {
+ my ($level) = $1;
+ if (!exists $in_fsck_h{$current}) {
+ print STDERR "$current does not exist in fsck.h\n";
+ $bad = 1;
+ } elsif ($in_fsck_h{$current} eq "") {
+ print STDERR "$current defined twice\n";
+ $bad = 1;
+ } elsif ($in_fsck_h{$current} ne $level) {
+ print STDERR "$current severity $level != $in_fsck_h{$current}\n";
+ $bad = 1;
+ }
+ $previous = $current;
+ $in_fsck_h{$current} = ""; # mark as seen.
+ undef $current;
+ }
+}
+close($fh);
+
+for my $key (keys %in_fsck_h) {
+ if ($in_fsck_h{$key} ne "") {
+ print STDERR "$key not explained in doc.\n";
+ $bad = 1;
+ }
+}
+
+die if ($bad);
+
+open($fh, ">", "$okfile");
+print $fh "good\n";
+close($fh);
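
The script takes three positional arguments, as read from `@ARGV` above: the
`fsck.h` header, the `fsck-msgids.txt` document, and an output file that is
only written on success. A hypothetical manual invocation (the paths the
Documentation Makefile actually uses may differ):

------------
$ perl Documentation/lint-fsck-msgids.perl \
	fsck.h Documentation/fsck-msgids.txt fsck-msgids.ok
------------
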
diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt
index 1837509566..ff68e48406 100644
--- a/Documentation/rev-list-options.txt
+++ b/Documentation/rev-list-options.txt
@@ -195,6 +195,13 @@ respectively, and they must begin with `refs/` when applied to `--glob`
or `--all`. If a trailing '/{asterisk}' is intended, it must be given
explicitly.
+--exclude-hidden=[receive|uploadpack]::
+ Do not include refs that would be hidden by `git-receive-pack` or
+ `git-upload-pack` by consulting the appropriate `receive.hideRefs` or
+ `uploadpack.hideRefs` configuration along with `transfer.hideRefs` (see
+ linkgit:git-config[1]). This option affects the next pseudo-ref option
+ `--all` or `--glob` and is cleared after processing them.
+
--reflog::
Pretend as if all objects mentioned by reflogs are listed on the
command line as `<commit>`.
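
Because the option only affects the next `--all` or `--glob`, it has to be
given before them, e.g.:

------------
$ git rev-list --objects --exclude-hidden=receive --all
------------
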
diff --git a/Documentation/revisions.txt b/Documentation/revisions.txt
index e3e350126d..0d2e55d781 100644
--- a/Documentation/revisions.txt
+++ b/Documentation/revisions.txt
@@ -363,7 +363,7 @@ Revision Range Summary
'<rev>{caret}!', e.g. 'HEAD{caret}!'::
A suffix '{caret}' followed by an exclamation mark is the same
- as giving commit '<rev>' and then all its parents prefixed with
+ as giving commit '<rev>' and all its parents prefixed with
'{caret}' to exclude them (and their ancestors).
'<rev>{caret}-<n>', e.g. 'HEAD{caret}-, HEAD{caret}-2'::
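
In other words, the suffix expands to the commit itself plus its negated
parents, which is easy to see with plumbing:

------------
$ git rev-parse HEAD^!   # prints HEAD followed by ^<parent> for each parent
$ git rev-list HEAD^!    # therefore lists only the HEAD commit
------------
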
diff --git a/Documentation/technical/api-trace2.txt b/Documentation/technical/api-trace2.txt
index 2afa28bb5a..de5fc25059 100644
--- a/Documentation/technical/api-trace2.txt
+++ b/Documentation/technical/api-trace2.txt
@@ -148,20 +148,18 @@ filename collisions).
== Trace2 API
-All public Trace2 functions and macros are defined in `trace2.h` and
-`trace2.c`. All public symbols are prefixed with `trace2_`.
+The Trace2 public API is defined and documented in `trace2.h`; refer to it for
+more information. All public functions and macros are prefixed
+with `trace2_` and are implemented in `trace2.c`.
There are no public Trace2 data structures.
The Trace2 code also defines a set of private functions and data types
in the `trace2/` directory. These symbols are prefixed with `tr2_`
-and should only be used by functions in `trace2.c`.
+and should only be used by functions in `trace2.c` (or other private
+source files in `trace2/`).
-== Conventions for Public Functions and Macros
-
-The functions defined by the Trace2 API are declared and documented
-in `trace2.h`. It defines the API functions and wrapper macros for
-Trace2.
+=== Conventions for Public Functions and Macros
Some functions have a `_fl()` suffix to indicate that they take `file`
and `line-number` arguments.
@@ -172,52 +170,7 @@ take a `va_list` argument.
Some functions have a `_printf_fl()` suffix to indicate that they also
take a `printf()` style format with a variable number of arguments.
-There are CPP wrapper macros and `#ifdef`s to hide most of these details.
-See `trace2.h` for more details. The following discussion will only
-describe the simplified forms.
-
-== Public API
-
-All Trace2 API functions send a message to all of the active
-Trace2 Targets. This section describes the set of available
-messages.
-
-It helps to divide these functions into groups for discussion
-purposes.
-
-=== Basic Command Messages
-
-These are concerned with the lifetime of the overall git process.
-e.g: `void trace2_initialize_clock()`, `void trace2_initialize()`,
-`int trace2_is_enabled()`, `void trace2_cmd_start(int argc, const char **argv)`.
-
-=== Command Detail Messages
-
-These are concerned with describing the specific Git command
-after the command line, config, and environment are inspected.
-e.g: `void trace2_cmd_name(const char *name)`,
-`void trace2_cmd_mode(const char *mode)`.
-
-=== Child Process Messages
-
-These are concerned with the various spawned child processes,
-including shell scripts, git commands, editors, pagers, and hooks.
-
-e.g: `void trace2_child_start(struct child_process *cmd)`.
-
-=== Git Thread Messages
-
-These messages are concerned with Git thread usage.
-
-e.g: `void trace2_thread_start(const char *thread_name)`.
-
-=== Region and Data Messages
-
-These are concerned with recording performance data
-over regions or spans of code. e.g:
-`void trace2_region_enter(const char *category, const char *label, const struct repository *repo)`.
-
-Refer to trace2.h for details about all trace2 functions.
+CPP wrapper macros are defined to hide most of these details.
== Trace2 Target Formats
@@ -685,8 +638,8 @@ The "exec_id" field is a command-unique id and is only useful if the
`"thread_start"`::
This event is generated when a thread is started. It is
- generated from *within* the new thread's thread-proc (for TLS
- reasons).
+ generated from *within* the new thread's thread-proc (because
+ it needs to access data in the thread's thread-local storage).
+
------------
{
@@ -698,7 +651,7 @@ The "exec_id" field is a command-unique id and is only useful if the
`"thread_exit"`::
This event is generated when a thread exits. It is generated
- from *within* the thread's thread-proc (for TLS reasons).
+ from *within* the thread's thread-proc.
+
------------
{
@@ -816,6 +769,73 @@ The "value" field may be an integer or a string.
}
------------
+`"th_timer"`::
+ This event logs the amount of time that a stopwatch timer was
+ running in the thread. This event is generated when a thread
+ exits for timers that requested per-thread events.
++
+------------
+{
+ "event":"th_timer",
+ ...
+ "category":"my_category",
+ "name":"my_timer",
+	"intervals":5,  # number of times it was started/stopped
+ "t_total":0.052741, # total time in seconds it was running
+ "t_min":0.010061, # shortest interval
+ "t_max":0.011648 # longest interval
+}
+------------
+
+`"timer"`::
+ This event logs the amount of time that a stopwatch timer was
+ running aggregated across all threads. This event is generated
+ when the process exits.
++
+------------
+{
+ "event":"timer",
+ ...
+ "category":"my_category",
+ "name":"my_timer",
+	"intervals":5,  # number of times it was started/stopped
+ "t_total":0.052741, # total time in seconds it was running
+ "t_min":0.010061, # shortest interval
+ "t_max":0.011648 # longest interval
+}
+------------
+
+`"th_counter"`::
+ This event logs the value of a counter variable in a thread.
+ This event is generated when a thread exits for counters that
+ requested per-thread events.
++
+------------
+{
+ "event":"th_counter",
+ ...
+ "category":"my_category",
+ "name":"my_counter",
+ "count":23
+}
+------------
+
+`"counter"`::
+ This event logs the value of a counter variable across all threads.
+ This event is generated when the process exits. The total value
+ reported here is the sum across all threads.
++
+------------
+{
+ "event":"counter",
+ ...
+ "category":"my_category",
+ "name":"my_counter",
+ "count":23
+}
+------------
+
+
== Example Trace2 API Usage
Here is a hypothetical usage of the Trace2 API showing the intended
@@ -1206,7 +1226,7 @@ worked on 508 items at offset 2032. Thread "th04" worked on 508 items
at offset 508.
+
This example also shows that thread names are assigned in a racy manner
-as each thread starts and allocates TLS storage.
+as each thread starts.
Config (def param) Events::
@@ -1247,6 +1267,60 @@ d0 | main | data | r0 | 0.002126 | 0.002126 | fsy
d0 | main | exit | | 0.000470 | | | code:0
d0 | main | atexit | | 0.000477 | | | code:0
----------------
+
+Stopwatch Timer Events::
+
+ Measure the time spent in a function call or span of code
+ that might be called from many places within the code
+ throughout the life of the process.
++
+----------------
+static void expensive_function(void)
+{
+ trace2_timer_start(TRACE2_TIMER_ID_TEST1);
+ ...
+ sleep_millisec(1000); // Do something expensive
+ ...
+ trace2_timer_stop(TRACE2_TIMER_ID_TEST1);
+}
+
+static int ut_100timer(int argc, const char **argv)
+{
+ ...
+
+ expensive_function();
+
+ // Do something else 1...
+
+ expensive_function();
+
+ // Do something else 2...
+
+ expensive_function();
+
+ return 0;
+}
+----------------
++
+In this example, we measure the total time spent in
+`expensive_function()` regardless of when it is called
+in the overall flow of the program.
++
+----------------
+$ export GIT_TRACE2_PERF_BRIEF=1
+$ export GIT_TRACE2_PERF=~/log.perf
+$ t/helper/test-tool trace2 100timer 3 1000
+...
+$ cat ~/log.perf
+d0 | main | version | | | | | ...
+d0 | main | start | | 0.001453 | | | t/helper/test-tool trace2 100timer 3 1000
+d0 | main | cmd_name | | | | | trace2 (trace2)
+d0 | main | exit | | 3.003667 | | | code:0
+d0 | main | timer | | | | test | name:test1 intervals:3 total:3.001686 min:1.000254 max:1.000929
+d0 | main | atexit | | 3.003796 | | | code:0
+----------------
+
+
== Future Work
=== Relationship to the Existing Trace Api (api-trace.txt)
diff --git a/Documentation/technical/bundle-uri.txt b/Documentation/technical/bundle-uri.txt
index 18f2dedd40..b78d01d9ad 100644
--- a/Documentation/technical/bundle-uri.txt
+++ b/Documentation/technical/bundle-uri.txt
@@ -153,8 +153,8 @@ bundle.<id>.filter::
bundle.<id>.creationToken::
This value is a nonnegative 64-bit integer used for sorting the bundles
- the list. This is used to download a subset of bundles during a fetch
- when `bundle.heuristic=creationToken`.
+ list. This is used to download a subset of bundles during a fetch when
+ `bundle.heuristic=creationToken`.
bundle.<id>.location::
This string value advertises a real-world location from where the bundle
@@ -234,8 +234,8 @@ will interact with bundle URIs according to the following flow:
making those OIDs present. When all required OIDs are present, the
client unbundles that data using a refspec. The default refspec is
`+refs/heads/*:refs/bundles/*`, but this can be configured. These refs
- are stored so that later `git fetch` negotiations can communicate the
- bundled refs as `have`s, reducing the size of the fetch over the Git
+ are stored so that later `git fetch` negotiations can communicate each
+ bundled ref as a `have`, reducing the size of the fetch over the Git
protocol. To allow pruning refs from this ref namespace, Git may
introduce a numbered namespace (such as `refs/bundles/<i>/*`) such that
stale bundle refs can be deleted.
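
On the client side, a bundle list described by these keys is typically
consumed at clone time via `--bundle-uri`; the URLs below are placeholders:

------------
$ git clone --bundle-uri=https://cdn.example.com/bundles/list \
	https://git.example.com/project.git
------------
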
diff --git a/Documentation/technical/commit-graph.txt b/Documentation/technical/commit-graph.txt
index 90c9760c23..86fed0de0f 100644
--- a/Documentation/technical/commit-graph.txt
+++ b/Documentation/technical/commit-graph.txt
@@ -1,4 +1,4 @@
-Git Commit Graph Design Notes
+Git Commit-Graph Design Notes
=============================
Git walks the commit graph for many reasons, including:
@@ -17,7 +17,7 @@ There are two main costs here:
The commit-graph file is a supplemental data structure that accelerates
commit graph walks. If a user downgrades or disables the 'core.commitGraph'
-config setting, then the existing ODB is sufficient. The file is stored
+config setting, then the existing object database is sufficient. The file is stored
as "commit-graph" either in the .git/objects/info directory or in the info
directory of an alternate.
@@ -95,7 +95,7 @@ with default order), but is not used when the topological order is
required (such as merge base calculations, "git log --graph").
In practice, we expect some commits to be created recently and not stored
-in the commit graph. We can treat these commits as having "infinite"
+in the commit-graph. We can treat these commits as having "infinite"
generation number and walk until reaching commits with known generation
number.
@@ -149,7 +149,7 @@ Design Details
helpful for these clones, anyway. The commit-graph will not be read or
written when shallow commits are present.
-Commit Graphs Chains
+Commit-Graphs Chains
--------------------
Typically, repos grow with near-constant velocity (commits per day). Over time,
diff --git a/Documentation/technical/parallel-checkout.txt b/Documentation/technical/parallel-checkout.txt
index e790258a1a..47c9b6183c 100644
--- a/Documentation/technical/parallel-checkout.txt
+++ b/Documentation/technical/parallel-checkout.txt
@@ -56,7 +56,7 @@ Rejected Multi-Threaded Solution
The most "straightforward" implementation would be to spread the set of
to-be-updated cache entries across multiple threads. But due to the
-thread-unsafe functions in the ODB code, we would have to use locks to
+thread-unsafe functions in the object database code, we would have to use locks to
coordinate the parallel operation. An early prototype of this solution
showed that the multi-threaded checkout would bring performance
improvements over the sequential code, but there was still too much lock
diff --git a/Documentation/technical/repository-version.txt b/Documentation/technical/repository-version.txt
index 7844ef30ff..8ef664b0b9 100644
--- a/Documentation/technical/repository-version.txt
+++ b/Documentation/technical/repository-version.txt
@@ -82,9 +82,9 @@ When the config key `extensions.preciousObjects` is set to `true`,
objects in the repository MUST NOT be deleted (e.g., by `git-prune` or
`git repack -d`).
-==== `partialclone`
+==== `partialClone`
-When the config key `extensions.partialclone` is set, it indicates
+When the config key `extensions.partialClone` is set, it indicates
that the repo was created with a partial clone (or later performed
a partial fetch) and that the remote may have omitted sending
certain unwanted objects. Such a remote is called a "promisor remote"
diff --git a/Documentation/technical/sparse-checkout.txt b/Documentation/technical/sparse-checkout.txt
new file mode 100644
index 0000000000..fa0d01cbda
--- /dev/null
+++ b/Documentation/technical/sparse-checkout.txt
@@ -0,0 +1,1103 @@
+Table of contents:
+
+ * Terminology
+ * Purpose of sparse-checkouts
+ * Usecases of primary concern
+ * Oversimplified mental models ("Cliff Notes" for this document!)
+ * Desired behavior
+ * Behavior classes
+ * Subcommand-dependent defaults
+ * Sparse specification vs. sparsity patterns
+ * Implementation Questions
+ * Implementation Goals/Plans
+ * Known bugs
+ * Reference Emails
+
+
+=== Terminology ===
+
+cone mode: one of two modes for specifying the desired subset of files
+ in a sparse-checkout. In cone-mode, the user specifies
+ directories (getting both everything under that directory as
+ well as everything in leading directories), while in non-cone
+ mode, the user specifies gitignore-style patterns. Controlled
+ by the --[no-]cone option to sparse-checkout init|set.
+
+SKIP_WORKTREE: When tracked files do not match the sparse specification and
+ are removed from the working tree, the file in the index is marked
+ with a SKIP_WORKTREE bit. Note that if a tracked file has the
+ SKIP_WORKTREE bit set but the file is later written by the user to
+ the working tree anyway, the SKIP_WORKTREE bit will be cleared at
+ the beginning of any subsequent Git operation.
+
+ Most sparse checkout users are unaware of this implementation
+ detail, and the term should generally be avoided in user-facing
+ descriptions and command flags. Unfortunately, prior to the
+ `sparse-checkout` subcommand this low-level detail was exposed,
+ and as of time of writing, is still exposed in various places.
+
+sparse-checkout: a subcommand in git used to reduce the files present in
+ the working tree to a subset of all tracked files. Also, the
+ name of the file in the $GIT_DIR/info directory used to track
+ the sparsity patterns corresponding to the user's desired
+ subset.
+
+sparse cone: see cone mode
+
+sparse directory: An entry in the index corresponding to a directory, which
+ appears in the index instead of all the files under that directory
+ that would normally appear. See also sparse-index. Something that
+ can cause confusion is that the "sparse directory" does NOT match
+ the sparse specification, i.e. the directory is NOT present in the
+ working tree. May be renamed in the future (e.g. to "skipped
+ directory").
+
+sparse index: A special mode for sparse-checkout that also makes the
+ index sparse by recording a directory entry in lieu of all the
+ files underneath that directory (thus making that a "skipped
+ directory" which unfortunately has also been called a "sparse
+ directory"), and does this for potentially multiple
+ directories. Controlled by the --[no-]sparse-index option to
+ init|set|reapply.
+
+sparsity patterns: patterns from $GIT_DIR/info/sparse-checkout used to
+ define the set of files of interest. A warning: It is easy to
+ over-use this term (or the shortened "patterns" term), for two
+ reasons: (1) users in cone mode specify directories rather than
+ patterns (their directories are transformed into patterns, but
+ users may think you are talking about non-cone mode if you use the
+	word "patterns"), and (2) the sparse specification might
+ transiently differ in the working tree or index from the sparsity
+ patterns (see "Sparse specification vs. sparsity patterns").
+
+sparse specification: The set of paths in the user's area of focus. This
+ is typically just the tracked files that match the sparsity
+ patterns, but the sparse specification can temporarily differ and
+ include additional files. (See also "Sparse specification
+ vs. sparsity patterns")
+
+ * When working with history, the sparse specification is exactly
+ the set of files matching the sparsity patterns.
+ * When interacting with the working tree, the sparse specification
+ is the set of tracked files with a clear SKIP_WORKTREE bit or
+ tracked files present in the working copy.
+ * When modifying or showing results from the index, the sparse
+ specification is the set of files with a clear SKIP_WORKTREE bit
+ or that differ in the index from HEAD.
+ * If working with the index and the working copy, the sparse
+ specification is the union of the paths from above.
+
+vivifying: When a command restores a tracked file to the working tree (and
+ hopefully also clears the SKIP_WORKTREE bit in the index for that
+ file), this is referred to as "vivifying" the file.
+
+
+=== Purpose of sparse-checkouts ===
+
+sparse-checkouts exist to allow users to work with a subset of their
+files.
+
+You can think of sparse-checkouts as subdividing "tracked" files into two
+categories -- a sparse subset, and all the rest. Implementationally, we
+mark "all the rest" in the index with a SKIP_WORKTREE bit and leave them
+out of the working tree. The SKIP_WORKTREE files are still tracked, just
+not present in the working tree.
+
+In the past, sparse-checkouts were defined by "SKIP_WORKTREE means the file
+is missing from the working tree but pretend the file contents match HEAD".
+That was not only bogus (it actually meant the file missing from the
+working tree matched the index rather than HEAD), but it was also a
+low-level detail which only provided decent behavior for a few commands.
+There were a surprising number of ways in which that guiding principle gave
+command results that violated user expectations, and as such was a bad
+mental model. However, it persisted for many years and may still be found
+in some corners of the code base.
+
+Anyway, the idea of "working with a subset of files" is simple enough, but
+there are multiple different high-level usecases which affect how some Git
+subcommands should behave. Further, even if we only considered one of
+those usecases, sparse-checkouts can modify different subcommands in over a
+half dozen different ways. Let's start by considering the high level
+usecases:
+
+ A) Users are _only_ interested in the sparse portion of the repo
+
+ A*) Users are _only_ interested in the sparse portion of the repo
+ that they have downloaded so far
+
+ B) Users want a sparse working tree, but are working in a larger whole
+
+ C) sparse-checkout is a behind-the-scenes implementation detail allowing
+ Git to work with a specially crafted in-house virtual file system;
+ users are actually working with a "full" working tree that is
+ lazily populated, and sparse-checkout helps with the lazy population
+ piece.
+
+It may be worth explaining each of these in a bit more detail:
+
+
+ (Behavior A) Users are _only_ interested in the sparse portion of the repo
+
+These folks might know there are other things in the repository, but
+don't care. They are uninterested in other parts of the repository, and
+only want to know about changes within their area of interest. Showing
+them other files from history (e.g. from diff/log/grep/etc.) is a
+usability annoyance, potentially a huge one since other changes in
+history may dwarf the changes they are interested in.
+
+Some of these users also arrive at this usecase from wanting to use partial
+clones together with sparse checkouts (in a way where they have downloaded
+blobs within the sparse specification) and do disconnected development.
+Not only do these users generally not care about other parts of the
+repository, but they consider it a blocker for Git commands to try to operate
+on those. If commands attempt to access paths in history outside the sparsity
+specification, then the partial clone will attempt to download additional
+blobs on demand, fail, and then fail the user's command. (This may be
+unavoidable in some cases, e.g. when `git merge` has non-trivial changes to
+reconcile outside the sparse specification, but we should limit how often
+users are forced to connect to the network.)
+
+Also, even for users using partial clones that do not mind being
+always connected to the network, the need to download blobs as
+side-effects of various other commands (such as the printed diffstat
+after a merge or pull) can lead to worries about local repository size
+growing unnecessarily[10].
+
+ (Behavior A*) Users are _only_ interested in the sparse portion of the repo
+ that they have downloaded so far (a variant on the first usecase)
+
+This variant is driven by folks who use partial clones together with
+sparse checkouts, do disconnected development (so far sounding like a
+subset of behavior A users), and do so on very large repositories. The
+reason for yet another variant is that downloading even just the blobs
+through history within their sparse specification may be too much, so they
+only download some. They would still like operations to succeed without
+network connectivity, though, so things like `git log -S${SEARCH_TERM} -p`
+or `git grep ${SEARCH_TERM} OLDREV` would need to be prepared to provide
+partial results that depend on what happens to have been downloaded.
+
+This variant could be viewed as Behavior A with the sparse specification
+for history querying operations modified from "sparsity patterns" to
+"sparsity patterns limited to the blobs we have already downloaded".
+
+ (Behavior B) Users want a sparse working tree, but are working in a
+ larger whole
+
+Stolee described this usecase this way[11]:
+
+"I'm also focused on users that know that they are a part of a larger
+whole. They know they are operating on a large repository but focus on
+what they need to contribute their part. I expect multiple "roles" to
+use very different, almost disjoint parts of the codebase. Some other
+"architect" users operate across the entire tree or hop between different
+sections of the codebase as necessary. In this situation, I'm wary of
+scoping too many features to the sparse-checkout definition, especially
+"git log," as it can be too confusing to have their view of the codebase
+depend on your "point of view."
+
+People might also end up wanting behavior B due to complex inter-project
+dependencies. The initial attempts to use sparse-checkouts usually involve
+the directories you are directly interested in plus what those directories
+depend upon within your repository. But there's a monkey wrench here: if
+you have integration tests, they invert the hierarchy: to run integration
+tests, you need not only what you are interested in and its in-tree
+dependencies, you also need everything that depends upon what you are
+interested in or that depends upon one of your dependencies...AND you need
+all the in-tree dependencies of that expanded group. That can easily
+change your sparse-checkout into a nearly dense one.
+
+Naturally, that tends to kill the benefits of sparse-checkouts. There are
+a couple solutions to this conundrum: either avoid grabbing in-repo
+dependencies (maybe have built versions of your in-repo dependencies pulled
+from a CI cache somewhere), or say that users shouldn't run integration
+tests directly and instead do it on the CI server when they submit a code
+review. Or do both. Regardless of whether you stub out your in-repo
+dependencies or stub out the things that depend upon you, there is
+certainly a reason to want to query and be aware of those other stubbed-out
+parts of the repository, particularly when the dependencies are complex or
+change relatively frequently. Thus, for such uses, sparse-checkouts can be
+used to limit what you directly build and modify, but these users do not
+necessarily want their sparse checkout paths to limit their queries of
+versions in history.
+
+Some people may also be interested in behavior B over behavior A simply as
+a performance workaround: if they are using non-cone mode, then they have
+to deal with its inherent quadratic performance problems. In that mode,
+every operation that checks whether paths match the sparsity specification
+can be expensive. As such, these users may only be willing to pay for
+those expensive checks when interacting with the working copy, and may
+prefer getting "unrelated" results from their history queries over having
+slow commands.
+
+  (Behavior C) sparse-checkout is an implementation detail supporting a
+ special VFS.
+
+This usecase goes slightly against the traditional definition of
+sparse-checkout in that it actually tries to present a full or dense
+checkout to the user. However, this usecase utilizes the same underlying
+technical underpinnings in a new way which does provide some performance
+advantages to users. The basic idea is that a company can have an in-house
+Git-aware Virtual File System which pretends all files are present in the
+working tree, by intercepting all file system accesses and using those to
+fetch and write accessed files on demand via partial clones. The VFS uses
+sparse-checkout to prevent Git from writing or paying attention to many
+files, and manually updates the sparse checkout patterns itself based on
+user access and modification of files in the working tree. See commit
+ecc7c8841d ("repo_read_index: add config to expect files outside sparse
+patterns", 2022-02-25) and the link at [17] for a more detailed description
+of such a VFS.
+
+The biggest difference here is that users are completely unaware that the
+sparse-checkout machinery is even in use. The sparse patterns are not
+specified by the user but rather are under the complete control of the VFS
+(and the patterns are updated frequently and dynamically by it). The user
+will perceive the checkout as dense, and commands should thus behave as if
+all files are present.
+
+
+=== Usecases of primary concern ===
+
+Most of the rest of this document will focus on Behavior A and Behavior
+B. Some notes about the other two cases and why we are not focusing on
+them:
+
+ (Behavior A*)
+
+Supporting this usecase is estimated to be difficult and a lot of work.
+There are no plans to implement it currently, but it may be a potential
+future alternative. Knowing about the existence of additional alternatives
+may affect our choice of command line flags (e.g. if we need tri-state or
+quad-state flags rather than just binary flags), so it was still important
+to at least note it here.
+
+Further, I believe the descriptions below for Behavior A are probably still
+valid for this usecase, with the only exception being that it redefines the
+sparse specification to restrict it to already-downloaded blobs. The hard
+part is in making commands capable of respecting that modified definition.
+
+ (Behavior C)
+
+This usecase violates some of the early sparse-checkout documented
+assumptions (since files marked as SKIP_WORKTREE will be displayed to users
+as present in the working tree). That violation may mean various
+sparse-checkout related behaviors are not well suited to this usecase and
+we may need tweaks -- to both documentation and code -- to handle it.
+However, this usecase is also perhaps the simplest model to support in that
+everything behaves like a dense checkout with a few exceptions (e.g. branch
+checkouts and switches write fewer things, knowing the VFS will lazily
+write the rest on an as-needed basis).
+
+Since there is no publicly available VFS-related code for folks to try,
+the number of folks who can test such a usecase is limited.
+
+The primary reason to note the Behavior C usecase is that as we fix things
+to better support Behaviors A and B, there may be additional places where
+we need to make tweaks allowing folks in this usecase to get the original
+non-sparse treatment. For an example, see ecc7c8841d ("repo_read_index:
+add config to expect files outside sparse patterns", 2022-02-25). The
+secondary reason to note Behavior C is so that folks taking advantage of
+Behavior C do not assume they are part of the Behavior B camp and propose
+patches that break things for the real Behavior B folks.
+
+
+=== Oversimplified mental models ===
+
+An oversimplification of the differences in the above behaviors is:
+
+ Behavior A: Restrict worktree and history operations to sparse specification
+ Behavior B: Restrict worktree operations to sparse specification; have any
+ history operations work across all files
+ Behavior C: Do not restrict either worktree or history operations to the
+ sparse specification...with the exception of branch checkouts or
+ switches which avoid writing files that will match the index so
+ they can later lazily be populated instead.
+
+
+=== Desired behavior ===
+
+As noted previously, despite the simple idea of just working with a subset
+of files, there are a range of different behavioral changes that need to be
+made to different subcommands to work well with such a feature. See
+[1,2,3,4,5,6,7,8,9,10] for various examples. In particular, at [2], we saw
+that mere composition of other commands that individually worked correctly
+in a sparse-checkout context did not imply that the higher level command
+would work correctly; it sometimes requires further tweaks. So,
+understanding these differences can be beneficial.
+
+* Commands behaving the same regardless of high-level use-case
+
+ * commands that only look at files within the sparsity specification
+
+ * diff (without --cached or REVISION arguments)
+ * grep (without --cached or REVISION arguments)
+ * diff-files
+
+ * commands that restore files to the working tree that match sparsity
+ patterns, and remove unmodified files that don't match those
+ patterns:
+
+ * switch
+ * checkout (the switch-like half)
+ * read-tree
+ * reset --hard
+
+ * commands that write conflicted files to the working tree, but otherwise
+ will omit writing files to the working tree that do not match the
+ sparsity patterns:
+
+ * merge
+ * rebase
+ * cherry-pick
+ * revert
+
+ * `am` and `apply --cached` should probably be in this section but
+ are buggy (see the "Known bugs" section below)
+
+ The behavior for these commands somewhat depends upon the merge
+ strategy being used:
+ * `ort` behaves as described above
+ * `recursive` tries to not vivify files unnecessarily, but does sometimes
+ vivify files without conflicts.
+ * `octopus` and `resolve` will always vivify any file changed in the merge
+ relative to the first parent, which is rather suboptimal.
+
+ It is also important to note that these commands WILL update the index
+ outside the sparse specification relative to when the operation began,
+ BUT these commands often make a commit just before or after such that
+ by the end of the operation there is no change to the index outside the
+ sparse specification. Of course, if the operation hits conflicts or
+ does not make a commit, then these operations clearly can modify the
+ index outside the sparse specification.
+
+ Finally, it is important to note that at least the first four of these
+ commands also try to remove differences between the sparse
+ specification and the sparsity patterns (much like the commands in the
+ previous section).
+
+ * commands that always ignore sparsity since commits must be full-tree
+
+ * archive
+ * bundle
+ * commit
+ * format-patch
+ * fast-export
+ * fast-import
+ * commit-tree
+
+ * commands that write any modified file to the working tree (conflicted
+ or not, and whether those paths match sparsity patterns or not):
+
+ * stash
+ * apply (without `--index` or `--cached`)
+
+* Commands that may slightly differ for behavior A vs. behavior B:
+
+ Commands in this category behave mostly the same between the two
+ behaviors, but may differ in verbosity and types of warning and error
+ messages.
+
+ * commands that make modifications to which files are tracked:
+ * add
+ * rm
+ * mv
+ * update-index
+
+ The fact that files can move between the 'tracked' and 'untracked'
+ categories means some commands will have to treat untracked files
+ differently. But if we have to treat untracked files differently,
+ then additional commands may also need changes:
+
+ * status
+ * clean
+
+ In particular, `status` may need to report any untracked files outside
+ the sparsity specification as an erroneous condition (especially to
+ avoid the user trying to `git add` them, forcing `git add` to display
+ an error).
+
+ It's not clear to me exactly how (or even if) `clean` would change,
+ but it's the other command that also affects untracked files.
+
+ `update-index` may be slightly special. Its --[no-]skip-worktree flag
+ may need to ignore the sparse specification by its nature. Also, its
+ current --[no-]ignore-skip-worktree-entries default is totally bogus.
+
+ * commands for manually tweaking paths in both the index and the working tree
+ * `restore`
+ * the restore-like half of `checkout`
+
+ These commands should be similar to add/rm/mv in that they should
+ only operate on the sparse specification by default, and require a
+ special flag to operate on all files.
+
+ Also, note that these commands currently have a number of issues (see
+ the "Known bugs" section below)
+
+* Commands that significantly differ for behavior A vs. behavior B:
+
+ * commands that query history
+ * diff (with --cached or REVISION arguments)
+ * grep (with --cached or REVISION arguments)
+ * show (when given commit arguments)
+ * blame (only matters when one or more -C flags are passed)
+ * and annotate
+ * log
+ * whatchanged
+ * ls-files
+ * diff-index
+ * diff-tree
+ * ls-tree
+
+ Note: for log and whatchanged, revision walking logic is unaffected
+ but displaying of patches is affected by scoping the command to the
+ sparse-checkout. (The fact that revision walking is unaffected is
+ why rev-list, shortlog, show-branch, and bisect are not in this
+ list.)
+
+ ls-files may be slightly special in that e.g. `git ls-files -t` is
+ often used to see what is sparse and what is not. Perhaps -t should
+ always work on the full tree?
+
+* Commands I don't know how to classify
+
+ * range-diff
+
+ Is this like `log` or `format-patch`?
+
+ * cherry
+
+ See range-diff
+
+* Commands unaffected by sparse-checkouts
+
+ * shortlog
+ * show-branch
+ * rev-list
+ * bisect
+
+ * branch
+ * describe
+ * fetch
+ * gc
+ * init
+ * maintenance
+ * notes
+ * pull (merge & rebase have the necessary changes)
+ * push
+ * submodule
+ * tag
+
+ * config
+ * filter-branch (works in separate checkout without sparse-checkout setup)
+ * pack-refs
+ * prune
+ * remote
+ * repack
+ * replace
+
+ * bugreport
+ * count-objects
+ * fsck
+ * gitweb
+ * help
+ * instaweb
+ * merge-tree (doesn't touch worktree or index, and merges always compute full-tree)
+ * rerere
+ * verify-commit
+ * verify-tag
+
+ * commit-graph
+ * hash-object
+ * index-pack
+ * mktag
+ * mktree
+ * multi-pack-index
+ * pack-objects
+ * prune-packed
+ * symbolic-ref
+ * unpack-objects
+ * update-ref
+ * write-tree (operates on index, possibly optimized to use sparse dir entries)
+
+ * for-each-ref
+ * get-tar-commit-id
+ * ls-remote
+ * merge-base (merges are computed full tree, so merge base should be too)
+ * name-rev
+ * pack-redundant
+ * rev-parse
+ * show-index
+ * show-ref
+ * unpack-file
+ * var
+ * verify-pack
+
+ * <Everything under 'Interacting with Others' in 'git help --all'>
+ * <Everything under 'Low-level...Syncing' in 'git help --all'>
+ * <Everything under 'Low-level...Internal Helpers' in 'git help --all'>
+ * <Everything under 'External commands' in 'git help --all'>
+
+* Commands that might be affected, but who cares?
+
+ * merge-file
+ * merge-index
+ * gitk?
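+
+As a point of reference for several of the classifications above, this is
+the sort of `git ls-files -t` output being discussed (the file names are
+made up; 'H' marks a normal tracked entry, 'S' marks an entry with the
+SKIP_WORKTREE bit set):
+
+ $ git ls-files -t
+ H in-cone/file.c
+ S out-of-cone/file.c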
+
+
+=== Behavior classes ===
+
+From the above there are a few classes of behavior:
+
+ * "restrict"
+
+ Commands in this class only read or write files in the working tree
+ within the sparse specification.
+
+ When moving to a new commit (e.g. switch, reset --hard), these commands
+ may update index files outside the sparse specification as of the start
+ of the operation, but by the end of the operation those index files
+ will match HEAD again and thus those files will again be outside the
+ sparse specification.
+
+ When paths are explicitly specified, these paths are intersected with
+ the sparse specification and will only operate on such paths.
+ (e.g. `git restore [--staged] -- '*.png'`, `git reset -p -- '*.md'`)
+
+ Some of these commands may also attempt, at the end of their operation,
+ to cull transient differences between the sparse specification and the
+ sparsity patterns (see "Sparse specification vs. sparsity patterns" for
+ details, but this basically means either removing unmodified files not
+ matching the sparsity patterns and marking those files as
+ SKIP_WORKTREE, or vivifying files that match the sparsity patterns and
+ marking those files as !SKIP_WORKTREE).
+
+ * "restrict modulo conflicts"
+
+ Commands in this class generally behave like the "restrict" class,
+ except that:
+ (1) they will ignore the sparse specification and write files with
+ conflicts to the working tree (thus temporarily expanding the
+ sparse specification to include such files.)
+ (2) they are grouped with commands which move to a new commit, since
+ they often create a commit and then move to it, even though we
+ know there are many exceptions to moving to the new commit. (For
+ example, the user may rebase a commit that becomes empty, or have
+ a cherry-pick which conflicts, or a user could run `merge
+ --no-commit`, and we also view `apply --index` kind of like `am
+ --no-commit`.) As such, these commands can make changes to index
+ files outside the sparse specification, though they'll mark such
+ files with SKIP_WORKTREE.
+
+ * "restrict also specially applied to untracked files"
+
+ Commands in this class generally behave like the "restrict" class,
+ except that they have to handle untracked files differently too, often
+ because these commands are dealing with files changing state between
+ 'tracked' and 'untracked'. Often, this may mean printing an error
+ message if the command had nothing to do, but the arguments may have
+ referred to files whose tracked-ness state could have changed were it
+ not for the sparsity patterns excluding them.
+
+ * "no restrict"
+
+ Commands in this class ignore the sparse specification entirely.
+
+ * "restrict or no restrict dependent upon behavior A vs. behavior B"
+
+ Commands in this class behave like "no restrict" for folks in the
+ behavior B camp, and like "restrict" for folks in the behavior A camp.
+ However, when behaving like "restrict" a warning of some sort might be
+ provided that history queries have been limited by the sparse-checkout
+ specification.
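+
+As an illustration of this last class, `git grep` under the current
+defaults (which match behavior B for history queries) roughly behaves as
+follows:
+
+ $ git grep foo          # searches only the files present in the sparse checkout
+ $ git grep foo HEAD     # searches every file in HEAD, i.e. operates full-tree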
+
+
+=== Subcommand-dependent defaults ===
+
+Note that we have different defaults depending on the command for the
+desired behavior:
+
+ * Commands defaulting to "restrict":
+ * diff-files
+ * diff (without --cached or REVISION arguments)
+ * grep (without --cached or REVISION arguments)
+ * switch
+ * checkout (the switch-like half)
+ * reset (<commit>)
+
+ * restore
+ * checkout (the restore-like half)
+ * checkout-index
+ * reset (with pathspec)
+
+ This behavior makes sense; these interact with the working tree.
+
+ * Commands defaulting to "restrict modulo conflicts":
+ * merge
+ * rebase
+ * cherry-pick
+ * revert
+
+ * am
+ * apply --index (which is kind of like an `am --no-commit`)
+
+ * read-tree (especially with -m or -u; is kind of like a --no-commit merge)
+ * reset (<tree-ish>, due to similarity to read-tree)
+
+ These also interact with the working tree, but require slightly
+ different behavior either so that (a) conflicts can be resolved or (b)
+ because they are kind of like a merge-without-commit operation.
+
+ (See also the "Known bugs" section below regarding `am` and `apply`)
+
+ * Commands defaulting to "no restrict":
+ * archive
+ * bundle
+ * commit
+ * format-patch
+ * fast-export
+ * fast-import
+ * commit-tree
+
+ * stash
+ * apply (without `--index`)
+
+ These have completely different defaults and perhaps deserve the most
+ detailed explanation:
+
+ In the case of commands in the first group (format-patch,
+ fast-export, bundle, archive, etc.), these are commands for
+ communicating history, which will be broken if they restrict to a
+ subset of the repository. As such, they operate on full paths and
+ have no `--restrict` option for overriding. Some of these commands may
+ take paths for manually restricting what is exported, but it needs to
+ be very explicit.
+
+ In the case of stash, it needs to vivify files to avoid losing the
+ user's changes.
+
+ In the case of apply without `--index`, that command needs to update
+ the working tree without the index (or the index without the working
+ tree if `--cached` is passed), and if we restrict those updates to the
+ sparse specification then we'll lose changes from the user.
+
+ * Commands defaulting to "restrict also specially applied to untracked files":
+ * add
+ * rm
+ * mv
+ * update-index
+ * status
+ * clean (?)
+
+ Our original implementation for the first three of these commands was
+ "no restrict", but it had some severe usability issues:
+ * `git add <somefile>` if honored and outside the sparse
+ specification, can result in the file randomly disappearing later
+ when some subsequent command is run (since various commands
+ automatically clean up unmodified files outside the sparse
+ specification).
+ * `git rm '*.jpg'` could very negatively surprise users if it deletes
+ files outside the range of the user's interest.
+ * `git mv` has similar surprises when moving into or out of the cone,
+ so best to restrict by default
+
+ So, we switched `add` and `rm` to default to "restrict", which made
+ usability problems much less severe and less frequent, but we still got
+ complaints because commands like:
+ git add <file-outside-sparse-specification>
+ git rm <file-outside-sparse-specification>
+ would silently do nothing. We should instead print an error in those
+ cases to get usability right.
+
+ update-index needs to be updated to match, and status and maybe clean
+ also need to be updated to specially handle untracked paths.
+
+ There may be a difference in here between behavior A and behavior B in
+ terms of verboseness of errors or additional warnings.
+
+ * Commands falling under "restrict or no restrict dependent upon behavior
+ A vs. behavior B"
+
+ * diff (with --cached or REVISION arguments)
+ * grep (with --cached or REVISION arguments)
+ * show (when given commit arguments)
+ * blame (only matters when one or more -C flags passed)
+ * and annotate
+ * log
+ * and variants: shortlog, gitk, show-branch, whatchanged, rev-list
+ * ls-files
+ * diff-index
+ * diff-tree
+ * ls-tree
+
+ For now, we default to behavior B for these, which implies a default of
+ "no restrict".
+
+ Note that two of these commands -- diff and grep -- also appeared in a
+ different list with a default of "restrict", but only when limited to
+ searching the working tree. The working tree vs. history distinction
+ is fundamental in how behavior B operates, so this is expected. Note,
+ though, that for diff and grep with --cached, when doing "restrict"
+ behavior, the difference between sparse specification and sparsity
+ patterns is important to handle.
+
+ "restrict" may make more sense as the long term default for these[12].
+ Also, supporting "restrict" for these commands might be a fair amount
+ of work to implement, meaning it might be implemented over multiple
+ releases. If that behavior were the default in the commands that
+ supported it, that would force behavior B users to gradually add
+ additional flags to their commands, depending on git version, to get
+ the behavior they want. That gradual switchover would
+ be painful, so we should avoid it at least until it's fully
+ implemented.
+
+
+=== Sparse specification vs. sparsity patterns ===
+
+In a well-behaved situation, the sparse specification is given directly
+by the $GIT_DIR/info/sparse-checkout file. However, it can transiently
+diverge for a few reasons:
+
+ * needing to resolve conflicts (merging will vivify conflicted files)
+ * running Git commands that implicitly vivify files (e.g. "git stash apply")
+ * running Git commands that explicitly vivify files (e.g. "git checkout
+ --ignore-skip-worktree-bits FILENAME")
+ * other commands that write to these files (perhaps a user copies one
+ from elsewhere)
+
+For the last item, note that we do automatically clear the SKIP_WORKTREE
+bit for files that are present in the working tree. This has been true
+since 82386b4496 ("Merge branch 'en/present-despite-skipped'",
+2022-03-09).
+
+However, such a situation is transient because:
+
+ * Such transient differences can and will be automatically removed as
+ a side-effect of commands which call unpack_trees() (checkout,
+ merge, reset, etc.).
+ * Users can also request such transient differences be corrected via
+ running `git sparse-checkout reapply`. Various places recommend
+ running that command.
+ * Additional commands are also welcome to implicitly fix these
+ differences; we may add more in the future.
+
+While we avoid dropping unstaged changes or files which have conflicts,
+we otherwise aggressively try to fix these transient differences. If
+users want these differences to persist, they should run the `set` or
+`add` subcommands of `git sparse-checkout` to reflect their intended
+sparse specification.
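+
+As a rough sketch of that lifecycle, using the explicit-vivification
+command mentioned above on a hypothetical out-of-cone file (the exact
+behavior at the edges is still subject to the known bugs listed later):
+
+ $ git checkout --ignore-skip-worktree-bits -- out-of-cone/file.c
+ $ less out-of-cone/file.c
+ $ git sparse-checkout reapply   # file is unmodified, so it is skipped again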
+
+However, when we need to do a query on history restricted to the
+"relevant subset of files" such a transiently expanded sparse
+specification is ignored. There are a couple reasons for this:
+
+ * The behavior wanted when doing something like
+ git grep expression REVISION
+ is roughly what the users would expect from
+ git checkout REVISION && git grep expression
+ (modulo a "REVISION:" prefix), which has a couple ramifications:
+
+ * REVISION may have paths not in the current index, so there is no
+ path we can consult for a SKIP_WORKTREE setting for those paths.
+
+ * Since `checkout` is one of those commands that tries to remove
+ transient differences in the sparse specification, it makes sense
+ to use the corrected sparse specification
+ (i.e. $GIT_DIR/info/sparse-checkout) rather than attempting to
+ consult SKIP_WORKTREE anyway.
+
+So, a transiently expanded (or restricted) sparse specification applies to
+the working tree, but not to history queries where we always use the
+sparsity patterns. (See [16] for an early discussion of this.)
+
+Similar to a transiently expanded sparse specification of the working tree
+based on additional files being present in the working tree, we also need
+to consider additional files being modified in the index. In particular,
+if the user has staged changes to files (relative to HEAD) that do not
+match the sparsity patterns, and the file is not present in the working
+tree, we still want to consider the file part of the sparse specification
+if we are specifically performing a query related to the index (e.g. git
+diff --cached [REVISION], git diff-index [REVISION], git restore --staged
+--source=REVISION -- PATHS, etc.) Note that a transiently expanded sparse
+specification for the index usually only matters under behavior A, since
+under behavior B index operations are lumped with history and tend to
+operate full-tree.
+
+
+=== Implementation Questions ===
+
+ * Do the options --scope={sparse,all} sound good to others? Are there better
+ options?
+ * Names in use, or appearing in patches, or previously suggested:
+ * --sparse/--dense
+ * --ignore-skip-worktree-bits
+ * --ignore-skip-worktree-entries
+ * --ignore-sparsity
+ * --[no-]restrict-to-sparse-paths
+ * --full-tree/--sparse-tree
+ * --[no-]restrict
+ * --scope={sparse,all}
+ * --focus/--unfocus
+ * --limit/--unlimited
+ * Rationale making me lean slightly towards --scope={sparse,all}:
+ * We want a name that works for many commands, so we need a name that
+ does not conflict
+ * We know that we have more than two possible usecases, so it is best
+ to avoid a flag that appears to be binary.
+ * --scope={sparse,all} isn't overly long and seems relatively
+ explanatory
+ * `--sparse`, as used in add/rm/mv, is totally backwards for
+ grep/log/etc. Changing the meaning of `--sparse` for these
+ commands would fix the backwardness, but possibly break existing
+ scripts. Using a new name pairing would allow us to treat
+ `--sparse` in these commands as a deprecated alias.
+ * There is a different `--sparse`/`--dense` pair for commands using
+ revision machinery, so using that naming might cause confusion
+ * There is also a `--sparse` in both pack-objects and show-branch, which
+ don't conflict but do suggest that `--sparse` is overloaded
+ * The name --ignore-skip-worktree-bits is a double negative, is
+ quite a mouthful, refers to an implementation detail that many
+ users may not be familiar with, and we'd need a negation for it
+ which would probably be even more ridiculously long. (But we
+ can make --ignore-skip-worktree-bits a deprecated alias for
+ --no-restrict.)
+
+ * If a config option is added (sparse.scope?) what should the values and
+ description be? "sparse" (behavior A), "worktree-sparse-history-dense"
+ (behavior B), "dense" (behavior C)? There's a risk of confusion,
+ because even for Behaviors A and B we want some commands to be
+ full-tree and others to operate sparsely, so the wording may need to be
+ more tied to the usecases and somehow explain that. Also, right now,
+ the primary difference we are focusing on is just the history-querying
+ commands (log/diff/grep). Previous config suggestion here: [13]
+
+ * Is `--no-expand` a good alias for ls-files's `--sparse` option?
+ (`--sparse` does not map to either `--scope=sparse` or `--scope=all`,
+ because in non-cone mode it does nothing and in cone-mode it shows the
+ sparse directory entries which are technically outside the sparse
+ specification)
+
+ * Under Behavior A:
+ * Does ls-files' `--no-expand` override the default `--scope=all`, or
+ does it need an extra flag?
+ * Does ls-files' `-t` option imply `--scope=all`?
+ * Does update-index's `--[no-]skip-worktree` option imply `--scope=all`?
+
+ * sparse-checkout: once behavior A is fully implemented, should we take
+ an interim measure to ease people into switching the default? Namely,
+ if folks are not already in a sparse checkout, then require
+ `sparse-checkout init/set` to take a
+ `--set-scope=(sparse|worktree-sparse-history-dense|dense)` flag (which
+ would set sparse.scope according to the setting given), and throw an
+ error if the flag is not provided? That error would be a great place
+ to warn folks that the default may change in the future, and get them
+ used to specifying what they want so that the eventual default switch
+ is seamless for them.
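+
+Purely as an illustration of the proposal (the --scope option does NOT
+exist in any released Git; nothing below is implemented yet):
+
+ $ git log -p --scope=sparse    # proposed: limit displayed patches to the sparse specification
+ $ git grep --scope=all foo HEAD   # proposed: explicitly request a full-tree search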
+
+
+=== Implementation Goals/Plans ===
+
+ * Get buy-in on this document in general.
+
+ * Figure out answers to the 'Implementation Questions' sections (above)
+
+ * Fix bugs in the 'Known bugs' section (below)
+
+ * Provide some kind of method for backfilling the blobs within the sparse
+ specification in a partial clone
+
+ [Below here is kind of spitballing since the first two haven't been resolved]
+
+ * update-index: flip the default to --no-ignore-skip-worktree-entries,
+ nuke this stupid "Oh, there's a bug? Let me add a flag to let users
+ request that they not trigger this bug." flag
+
+ * Flags & Config
+ * Make `--sparse` in add/rm/mv a deprecated alias for `--scope=all`
+ * Make `--ignore-skip-worktree-bits` in checkout-index/checkout/restore
+ a deprecated aliases for `--scope=all`
+ * Create config option (sparse.scope?), tie it to the "Cliff notes"
+ overview
+
+ * Add --scope=sparse (and --scope=all) flag to each of the history querying
+ commands. IMPORTANT: make sure diff machinery changes don't mess with
+ format-patch, fast-export, etc.
+
+=== Known bugs ===
+
+This list used to be a lot longer (see e.g. [1,2,3,4,5,6,7,8,9]), but we've
+been working on it.
+
+0. Behavior A is not well supported in Git. (Behavior B didn't use to
+ be either, but was the easier of the two to implement.)
+
+1. am and apply:
+
+ apply, without `--index` or `--cached`, relies on files being present
+ in the working copy, and also writes to them unconditionally. As
+ such, it should first check for the files' presence, and if found to
+ be SKIP_WORKTREE, then clear the bit and vivify the paths, then do
+ its work. Currently, it just throws an error.
+
+ apply, with either `--cached` or `--index`, will not preserve the
+ SKIP_WORKTREE bit. This is fine if the file has conflicts, but
+ otherwise SKIP_WORKTREE bits should be preserved for --cached and
+ probably also for --index.
+
+ am, if there are no conflicts, will vivify files and fail to preserve
+ the SKIP_WORKTREE bit. If there are conflicts and `-3` is not
+ specified, it will vivify files and then complain the patch doesn't
+ apply. If there are conflicts and `-3` is specified, it will vivify
+ files and then complain that those vivified files would be
+ overwritten by merge.
+
+2. reset --hard:
+
+ reset --hard provides a confusing error message (works correctly, but
+ misleads the user into believing it didn't):
+
+ $ touch addme
+ $ git add addme
+ $ git ls-files -t
+ H addme
+ H tracked
+ S tracked-but-maybe-skipped
+ $ git reset --hard # usually works great
+ error: Path 'addme' not uptodate; will not remove from working tree.
+ HEAD is now at bdbbb6f third
+ $ git ls-files -t
+ H tracked
+ S tracked-but-maybe-skipped
+ $ ls -1
+ tracked
+
+ `git reset --hard` DID remove addme from the index and the working tree, contrary
+ to the error message, but in line with how reset --hard should behave.
+
+3. read-tree
+
+ `read-tree` doesn't apply the 'SKIP_WORKTREE' bit to *any* of the
+ entries it reads into the index, resulting in all your files suddenly
+ appearing to be "deleted".
+
+4. Checkout, restore:
+
+ These commands do not handle path & revision arguments appropriately:
+
+ $ ls
+ tracked
+ $ git ls-files -t
+ H tracked
+ S tracked-but-maybe-skipped
+ $ git status --porcelain
+ $ git checkout -- '*skipped'
+ error: pathspec '*skipped' did not match any file(s) known to git
+ $ git ls-files -- '*skipped'
+ tracked-but-maybe-skipped
+ $ git checkout HEAD -- '*skipped'
+ error: pathspec '*skipped' did not match any file(s) known to git
+ $ git ls-tree HEAD | grep skipped
+ 100644 blob 276f5a64354b791b13840f02047738c77ad0584f tracked-but-maybe-skipped
+ $ git status --porcelain
+ $ git checkout HEAD~1 -- '*skipped'
+ $ git ls-files -t
+ H tracked
+ H tracked-but-maybe-skipped
+ $ git status --porcelain
+ M tracked-but-maybe-skipped
+ $ git checkout HEAD -- '*skipped'
+ $ git status --porcelain
+ $
+
+ Note that checkout without a revision (or restore --staged) fails to
+ find a file to restore from the index, even though ls-files shows
+ such a file certainly exists.
+
+ Similar issues occur with HEAD (--source=HEAD in restore's case),
+ but suddenly works when HEAD~1 is specified. And then after that it
+ will work with HEAD specified, even though it didn't before.
+
+ Directories are also an issue:
+
+ $ git sparse-checkout set nomatches
+ $ git status
+ On branch main
+ You are in a sparse checkout with 0% of tracked files present.
+
+ nothing to commit, working tree clean
+ $ git checkout .
+ error: pathspec '.' did not match any file(s) known to git
+ $ git checkout HEAD~1 .
+ Updated 1 path from 58916d9
+ $ git ls-files -t
+ S tracked
+ H tracked-but-maybe-skipped
+
+5. checkout and restore --staged, continued:
+
+ These commands do not correctly scope operations to the sparse
+ specification, and make it worse by not setting important SKIP_WORKTREE
+ bits:
+
+ $ git restore --source OLDREV --staged outside-sparse-cone/
+ $ git status --porcelain
+ MD outside-sparse-cone/file1
+ MD outside-sparse-cone/file2
+ MD outside-sparse-cone/file3
+
+ We can add a --scope=all mode to `git restore` to let it operate outside
+ the sparse specification, but then it will be important to set the
+ SKIP_WORKTREE bits appropriately.
+
+6. Performance issues; see:
+ https://lore.kernel.org/git/CABPp-BEkJQoKZsQGCYioyga_uoDQ6iBeW+FKr8JhyuuTMK1RDw@mail.gmail.com/
+
+
+=== Reference Emails ===
+
+Emails that detail various bugs we've had in sparse-checkout:
+
+[1] (Original descriptions of behavior A & behavior B)
+ https://lore.kernel.org/git/CABPp-BGJ_Nvi5TmgriD9Bh6eNXE2EDq2f8e8QKXAeYG3BxZafA@mail.gmail.com/
+[2] (Fix stash applications in sparse checkouts; bugs from behavioral differences)
+ https://lore.kernel.org/git/ccfedc7140dbf63ba26a15f93bd3885180b26517.1606861519.git.gitgitgadget@gmail.com/
+[3] (Present-despite-skipped entries)
+ https://lore.kernel.org/git/11d46a399d26c913787b704d2b7169cafc28d639.1642175983.git.gitgitgadget@gmail.com/
+[4] (Clone --no-checkout interaction)
+ https://lore.kernel.org/git/pull.801.v2.git.git.1591324899170.gitgitgadget@gmail.com/ (clone --no-checkout)
+[5] (The need for update_sparsity() and avoiding `read-tree -mu HEAD`)
+ https://lore.kernel.org/git/3a1f084641eb47515b5a41ed4409a36128913309.1585270142.git.gitgitgadget@gmail.com/
+[6] (SKIP_WORKTREE is advisory, not mandatory)
+ https://lore.kernel.org/git/844306c3e86ef67591cc086decb2b760e7d710a3.1585270142.git.gitgitgadget@gmail.com/
+[7] (`worktree add` should copy sparsity settings from current worktree)
+ https://lore.kernel.org/git/c51cb3714e7b1d2f8c9370fe87eca9984ff4859f.1644269584.git.gitgitgadget@gmail.com/
+[8] (Avoid negative surprises in add, rm, and mv)
+ https://lore.kernel.org/git/cover.1617914011.git.matheus.bernardino@usp.br/
+ https://lore.kernel.org/git/pull.1018.v4.git.1632497954.gitgitgadget@gmail.com/
+[9] (Move from out-of-cone to in-cone)
+ https://lore.kernel.org/git/20220630023737.473690-6-shaoxuan.yuan02@gmail.com/
+ https://lore.kernel.org/git/20220630023737.473690-4-shaoxuan.yuan02@gmail.com/
+[10] (Unnecessarily downloading objects outside sparse specification)
+ https://lore.kernel.org/git/CAOLTT8QfwOi9yx_qZZgyGa8iL8kHWutEED7ok_jxwTcYT_hf9Q@mail.gmail.com/
+
+[11] (Stolee's comments on high-level usecases)
+ https://lore.kernel.org/git/1a1e33f6-3514-9afc-0a28-5a6b85bd8014@gmail.com/
+
+[12] Others commenting on eventually switching default to behavior A:
+ * https://lore.kernel.org/git/xmqqh719pcoo.fsf@gitster.g/
+ * https://lore.kernel.org/git/xmqqzgeqw0sy.fsf@gitster.g/
+ * https://lore.kernel.org/git/a86af661-cf58-a4e5-0214-a67d3a794d7e@github.com/
+
+[13] Previous config name suggestion and description
+ * https://lore.kernel.org/git/CABPp-BE6zW0nJSStcVU=_DoDBnPgLqOR8pkTXK3dW11=T01OhA@mail.gmail.com/
+
+[14] Tangential issue: switch to cone mode as default sparse specification mechanism:
+ https://lore.kernel.org/git/a1b68fd6126eb341ef3637bb93fedad4309b36d0.1650594746.git.gitgitgadget@gmail.com/
+
+[15] Lengthy email on grep behavior, covering what should be searched:
+ * https://lore.kernel.org/git/CABPp-BGVO3QdbfE84uF_3QDF0-y2iHHh6G5FAFzNRfeRitkuHw@mail.gmail.com/
+
+[16] Email explaining sparsity patterns vs. SKIP_WORKTREE and history operations,
+ search for the parenthetical comment starting "We do not check".
+ https://lore.kernel.org/git/CABPp-BFsCPPNOZ92JQRJeGyNd0e-TCW-LcLyr0i_+VSQJP+GCg@mail.gmail.com/
+
+[17] https://lore.kernel.org/git/20220207190320.2960362-1-jonathantanmy@google.com/
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 024cbd635e..e3eaeb4926 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.38.1
+DEF_VER=v2.38.GIT
LF='
'
diff --git a/INSTALL b/INSTALL
index 89b15d71df..3344788397 100644
--- a/INSTALL
+++ b/INSTALL
@@ -133,10 +133,6 @@ Issues of note:
you are using libcurl older than 7.34.0. Otherwise you can use
NO_OPENSSL without losing git-imap-send.
- By default, git uses OpenSSL for SHA1 but it will use its own
- library (inspired by Mozilla's) with either NO_OPENSSL or
- BLK_SHA1.
-
- "libcurl" library is used for fetching and pushing
repositories over http:// or https://, as well as by
git-imap-send if the curl version is >= 7.34.0. If you do
diff --git a/Makefile b/Makefile
index cac3452edb..54f3bdc6a3 100644
--- a/Makefile
+++ b/Makefile
@@ -4,8 +4,20 @@ all::
# Import tree-wide shared Makefile behavior and libraries
include shared.mak
+# == Makefile defines ==
+#
+# These defines change the behavior of the Makefile itself, but have
+# no impact on what it builds:
+#
# Define V=1 to have a more verbose compile.
#
+# == Portability and optional library defines ==
+#
+# These defines indicate what Git can expect from the OS, what
+# libraries are available etc. Much of this is auto-detected in
+# config.mak.uname, or in configure.ac when using the optional "make
+# configure && ./configure" (see INSTALL).
+#
# Define SHELL_PATH to a POSIX shell if your /bin/sh is broken.
#
# Define SANE_TOOL_PATH to a colon-separated list of paths to prepend
@@ -30,68 +42,8 @@ include shared.mak
#
# Define NO_OPENSSL environment variable if you do not have OpenSSL.
#
-# Define USE_LIBPCRE if you have and want to use libpcre. Various
-# commands such as log and grep offer runtime options to use
-# Perl-compatible regular expressions instead of standard or extended
-# POSIX regular expressions.
-#
-# Only libpcre version 2 is supported. USE_LIBPCRE2 is a synonym for
-# USE_LIBPCRE, support for the old USE_LIBPCRE1 has been removed.
-#
-# Define LIBPCREDIR=/foo/bar if your PCRE header and library files are
-# in /foo/bar/include and /foo/bar/lib directories.
-#
# Define HAVE_ALLOCA_H if you have working alloca(3) defined in that header.
#
-# Define NO_CURL if you do not have libcurl installed. git-http-fetch and
-# git-http-push are not built, and you cannot use http:// and https://
-# transports (neither smart nor dumb).
-#
-# Define CURLDIR=/foo/bar if your curl header and library files are in
-# /foo/bar/include and /foo/bar/lib directories.
-#
-# Define CURL_CONFIG to curl's configuration program that prints information
-# about the library (e.g., its version number). The default is 'curl-config'.
-#
-# Define CURL_LDFLAGS to specify flags that you need to link when using libcurl,
-# if you do not want to rely on the libraries provided by CURL_CONFIG. The
-# default value is a result of `curl-config --libs`. An example value for
-# CURL_LDFLAGS is as follows:
-#
-# CURL_LDFLAGS=-lcurl
-#
-# Define NO_EXPAT if you do not have expat installed. git-http-push is
-# not built, and you cannot push using http:// and https:// transports (dumb).
-#
-# Define EXPATDIR=/foo/bar if your expat header and library files are in
-# /foo/bar/include and /foo/bar/lib directories.
-#
-# Define EXPAT_NEEDS_XMLPARSE_H if you have an old version of expat (e.g.,
-# 1.1 or 1.2) that provides xmlparse.h instead of expat.h.
-#
-# Define NO_GETTEXT if you don't want Git output to be translated.
-# A translated Git requires GNU libintl or another gettext implementation,
-# plus libintl-perl at runtime.
-#
-# Define USE_GETTEXT_SCHEME and set it to 'fallthrough', if you don't trust
-# the installed gettext translation of the shell scripts output.
-#
-# Define HAVE_LIBCHARSET_H if you haven't set NO_GETTEXT and you can't
-# trust the langinfo.h's nl_langinfo(CODESET) function to return the
-# current character set. GNU and Solaris have a nl_langinfo(CODESET),
-# FreeBSD can use either, but MinGW and some others need to use
-# libcharset.h's locale_charset() instead.
-#
-# Define CHARSET_LIB to the library you need to link with in order to
-# use locale_charset() function. On some platforms this needs to set to
-# -lcharset, on others to -liconv .
-#
-# Define LIBC_CONTAINS_LIBINTL if your gettext implementation doesn't
-# need -lintl when linking.
-#
-# Define NO_MSGFMT_EXTENDED_OPTIONS if your implementation of msgfmt
-# doesn't support GNU extensions like --check and --statistics
-#
# Define HAVE_PATHS_H if you have paths.h and want to use the default PATH
# it specifies.
#
@@ -152,39 +104,6 @@ include shared.mak
# and do not want to use Apple's CommonCrypto library. This allows you
# to provide your own OpenSSL library, for example from MacPorts.
#
-# Define BLK_SHA1 environment variable to make use of the bundled
-# optimized C SHA1 routine.
-#
-# Define DC_SHA1 to unconditionally enable the collision-detecting sha1
-# algorithm. This is slower, but may detect attempted collision attacks.
-# Takes priority over other *_SHA1 knobs.
-#
-# Define DC_SHA1_EXTERNAL in addition to DC_SHA1 if you want to build / link
-# git with the external SHA1 collision-detect library.
-# Without this option, i.e. the default behavior is to build git with its
-# own built-in code (or submodule).
-#
-# Define DC_SHA1_SUBMODULE in addition to DC_SHA1 to use the
-# sha1collisiondetection shipped as a submodule instead of the
-# non-submodule copy in sha1dc/. This is an experimental option used
-# by the git project to migrate to using sha1collisiondetection as a
-# submodule.
-#
-# Define OPENSSL_SHA1 environment variable when running make to link
-# with the SHA1 routine from openssl library.
-#
-# Define SHA1_MAX_BLOCK_SIZE to limit the amount of data that will be hashed
-# in one call to the platform's SHA1_Update(). e.g. APPLE_COMMON_CRYPTO
-# wants 'SHA1_MAX_BLOCK_SIZE=1024L*1024L*1024L' defined.
-#
-# Define BLK_SHA256 to use the built-in SHA-256 routines.
-#
-# Define NETTLE_SHA256 to use the SHA-256 routines in libnettle.
-#
-# Define GCRYPT_SHA256 to use the SHA-256 routines in libgcrypt.
-#
-# Define OPENSSL_SHA256 to use the SHA-256 routines in OpenSSL.
-#
# Define NEEDS_CRYPTO_WITH_SSL if you need -lcrypto when using -lssl (Darwin).
#
# Define NEEDS_SSL_WITH_CRYPTO if you need -lssl when using -lcrypto (Darwin).
@@ -490,6 +409,151 @@ include shared.mak
# to the "<name>" of the corresponding `compat/fsmonitor/fsm-settings-<name>.c`
# that implements the `fsm_os_settings__*()` routines.
#
+# === Optional library: libintl ===
+#
+# Define NO_GETTEXT if you don't want Git output to be translated.
+# A translated Git requires GNU libintl or another gettext implementation,
+# plus libintl-perl at runtime.
+#
+# Define USE_GETTEXT_SCHEME and set it to 'fallthrough', if you don't trust
+# the installed gettext translation of the shell scripts output.
+#
+# Define HAVE_LIBCHARSET_H if you haven't set NO_GETTEXT and you can't
+# trust the langinfo.h's nl_langinfo(CODESET) function to return the
+# current character set. GNU and Solaris have a nl_langinfo(CODESET),
+# FreeBSD can use either, but MinGW and some others need to use
+# libcharset.h's locale_charset() instead.
+#
+# Define CHARSET_LIB to the library you need to link with in order to
+# use the locale_charset() function. On some platforms this needs to be
+# set to -lcharset, on others to -liconv.
+#
+# Define LIBC_CONTAINS_LIBINTL if your gettext implementation doesn't
+# need -lintl when linking.
+#
+# Define NO_MSGFMT_EXTENDED_OPTIONS if your implementation of msgfmt
+# doesn't support GNU extensions like --check and --statistics
+#
+# === Optional library: libexpat ===
+#
+# Define NO_EXPAT if you do not have expat installed. git-http-push is
+# not built, and you cannot push using http:// and https:// transports (dumb).
+#
+# Define EXPATDIR=/foo/bar if your expat header and library files are in
+# /foo/bar/include and /foo/bar/lib directories.
+#
+# Define EXPAT_NEEDS_XMLPARSE_H if you have an old version of expat (e.g.,
+# 1.1 or 1.2) that provides xmlparse.h instead of expat.h.
+
+# === Optional library: libcurl ===
+#
+# Define NO_CURL if you do not have libcurl installed. git-http-fetch and
+# git-http-push are not built, and you cannot use http:// and https://
+# transports (neither smart nor dumb).
+#
+# Define CURLDIR=/foo/bar if your curl header and library files are in
+# /foo/bar/include and /foo/bar/lib directories.
+#
+# Define CURL_CONFIG to curl's configuration program that prints information
+# about the library (e.g., its version number). The default is 'curl-config'.
+#
+# Define CURL_LDFLAGS to specify flags that you need to link when using libcurl,
+# if you do not want to rely on the libraries provided by CURL_CONFIG. The
+# default value is a result of `curl-config --libs`. An example value for
+# CURL_LDFLAGS is as follows:
+#
+# CURL_LDFLAGS=-lcurl
+#
+# === Optional library: libpcre2 ===
+#
+# Define USE_LIBPCRE if you have and want to use libpcre. Various
+# commands such as log and grep offer runtime options to use
+# Perl-compatible regular expressions instead of standard or extended
+# POSIX regular expressions.
+#
+# Only libpcre version 2 is supported. USE_LIBPCRE2 is a synonym for
+# USE_LIBPCRE, support for the old USE_LIBPCRE1 has been removed.
+#
+# Define LIBPCREDIR=/foo/bar if your PCRE header and library files are
+# in /foo/bar/include and /foo/bar/lib directories.
+#
+# == SHA-1 and SHA-256 defines ==
+#
+# === SHA-1 backend ===
+#
+# ==== Security ====
+#
+# Due to the SHAttered (https://shattered.io) attack vector on SHA-1
+# it's strongly recommended to use the sha1collisiondetection
+# counter-cryptanalysis library for SHA-1 hashing.
+#
+# If you know that you can trust the repository contents, or where
+# potential SHA-1 attacks are otherwise mitigated, the other backends
+# listed in "SHA-1 implementations" are faster than
+# sha1collisiondetection.
+#
+# ==== Default SHA-1 backend ====
+#
+# If no *_SHA1 backend is picked, the first supported one listed in
+# "SHA-1 implementations" will be picked.
+#
+# ==== Options common to all SHA-1 implementations ====
+#
+# Define SHA1_MAX_BLOCK_SIZE to limit the amount of data that will be hashed
+# in one call to the platform's SHA1_Update(). e.g. APPLE_COMMON_CRYPTO
+# wants 'SHA1_MAX_BLOCK_SIZE=1024L*1024L*1024L' defined.
+#
+# ==== SHA-1 implementations ====
+#
+# Define OPENSSL_SHA1 to link to the SHA-1 routines from the OpenSSL
+# library.
+#
+# Define BLK_SHA1 to make use of optimized C SHA-1 routines bundled
+# with git (in the block-sha1/ directory).
+#
+# Define NO_APPLE_COMMON_CRYPTO on OSX to opt-out of using the
+# "APPLE_COMMON_CRYPTO" backend for SHA-1, which is currently the
+# default on that OS. On macOS 10.4 (Tiger) or older,
+# NO_APPLE_COMMON_CRYPTO is defined by default.
+#
+# If you don't enable any of the *_SHA1 settings in this section, Git will
+# default to its built-in sha1collisiondetection library, which is a
+# collision-detecting sha1. This is slower, but may detect attempted
+# collision attacks.
+#
+# ==== Options for the sha1collisiondetection library ====
+#
+# Define DC_SHA1_EXTERNAL if you want to build / link
+# git with the external SHA1 collision-detect library.
+# Without this option, i.e. the default behavior is to build git with its
+# own built-in code (or submodule).
+#
+# Define DC_SHA1_SUBMODULE to use the
+# sha1collisiondetection shipped as a submodule instead of the
+# non-submodule copy in sha1dc/. This is an experimental option used
+# by the git project to migrate to using sha1collisiondetection as a
+# submodule.
+#
+# === SHA-256 backend ===
+#
+# ==== Security ====
+#
+# Unlike SHA-1, the SHA-256 algorithm does not suffer from any known
+# vulnerabilities, so any implementation will do.
+#
+# ==== SHA-256 implementations ====
+#
+# Define OPENSSL_SHA256 to use the SHA-256 routines in OpenSSL.
+#
+# Define NETTLE_SHA256 to use the SHA-256 routines in libnettle.
+#
+# Define GCRYPT_SHA256 to use the SHA-256 routines in libgcrypt.
+#
+# If you don't enable any of the *_SHA256 settings in this section, Git
+# will default to its built-in sha256 implementation.
+#
+# == DEVELOPER defines ==
+#
# Define DEVELOPER to enable more compiler warnings. Compiler version
# and family are auto detected, but could be overridden by defining
# COMPILER_FEATURES (see config.mak.dev). You can still set
@@ -529,8 +593,9 @@ GIT-VERSION-FILE: FORCE
# template_dir
# sysconfdir
# can be specified as a relative path some/where/else;
-# this is interpreted as relative to $(prefix) and "git" at
-# runtime figures out where they are based on the path to the executable.
+# this is interpreted as relative to $(prefix) and "git" built with
+# RUNTIME_PREFIX flag will figure out (at runtime) where they are
+# based on the path to the executable.
# Additionally, the following will be treated as relative by "git" if they
# begin with "$(prefix)/":
# mandir
@@ -626,7 +691,6 @@ THIRD_PARTY_SOURCES =
# interactive shell sessions without exporting it.
unexport CDPATH
-SCRIPT_SH += git-bisect.sh
SCRIPT_SH += git-difftool--helper.sh
SCRIPT_SH += git-filter-branch.sh
SCRIPT_SH += git-merge-octopus.sh
@@ -688,9 +752,9 @@ SCRIPTS = $(SCRIPT_SH_GEN) \
ETAGS_TARGET = TAGS
-FUZZ_OBJS += fuzz-commit-graph.o
-FUZZ_OBJS += fuzz-pack-headers.o
-FUZZ_OBJS += fuzz-pack-idx.o
+FUZZ_OBJS += oss-fuzz/fuzz-commit-graph.o
+FUZZ_OBJS += oss-fuzz/fuzz-pack-headers.o
+FUZZ_OBJS += oss-fuzz/fuzz-pack-idx.o
.PHONY: fuzz-objs
fuzz-objs: $(FUZZ_OBJS)
@@ -721,6 +785,8 @@ PROGRAMS += $(patsubst %.o,git-%$X,$(PROGRAM_OBJS))
TEST_BUILTINS_OBJS += test-advise.o
TEST_BUILTINS_OBJS += test-bitmap.o
TEST_BUILTINS_OBJS += test-bloom.o
+TEST_BUILTINS_OBJS += test-bundle-uri.o
+TEST_BUILTINS_OBJS += test-cache-tree.o
TEST_BUILTINS_OBJS += test-chmtime.o
TEST_BUILTINS_OBJS += test-config.o
TEST_BUILTINS_OBJS += test-crontab.o
@@ -1055,6 +1121,8 @@ LIB_OBJS += refs/debug.o
LIB_OBJS += refs/files-backend.o
LIB_OBJS += refs/iterator.o
LIB_OBJS += refs/packed-backend.o
+LIB_OBJS += refs/packed-format-v1.o
+LIB_OBJS += refs/packed-format-v2.o
LIB_OBJS += refs/ref-cache.o
LIB_OBJS += refspec.o
LIB_OBJS += remote.o
@@ -1094,6 +1162,7 @@ LIB_OBJS += trace.o
LIB_OBJS += trace2.o
LIB_OBJS += trace2/tr2_cfg.o
LIB_OBJS += trace2/tr2_cmd_name.o
+LIB_OBJS += trace2/tr2_ctr.o
LIB_OBJS += trace2/tr2_dst.o
LIB_OBJS += trace2/tr2_sid.o
LIB_OBJS += trace2/tr2_sysenv.o
@@ -1102,6 +1171,7 @@ LIB_OBJS += trace2/tr2_tgt_event.o
LIB_OBJS += trace2/tr2_tgt_normal.o
LIB_OBJS += trace2/tr2_tgt_perf.o
LIB_OBJS += trace2/tr2_tls.o
+LIB_OBJS += trace2/tr2_tmr.o
LIB_OBJS += trailer.o
LIB_OBJS += transport-helper.o
LIB_OBJS += transport.o
@@ -1133,7 +1203,7 @@ BUILTIN_OBJS += builtin/am.o
BUILTIN_OBJS += builtin/annotate.o
BUILTIN_OBJS += builtin/apply.o
BUILTIN_OBJS += builtin/archive.o
-BUILTIN_OBJS += builtin/bisect--helper.o
+BUILTIN_OBJS += builtin/bisect.o
BUILTIN_OBJS += builtin/blame.o
BUILTIN_OBJS += builtin/branch.o
BUILTIN_OBJS += builtin/bugreport.o
@@ -1298,11 +1368,53 @@ SP_EXTRA_FLAGS = -Wno-universal-initializer
SANITIZE_LEAK =
SANITIZE_ADDRESS =
-# For the 'coccicheck' target; setting SPATCH_BATCH_SIZE higher will
-# usually result in less CPU usage at the cost of higher peak memory.
-# Setting it to 0 will feed all files in a single spatch invocation.
-SPATCH_FLAGS = --all-includes
-SPATCH_BATCH_SIZE = 1
+# For the 'coccicheck' target
+SPATCH_INCLUDE_FLAGS = --all-includes
+SPATCH_FLAGS =
+SPATCH_TEST_FLAGS =
+
+# If *.o files are present, have "coccicheck" depend on them; with
+# COMPUTE_HEADER_DEPENDENCIES this will speed up the common case of
+# only needing to re-generate coccicheck results for the users of a
+# given API if it's changed, and not all files in the project. If
+# COMPUTE_HEADER_DEPENDENCIES=no this will be unset too.
+SPATCH_USE_O_DEPENDENCIES = YesPlease
+
+# Set SPATCH_CONCAT_COCCI to concatenate the contrib/cocci/*.cocci
+# files into a single contrib/cocci/ALL.cocci before running
+# "coccicheck".
+#
+# Pros:
+#
+# - Speeds up a one-shot run of "make coccicheck", as we won't have to
+# parse *.[ch] files N times for the N *.cocci rules
+#
+# Cons:
+#
+# - Will make incremental development of *.cocci slower, as
+# e.g. changing strbuf.cocci will re-run all *.cocci.
+#
+# - Makes error and performance analysis harder, as rules will be
+# applied from a monolithic ALL.cocci, rather than
+# e.g. strbuf.cocci. To work around this either undefine this, or
+# generate a specific patch, e.g. this will always use strbuf.cocci,
+# not ALL.cocci:
+#
+# make contrib/coccinelle/strbuf.cocci.patch
+SPATCH_CONCAT_COCCI = YesPlease
+
+# Rebuild 'coccicheck' if $(SPATCH), its flags etc. change
+TRACK_SPATCH_DEFINES =
+TRACK_SPATCH_DEFINES += $(SPATCH)
+TRACK_SPATCH_DEFINES += $(SPATCH_INCLUDE_FLAGS)
+TRACK_SPATCH_DEFINES += $(SPATCH_FLAGS)
+TRACK_SPATCH_DEFINES += $(SPATCH_TEST_FLAGS)
+GIT-SPATCH-DEFINES: FORCE
+ @FLAGS='$(TRACK_SPATCH_DEFINES)'; \
+ if test x"$$FLAGS" != x"`cat GIT-SPATCH-DEFINES 2>/dev/null`" ; then \
+ echo >&2 " * new spatch flags"; \
+ echo "$$FLAGS" >GIT-SPATCH-DEFINES; \
+ fi
include config.mak.uname
-include config.mak.autogen
@@ -1338,6 +1450,7 @@ BASIC_CFLAGS += -DSHA1DC_FORCE_ALIGNED_ACCESS
endif
ifneq ($(filter leak,$(SANITIZERS)),)
BASIC_CFLAGS += -DSUPPRESS_ANNOTATED_LEAKS
+BASIC_CFLAGS += -O0
SANITIZE_LEAK = YesCompiledWithIt
endif
ifneq ($(filter address,$(SANITIZERS)),)
@@ -1442,7 +1555,6 @@ ifeq ($(uname_S),Darwin)
APPLE_COMMON_CRYPTO = YesPlease
COMPAT_CFLAGS += -DAPPLE_COMMON_CRYPTO
endif
- NO_REGEX = YesPlease
PTHREAD_LIBS =
endif
@@ -1822,7 +1934,6 @@ ifdef APPLE_COMMON_CRYPTO
COMPAT_CFLAGS += -DCOMMON_DIGEST_FOR_OPENSSL
BASIC_CFLAGS += -DSHA1_APPLE
else
- DC_SHA1 := YesPlease
BASIC_CFLAGS += -DSHA1_DC
LIB_OBJS += sha1dc_git.o
ifdef DC_SHA1_EXTERNAL
@@ -2039,11 +2150,13 @@ ifdef FSMONITOR_DAEMON_BACKEND
COMPAT_CFLAGS += -DHAVE_FSMONITOR_DAEMON_BACKEND
COMPAT_OBJS += compat/fsmonitor/fsm-listen-$(FSMONITOR_DAEMON_BACKEND).o
COMPAT_OBJS += compat/fsmonitor/fsm-health-$(FSMONITOR_DAEMON_BACKEND).o
+ COMPAT_OBJS += compat/fsmonitor/fsm-ipc-$(FSMONITOR_DAEMON_BACKEND).o
endif
ifdef FSMONITOR_OS_SETTINGS
COMPAT_CFLAGS += -DHAVE_FSMONITOR_OS_SETTINGS
COMPAT_OBJS += compat/fsmonitor/fsm-settings-$(FSMONITOR_OS_SETTINGS).o
+ COMPAT_OBJS += compat/fsmonitor/fsm-path-utils-$(FSMONITOR_OS_SETTINGS).o
endif
ifeq ($(TCLTK_PATH),)
@@ -2980,9 +3093,9 @@ GIT-BUILD-OPTIONS: FORCE
@echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@+
@echo NO_PTHREADS=\''$(subst ','\'',$(subst ','\'',$(NO_PTHREADS)))'\' >>$@+
@echo NO_PYTHON=\''$(subst ','\'',$(subst ','\'',$(NO_PYTHON)))'\' >>$@+
+ @echo NO_REGEX=\''$(subst ','\'',$(subst ','\'',$(NO_REGEX)))'\' >>$@+
@echo NO_UNIX_SOCKETS=\''$(subst ','\'',$(subst ','\'',$(NO_UNIX_SOCKETS)))'\' >>$@+
@echo PAGER_ENV=\''$(subst ','\'',$(subst ','\'',$(PAGER_ENV)))'\' >>$@+
- @echo DC_SHA1=\''$(subst ','\'',$(subst ','\'',$(DC_SHA1)))'\' >>$@+
@echo SANITIZE_LEAK=\''$(subst ','\'',$(subst ','\'',$(SANITIZE_LEAK)))'\' >>$@+
@echo SANITIZE_ADDRESS=\''$(subst ','\'',$(subst ','\'',$(SANITIZE_ADDRESS)))'\' >>$@+
@echo X=\'$(X)\' >>$@+
@@ -3136,35 +3249,113 @@ check: $(GENERATED_H)
exit 1; \
fi
+COCCI_GEN_ALL = .build/contrib/coccinelle/ALL.cocci
+COCCI_GLOB = $(wildcard contrib/coccinelle/*.cocci)
+COCCI_RULES_TRACKED = $(COCCI_GLOB:%=.build/%)
+COCCI_RULES_TRACKED_NO_PENDING = $(filter-out %.pending.cocci,$(COCCI_RULES_TRACKED))
+COCCI_RULES =
+COCCI_RULES += $(COCCI_GEN_ALL)
+COCCI_RULES += $(COCCI_RULES_TRACKED)
+COCCI_NAMES =
+COCCI_NAMES += $(COCCI_RULES:.build/contrib/coccinelle/%.cocci=%)
+
+COCCICHECK_PENDING = $(filter %.pending.cocci,$(COCCI_RULES))
+COCCICHECK = $(filter-out $(COCCICHECK_PENDING),$(COCCI_RULES))
+
+COCCICHECK_PATCHES = $(COCCICHECK:%=%.patch)
+COCCICHECK_PATCHES_PENDING = $(COCCICHECK_PENDING:%=%.patch)
+
+COCCICHECK_PATCHES_INTREE = $(COCCICHECK_PATCHES:.build/%=%)
+COCCICHECK_PATCHES_PENDING_INTREE = $(COCCICHECK_PATCHES_PENDING:.build/%=%)
+
+# It's expensive to compute the many=many rules below, only eval them
+# on $(MAKECMDGOALS) that match these $(COCCI_RULES)
+COCCI_RULES_GLOB =
+COCCI_RULES_GLOB += cocci%
+COCCI_RULES_GLOB += .build/contrib/coccinelle/%
+COCCI_RULES_GLOB += $(COCCICHECK_PATCHES)
+COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_PENDING)
+COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_INTREE)
+COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_PENDING_INTREE)
+COCCI_GOALS = $(filter $(COCCI_RULES_GLOB),$(MAKECMDGOALS))
+
COCCI_TEST_RES = $(wildcard contrib/coccinelle/tests/*.res)
-%.cocci.patch: %.cocci $(COCCI_SOURCES)
- $(QUIET_SPATCH) \
- if test $(SPATCH_BATCH_SIZE) = 0; then \
- limit=; \
- else \
- limit='-n $(SPATCH_BATCH_SIZE)'; \
- fi; \
- if ! echo $(COCCI_SOURCES) | xargs $$limit \
- $(SPATCH) $(SPATCH_FLAGS) \
- --sp-file $< --patch . \
- >$@+ 2>$@.log; \
+$(COCCI_RULES_TRACKED): .build/% : %
+ $(call mkdir_p_parent_template)
+ $(QUIET_CP)cp $< $@
+
+.build/contrib/coccinelle/FOUND_H_SOURCES: $(FOUND_H_SOURCES)
+ $(call mkdir_p_parent_template)
+ $(QUIET_GEN) >$@
+
+$(COCCI_GEN_ALL): $(COCCI_RULES_TRACKED_NO_PENDING)
+ $(call mkdir_p_parent_template)
+ $(QUIET_SPATCH_CAT)cat $^ >$@
+
+ifeq ($(COMPUTE_HEADER_DEPENDENCIES),no)
+SPATCH_USE_O_DEPENDENCIES =
+endif
+define cocci-rule
+
+## Rule for .build/$(1).patch/$(2); Params:
+# $(1) = e.g. ".build/contrib/coccinelle/free.cocci"
+# $(2) = e.g. "grep.c"
+# $(3) = e.g. "grep.o"
+COCCI_$(1:.build/contrib/coccinelle/%.cocci=%) += $(1).d/$(2).patch
+$(1).d/$(2).patch: GIT-SPATCH-DEFINES
+$(1).d/$(2).patch: $(if $(and $(SPATCH_USE_O_DEPENDENCIES),$(wildcard $(3))),$(3),.build/contrib/coccinelle/FOUND_H_SOURCES)
+$(1).d/$(2).patch: $(1)
+$(1).d/$(2).patch: $(1).d/%.patch : %
+ $$(call mkdir_p_parent_template)
+ $$(QUIET_SPATCH)if ! $$(SPATCH) $$(SPATCH_FLAGS) \
+ $$(SPATCH_INCLUDE_FLAGS) \
+ --sp-file $(1) --patch . $$< \
+ >$$@ 2>$$@.log; \
then \
- cat $@.log; \
+ echo "ERROR when applying '$(1)' to '$$<'; '$$@.log' follows:"; \
+ cat $$@.log; \
exit 1; \
- fi; \
- mv $@+ $@; \
- if test -s $@; \
+ fi
+endef
+
+define cocci-matrix
+
+$(foreach s,$(COCCI_SOURCES),$(call cocci-rule,$(c),$(s),$(s:%.c=%.o)))
+endef
+
+ifdef COCCI_GOALS
+$(eval $(foreach c,$(COCCI_RULES),$(call cocci-matrix,$(c))))
+endif
+
+define spatch-rule
+
+.build/contrib/coccinelle/$(1).cocci.patch: $$(COCCI_$(1))
+ $$(QUIET_SPATCH_CAT)cat $$^ >$$@ && \
+ if test -s $$@; \
then \
- echo ' ' SPATCH result: $@; \
+ echo ' ' SPATCH result: $$@; \
fi
+contrib/coccinelle/$(1).cocci.patch: .build/contrib/coccinelle/$(1).cocci.patch
+ $$(QUIET_CP)cp $$< $$@
+
+endef
+
+ifdef COCCI_GOALS
+$(eval $(foreach n,$(COCCI_NAMES),$(call spatch-rule,$(n))))
+endif
COCCI_TEST_RES_GEN = $(addprefix .build/,$(COCCI_TEST_RES))
+$(COCCI_TEST_RES_GEN): GIT-SPATCH-DEFINES
$(COCCI_TEST_RES_GEN): .build/%.res : %.c
$(COCCI_TEST_RES_GEN): .build/%.res : %.res
+ifdef SPATCH_CONCAT_COCCI
+$(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : $(COCCI_GEN_ALL)
+else
$(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : contrib/coccinelle/%.cocci
+endif
$(call mkdir_p_parent_template)
- $(QUIET_SPATCH_T)$(SPATCH) $(SPATCH_FLAGS) \
+ $(QUIET_SPATCH_TEST)$(SPATCH) $(SPATCH_TEST_FLAGS) \
--very-quiet --no-show-diff \
--sp-file $< -o $@ \
$(@:.build/%.res=%.c) && \
@@ -3175,11 +3366,15 @@ $(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : contrib/coccinell
coccicheck-test: $(COCCI_TEST_RES_GEN)
coccicheck: coccicheck-test
-coccicheck: $(addsuffix .patch,$(filter-out %.pending.cocci,$(wildcard contrib/coccinelle/*.cocci)))
+ifdef SPATCH_CONCAT_COCCI
+coccicheck: contrib/coccinelle/ALL.cocci.patch
+else
+coccicheck: $(COCCICHECK_PATCHES_INTREE)
+endif
# See contrib/coccinelle/README
coccicheck-pending: coccicheck-test
-coccicheck-pending: $(addsuffix .patch,$(wildcard contrib/coccinelle/*.pending.cocci))
+coccicheck-pending: $(COCCICHECK_PATCHES_PENDING_INTREE)
.PHONY: coccicheck coccicheck-pending
@@ -3446,8 +3641,9 @@ profile-clean:
$(RM) $(addsuffix *.gcno,$(addprefix $(PROFILE_DIR)/, $(object_dirs)))
cocciclean:
+ $(RM) GIT-SPATCH-DEFINES
$(RM) -r .build/contrib/coccinelle
- $(RM) contrib/coccinelle/*.cocci.patch*
+ $(RM) contrib/coccinelle/*.cocci.patch
clean: profile-clean coverage-clean cocciclean
$(RM) -r .build
diff --git a/RelNotes b/RelNotes
index 141903f31a..758368388a 120000
--- a/RelNotes
+++ b/RelNotes
@@ -1 +1 @@
-Documentation/RelNotes/2.38.1.txt \ No newline at end of file
+Documentation/RelNotes/2.39.0.txt \ No newline at end of file
diff --git a/add-interactive.c b/add-interactive.c
index f071b2a1b4..ecc5ae1b24 100644
--- a/add-interactive.c
+++ b/add-interactive.c
@@ -997,18 +997,17 @@ static int run_diff(struct add_i_state *s, const struct pathspec *ps,
count = list_and_choose(s, files, opts);
opts->flags = 0;
if (count > 0) {
- struct strvec args = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
- strvec_pushl(&args, "git", "diff", "-p", "--cached",
+ strvec_pushl(&cmd.args, "git", "diff", "-p", "--cached",
oid_to_hex(!is_initial ? &oid :
s->r->hash_algo->empty_tree),
"--", NULL);
for (i = 0; i < files->items.nr; i++)
if (files->selected[i])
- strvec_push(&args,
+ strvec_push(&cmd.args,
files->items.items[i].string);
- res = run_command_v_opt(args.v, 0);
- strvec_clear(&args);
+ res = run_command(&cmd);
}
putchar('\n');
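A minimal sketch of the conversion this hunk (and many below) applies: build the argument list directly in cmd.args and let run_command() clean up, instead of managing a separate strvec for run_command_v_opt(). The helper name show_cached_diff() is hypothetical; the child_process API calls are the ones used in the hunk above.

#include "run-command.h"
#include "strvec.h"

/* Assemble and run "git diff -p --cached -- <path>" via a child_process. */
static int show_cached_diff(const char *path)
{
	struct child_process cmd = CHILD_PROCESS_INIT;

	cmd.git_cmd = 1;	/* invoke the "git diff" builtin, no "git" argv[0] needed */
	strvec_pushl(&cmd.args, "diff", "-p", "--cached", "--", path, NULL);
	return run_command(&cmd);	/* also releases cmd.args */
}

The design point is that the child_process owns its argv, so the explicit strvec_clear() calls of the old code become unnecessary.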
diff --git a/apply.c b/apply.c
index 2b7cd930ef..bc33814313 100644
--- a/apply.c
+++ b/apply.c
@@ -125,7 +125,7 @@ void clear_apply_state(struct apply_state *state)
/* &state->fn_table is cleared at the end of apply_patch() */
}
-static void mute_routine(const char *msg, va_list params)
+static void mute_routine(const char *msg UNUSED, va_list params UNUSED)
{
/* do nothing */
}
@@ -386,9 +386,19 @@ static void say_patch_name(FILE *output, const char *fmt, struct patch *patch)
#define SLOP (16)
+/*
+ * apply.c isn't equipped to handle arbitrarily large patches, because
+ * it intermingles `unsigned long` with `int` for the type used to store
+ * buffer lengths.
+ *
+ * Only process patches that are just shy of 1 GiB large in order to
+ * avoid any truncation or overflow issues.
+ */
+#define MAX_APPLY_SIZE (1024UL * 1024 * 1023)
+
static int read_patch_file(struct strbuf *sb, int fd)
{
- if (strbuf_read(sb, fd, 0) < 0)
+ if (strbuf_read(sb, fd, 0) < 0 || sb->len >= MAX_APPLY_SIZE)
return error_errno("git apply: failed to read");
/*
@@ -892,9 +902,9 @@ static int parse_traditional_patch(struct apply_state *state,
return 0;
}
-static int gitdiff_hdrend(struct gitdiff_data *state,
- const char *line,
- struct patch *patch)
+static int gitdiff_hdrend(struct gitdiff_data *state UNUSED,
+ const char *line UNUSED,
+ struct patch *patch UNUSED)
{
return 1;
}
@@ -1044,7 +1054,7 @@ static int gitdiff_renamedst(struct gitdiff_data *state,
return 0;
}
-static int gitdiff_similarity(struct gitdiff_data *state,
+static int gitdiff_similarity(struct gitdiff_data *state UNUSED,
const char *line,
struct patch *patch)
{
@@ -1054,7 +1064,7 @@ static int gitdiff_similarity(struct gitdiff_data *state,
return 0;
}
-static int gitdiff_dissimilarity(struct gitdiff_data *state,
+static int gitdiff_dissimilarity(struct gitdiff_data *state UNUSED,
const char *line,
struct patch *patch)
{
@@ -1104,9 +1114,9 @@ static int gitdiff_index(struct gitdiff_data *state,
* This is normal for a diff that doesn't change anything: we'll fall through
* into the next diff. Tell the parser to break out.
*/
-static int gitdiff_unrecognized(struct gitdiff_data *state,
- const char *line,
- struct patch *patch)
+static int gitdiff_unrecognized(struct gitdiff_data *state UNUSED,
+ const char *line UNUSED,
+ struct patch *patch UNUSED)
{
return 1;
}
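Several callbacks above gain UNUSED markers because their signature is dictated by a function-pointer table while a particular implementation ignores some parameters. A small self-contained sketch, assuming UNUSED expands to the compiler's unused attribute roughly as git-compat-util.h defines it for GCC/Clang; the callback names here are illustrative.

#ifndef UNUSED
#define UNUSED __attribute__((unused))
#endif

typedef int (*line_cb)(const char *line, void *cb_data);

/* Keeps the table-driven signature intact while -Wunused-parameter stays quiet. */
static int ignore_line(const char *line UNUSED, void *cb_data UNUSED)
{
	return 1;
}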
diff --git a/archive-tar.c b/archive-tar.c
index 3e4822b684..f8fad2946e 100644
--- a/archive-tar.c
+++ b/archive-tar.c
@@ -498,6 +498,7 @@ static int write_tar_filter_archive(const struct archiver *ar,
strvec_push(&filter.args, cmd.buf);
filter.use_shell = 1;
filter.in = -1;
+ filter.silent_exec_failure = 1;
if (start_command(&filter) < 0)
die_errno(_("unable to start '%s' filter"), cmd.buf);
diff --git a/archive.c b/archive.c
index 61a79e4a22..941495f5d7 100644
--- a/archive.c
+++ b/archive.c
@@ -166,18 +166,16 @@ static int write_archive_entry(const struct object_id *oid, const char *base,
args->convert = check_attr_export_subst(check);
}
+ if (args->verbose)
+ fprintf(stderr, "%.*s\n", (int)path.len, path.buf);
+
if (S_ISDIR(mode) || S_ISGITLINK(mode)) {
- if (args->verbose)
- fprintf(stderr, "%.*s\n", (int)path.len, path.buf);
err = write_entry(args, oid, path.buf, path.len, mode, NULL, 0);
if (err)
return err;
return (S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0);
}
- if (args->verbose)
- fprintf(stderr, "%.*s\n", (int)path.len, path.buf);
-
/* Stream it? */
if (S_ISREG(mode) && !args->convert &&
oid_object_info(args->repo, oid, &size) == OBJ_BLOB &&
@@ -500,7 +498,7 @@ static void parse_treeish_arg(const char **argv,
ar_args->time = archive_time;
}
-static void extra_file_info_clear(void *util, const char *str)
+static void extra_file_info_clear(void *util, const char *str UNUSED)
{
struct extra_file_info *info = util;
free(info->base);
diff --git a/attr.c b/attr.c
index 8250b06953..42ad6de8c7 100644
--- a/attr.c
+++ b/attr.c
@@ -23,10 +23,6 @@ static const char git_attr__unknown[] = "(builtin)unknown";
#define ATTR__UNSET NULL
#define ATTR__UNKNOWN git_attr__unknown
-#ifndef DEBUG_ATTR
-#define DEBUG_ATTR 0
-#endif
-
struct git_attr {
int attr_nr; /* unique attribute number */
char name[FLEX_ARRAY]; /* attribute name */
@@ -807,33 +803,6 @@ static struct attr_stack *read_attr(struct index_state *istate,
return res;
}
-#if DEBUG_ATTR
-static void debug_info(const char *what, struct attr_stack *elem)
-{
- fprintf(stderr, "%s: %s\n", what, elem->origin ? elem->origin : "()");
-}
-static void debug_set(const char *what, const char *match, struct git_attr *attr, const void *v)
-{
- const char *value = v;
-
- if (ATTR_TRUE(value))
- value = "set";
- else if (ATTR_FALSE(value))
- value = "unset";
- else if (ATTR_UNSET(value))
- value = "unspecified";
-
- fprintf(stderr, "%s: %s => %s (%s)\n",
- what, attr->name, (char *) value, match);
-}
-#define debug_push(a) debug_info("push", (a))
-#define debug_pop(a) debug_info("pop", (a))
-#else
-#define debug_push(a) do { ; } while (0)
-#define debug_pop(a) do { ; } while (0)
-#define debug_set(a,b,c,d) do { ; } while (0)
-#endif /* DEBUG_ATTR */
-
static const char *git_etc_gitattributes(void)
{
static const char *system_wide;
@@ -954,7 +923,6 @@ static void prepare_attr_stack(struct index_state *istate,
(!namelen || path[namelen] == '/'))
break;
- debug_pop(elem);
*stack = elem->prev;
attr_stack_free(elem);
}
@@ -1028,7 +996,7 @@ static int path_matches(const char *pathname, int pathlen,
static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem);
-static int fill_one(const char *what, struct all_attrs_item *all_attrs,
+static int fill_one(struct all_attrs_item *all_attrs,
const struct match_attr *a, int rem)
{
int i;
@@ -1039,9 +1007,6 @@ static int fill_one(const char *what, struct all_attrs_item *all_attrs,
const char *v = a->state[i].setto;
if (*n == ATTR__UNKNOWN) {
- debug_set(what,
- a->is_macro ? a->u.attr->name : a->u.pat.pattern,
- attr, v);
*n = v;
rem--;
rem = macroexpand_one(all_attrs, attr->attr_nr, rem);
@@ -1064,7 +1029,7 @@ static int fill(const char *path, int pathlen, int basename_offset,
continue;
if (path_matches(path, pathlen, basename_offset,
&a->u.pat, base, stack->originlen))
- rem = fill_one("fill", all_attrs, a, rem);
+ rem = fill_one(all_attrs, a, rem);
}
}
@@ -1076,7 +1041,7 @@ static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem)
const struct all_attrs_item *item = &all_attrs[nr];
if (item->macro && item->value == ATTR__TRUE)
- return fill_one("expand", all_attrs, item->macro, rem);
+ return fill_one(all_attrs, item->macro, rem);
else
return rem;
}
diff --git a/bisect.c b/bisect.c
index fd581b85a7..ec7487e683 100644
--- a/bisect.c
+++ b/bisect.c
@@ -22,8 +22,6 @@ static struct oid_array skipped_revs;
static struct object_id *current_bad_oid;
-static const char *argv_checkout[] = {"checkout", "-q", NULL, "--", NULL};
-
static const char *term_bad;
static const char *term_good;
@@ -729,20 +727,22 @@ static int is_expected_rev(const struct object_id *oid)
enum bisect_error bisect_checkout(const struct object_id *bisect_rev,
int no_checkout)
{
- char bisect_rev_hex[GIT_MAX_HEXSZ + 1];
struct commit *commit;
struct pretty_print_context pp = {0};
struct strbuf commit_msg = STRBUF_INIT;
- oid_to_hex_r(bisect_rev_hex, bisect_rev);
update_ref(NULL, "BISECT_EXPECTED_REV", bisect_rev, NULL, 0, UPDATE_REFS_DIE_ON_ERR);
- argv_checkout[2] = bisect_rev_hex;
if (no_checkout) {
update_ref(NULL, "BISECT_HEAD", bisect_rev, NULL, 0,
UPDATE_REFS_DIE_ON_ERR);
} else {
- if (run_command_v_opt(argv_checkout, RUN_GIT_CMD))
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "checkout", "-q",
+ oid_to_hex(bisect_rev), "--", NULL);
+ if (run_command(&cmd))
/*
* Errors in `run_command()` itself, signaled by res < 0,
* and errors in the child process, signaled by res > 0
diff --git a/builtin.h b/builtin.h
index 8901a34d6b..aa955466b4 100644
--- a/builtin.h
+++ b/builtin.h
@@ -116,7 +116,7 @@ int cmd_am(int argc, const char **argv, const char *prefix);
int cmd_annotate(int argc, const char **argv, const char *prefix);
int cmd_apply(int argc, const char **argv, const char *prefix);
int cmd_archive(int argc, const char **argv, const char *prefix);
-int cmd_bisect__helper(int argc, const char **argv, const char *prefix);
+int cmd_bisect(int argc, const char **argv, const char *prefix);
int cmd_blame(int argc, const char **argv, const char *prefix);
int cmd_branch(int argc, const char **argv, const char *prefix);
int cmd_bugreport(int argc, const char **argv, const char *prefix);
diff --git a/builtin/add.c b/builtin/add.c
index f84372964c..e9a76c1049 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -240,8 +240,8 @@ static int refresh(int verbose, const struct pathspec *pathspec)
int run_add_interactive(const char *revision, const char *patch_mode,
const struct pathspec *pathspec)
{
- int status, i;
- struct strvec argv = STRVEC_INIT;
+ int i;
+ struct child_process cmd = CHILD_PROCESS_INIT;
int use_builtin_add_i =
git_env_bool("GIT_TEST_ADD_I_USE_BUILTIN", -1);
@@ -272,19 +272,18 @@ int run_add_interactive(const char *revision, const char *patch_mode,
return !!run_add_p(the_repository, mode, revision, pathspec);
}
- strvec_push(&argv, "add--interactive");
+ strvec_push(&cmd.args, "add--interactive");
if (patch_mode)
- strvec_push(&argv, patch_mode);
+ strvec_push(&cmd.args, patch_mode);
if (revision)
- strvec_push(&argv, revision);
- strvec_push(&argv, "--");
+ strvec_push(&cmd.args, revision);
+ strvec_push(&cmd.args, "--");
for (i = 0; i < pathspec->nr; i++)
/* pass original pathspec, to be re-parsed */
- strvec_push(&argv, pathspec->items[i].original);
+ strvec_push(&cmd.args, pathspec->items[i].original);
- status = run_command_v_opt(argv.v, RUN_GIT_CMD);
- strvec_clear(&argv);
- return status;
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
}
int interactive_add(const char **argv, const char *prefix, int patch)
@@ -695,6 +694,6 @@ finish:
die(_("Unable to write new index file"));
dir_clear(&dir);
- UNLEAK(pathspec);
+ clear_pathspec(&pathspec);
return exit_status;
}
diff --git a/builtin/am.c b/builtin/am.c
index 39fea24833..20aea0d248 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -2187,14 +2187,12 @@ static int show_patch(struct am_state *state, enum show_patch_type sub_mode)
int len;
if (!is_null_oid(&state->orig_commit)) {
- const char *av[4] = { "show", NULL, "--", NULL };
- char *new_oid_str;
- int ret;
+ struct child_process cmd = CHILD_PROCESS_INIT;
- av[1] = new_oid_str = xstrdup(oid_to_hex(&state->orig_commit));
- ret = run_command_v_opt(av, RUN_GIT_CMD);
- free(new_oid_str);
- return ret;
+ strvec_pushl(&cmd.args, "show", oid_to_hex(&state->orig_commit),
+ "--", NULL);
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
}
switch (sub_mode) {
diff --git a/builtin/bisect--helper.c b/builtin/bisect.c
index 501245fac9..cc9483e851 100644
--- a/builtin/bisect--helper.c
+++ b/builtin/bisect.c
@@ -20,18 +20,40 @@ static GIT_PATH_FUNC(git_path_bisect_names, "BISECT_NAMES")
static GIT_PATH_FUNC(git_path_bisect_first_parent, "BISECT_FIRST_PARENT")
static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN")
-static const char * const git_bisect_helper_usage[] = {
- N_("git bisect--helper --bisect-reset [<commit>]"),
- "git bisect--helper --bisect-terms [--term-good | --term-old | --term-bad | --term-new]",
- N_("git bisect--helper --bisect-start [--term-{new,bad}=<term> --term-{old,good}=<term>]"
- " [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<paths>...]"),
- "git bisect--helper --bisect-next",
- N_("git bisect--helper --bisect-state (bad|new) [<rev>]"),
- N_("git bisect--helper --bisect-state (good|old) [<rev>...]"),
- N_("git bisect--helper --bisect-replay <filename>"),
- N_("git bisect--helper --bisect-skip [(<rev>|<range>)...]"),
- "git bisect--helper --bisect-visualize",
- N_("git bisect--helper --bisect-run <cmd>..."),
+#define BUILTIN_GIT_BISECT_START_USAGE \
+ N_("git bisect start [--term-{new,bad}=<term> --term-{old,good}=<term>]" \
+ " [--no-checkout] [--first-parent] [<bad> [<good>...]] [--]" \
+ " [<pathspec>...]")
+#define BUILTIN_GIT_BISECT_STATE_USAGE \
+ N_("git bisect (good|bad) [<rev>...]")
+#define BUILTIN_GIT_BISECT_TERMS_USAGE \
+ "git bisect terms [--term-good | --term-bad]"
+#define BUILTIN_GIT_BISECT_SKIP_USAGE \
+ N_("git bisect skip [(<rev>|<range>)...]")
+#define BUILTIN_GIT_BISECT_NEXT_USAGE \
+ "git bisect next"
+#define BUILTIN_GIT_BISECT_RESET_USAGE \
+ N_("git bisect reset [<commit>]")
+#define BUILTIN_GIT_BISECT_VISUALIZE_USAGE \
+ "git bisect visualize"
+#define BUILTIN_GIT_BISECT_REPLAY_USAGE \
+ N_("git bisect replay <logfile>")
+#define BUILTIN_GIT_BISECT_LOG_USAGE \
+ "git bisect log"
+#define BUILTIN_GIT_BISECT_RUN_USAGE \
+ N_("git bisect run <cmd>...")
+
+static const char * const git_bisect_usage[] = {
+ BUILTIN_GIT_BISECT_START_USAGE,
+ BUILTIN_GIT_BISECT_STATE_USAGE,
+ BUILTIN_GIT_BISECT_TERMS_USAGE,
+ BUILTIN_GIT_BISECT_SKIP_USAGE,
+ BUILTIN_GIT_BISECT_NEXT_USAGE,
+ BUILTIN_GIT_BISECT_RESET_USAGE,
+ BUILTIN_GIT_BISECT_VISUALIZE_USAGE,
+ BUILTIN_GIT_BISECT_REPLAY_USAGE,
+ BUILTIN_GIT_BISECT_LOG_USAGE,
+ BUILTIN_GIT_BISECT_RUN_USAGE,
NULL
};
@@ -220,18 +242,17 @@ static int bisect_reset(const char *commit)
}
if (!ref_exists("BISECT_HEAD")) {
- struct strvec argv = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
- strvec_pushl(&argv, "checkout", branch.buf, "--", NULL);
- if (run_command_v_opt(argv.v, RUN_GIT_CMD)) {
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "checkout", branch.buf, "--", NULL);
+ if (run_command(&cmd)) {
error(_("could not check out original"
" HEAD '%s'. Try 'git bisect"
" reset <commit>'."), branch.buf);
strbuf_release(&branch);
- strvec_clear(&argv);
return -1;
}
- strvec_clear(&argv);
}
strbuf_release(&branch);
@@ -765,11 +786,12 @@ static enum bisect_error bisect_start(struct bisect_terms *terms, const char **a
strbuf_read_file(&start_head, git_path_bisect_start(), 0);
strbuf_trim(&start_head);
if (!no_checkout) {
- struct strvec argv = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
- strvec_pushl(&argv, "checkout", start_head.buf,
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "checkout", start_head.buf,
"--", NULL);
- if (run_command_v_opt(argv.v, RUN_GIT_CMD)) {
+ if (run_command(&cmd)) {
res = error(_("checking out '%s' failed."
" Try 'git bisect start "
"<valid-branch>'."),
@@ -1099,40 +1121,38 @@ static enum bisect_error bisect_skip(struct bisect_terms *terms, const char **ar
static int bisect_visualize(struct bisect_terms *terms, const char **argv, int argc)
{
- struct strvec args = STRVEC_INIT;
- int flags = RUN_COMMAND_NO_STDIN, res = 0;
+ struct child_process cmd = CHILD_PROCESS_INIT;
struct strbuf sb = STRBUF_INIT;
if (bisect_next_check(terms, NULL) != 0)
return BISECT_FAILED;
+ cmd.no_stdin = 1;
if (!argc) {
if ((getenv("DISPLAY") || getenv("SESSIONNAME") || getenv("MSYSTEM") ||
getenv("SECURITYSESSIONID")) && exists_in_PATH("gitk")) {
- strvec_push(&args, "gitk");
+ strvec_push(&cmd.args, "gitk");
} else {
- strvec_push(&args, "log");
- flags |= RUN_GIT_CMD;
+ strvec_push(&cmd.args, "log");
+ cmd.git_cmd = 1;
}
} else {
if (argv[0][0] == '-') {
- strvec_push(&args, "log");
- flags |= RUN_GIT_CMD;
+ strvec_push(&cmd.args, "log");
+ cmd.git_cmd = 1;
} else if (strcmp(argv[0], "tig") && !starts_with(argv[0], "git"))
- flags |= RUN_GIT_CMD;
+ cmd.git_cmd = 1;
- strvec_pushv(&args, argv);
+ strvec_pushv(&cmd.args, argv);
}
- strvec_pushl(&args, "--bisect", "--", NULL);
+ strvec_pushl(&cmd.args, "--bisect", "--", NULL);
strbuf_read_file(&sb, git_path_bisect_names(), 0);
- sq_dequote_to_strvec(sb.buf, &args);
+ sq_dequote_to_strvec(sb.buf, &cmd.args);
strbuf_release(&sb);
- res = run_command_v_opt(args.v, flags);
- strvec_clear(&args);
- return res;
+ return run_command(&cmd);
}
static int get_first_good(const char *refname UNUSED,
@@ -1143,8 +1163,17 @@ static int get_first_good(const char *refname UNUSED,
return 1;
}
-static int verify_good(const struct bisect_terms *terms,
- const char **quoted_argv)
+static int do_bisect_run(const char *command)
+{
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ printf(_("running %s\n"), command);
+ cmd.use_shell = 1;
+ strvec_push(&cmd.args, command);
+ return run_command(&cmd);
+}
+
+static int verify_good(const struct bisect_terms *terms, const char *command)
{
int rc;
enum bisect_error res;
@@ -1164,8 +1193,7 @@ static int verify_good(const struct bisect_terms *terms,
if (res != BISECT_OK)
return -1;
- printf(_("running %s\n"), quoted_argv[0]);
- rc = run_command_v_opt(quoted_argv, RUN_USING_SHELL);
+ rc = do_bisect_run(command);
res = bisect_checkout(&current_rev, no_checkout);
if (res != BISECT_OK)
@@ -1178,7 +1206,6 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
{
int res = BISECT_OK;
struct strbuf command = STRBUF_INIT;
- struct strvec run_args = STRVEC_INIT;
const char *new_state;
int temporary_stdout_fd, saved_stdout;
int is_first_run = 1;
@@ -1186,18 +1213,15 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
if (bisect_next_check(terms, NULL))
return BISECT_FAILED;
- if (argc)
- sq_quote_argv(&command, argv);
- else {
+ if (!argc) {
error(_("bisect run failed: no command provided."));
return BISECT_FAILED;
}
- strvec_push(&run_args, command.buf);
-
+ sq_quote_argv(&command, argv);
+ strbuf_ltrim(&command);
while (1) {
- printf(_("running %s\n"), command.buf);
- res = run_command_v_opt(run_args.v, RUN_USING_SHELL);
+ res = do_bisect_run(command.buf);
/*
* Exit code 126 and 127 can either come from the shell
@@ -1207,10 +1231,10 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
* missing or non-executable script.
*/
if (is_first_run && (res == 126 || res == 127)) {
- int rc = verify_good(terms, run_args.v);
+ int rc = verify_good(terms, command.buf);
is_first_run = 0;
- if (rc < 0) {
- error(_("unable to verify '%s' on good"
+ if (rc < 0 || 128 <= rc) {
+ error(_("unable to verify %s on good"
" revision"), command.buf);
res = BISECT_FAILED;
break;
@@ -1225,7 +1249,7 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
if (res < 0 || 128 <= res) {
error(_("bisect run failed: exit code %d from"
- " '%s' is < 0 or >= 128"), res, command.buf);
+ " %s is < 0 or >= 128"), res, command.buf);
break;
}
@@ -1259,14 +1283,14 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
if (res == BISECT_ONLY_SKIPPED_LEFT)
error(_("bisect run cannot continue any more"));
else if (res == BISECT_INTERNAL_SUCCESS_MERGE_BASE) {
- printf(_("bisect run success"));
+ puts(_("bisect run success"));
res = BISECT_OK;
} else if (res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND) {
- printf(_("bisect found first bad commit"));
+ puts(_("bisect found first bad commit"));
res = BISECT_OK;
} else if (res) {
- error(_("bisect run failed: 'git bisect--helper --bisect-state"
- " %s' exited with error code %d"), new_state, res);
+ error(_("bisect run failed: 'bisect-state %s'"
+ " exited with error code %d"), new_state, res);
} else {
continue;
}
@@ -1274,119 +1298,147 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
}
strbuf_release(&command);
- strvec_clear(&run_args);
return res;
}
-int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
+static int cmd_bisect__reset(int argc, const char **argv, const char *prefix UNUSED)
+{
+ if (argc > 1)
+ return error(_("'%s' requires either no argument or a commit"),
+ "git bisect reset");
+ return bisect_reset(argc ? argv[0] : NULL);
+}
+
+static int cmd_bisect__terms(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (argc > 1)
+ return error(_("'%s' requires 0 or 1 argument"),
+ "git bisect terms");
+ res = bisect_terms(&terms, argc == 1 ? argv[0] : NULL);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__start(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ set_terms(&terms, "bad", "good");
+ res = bisect_start(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__next(int argc, const char **argv UNUSED, const char *prefix)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (argc)
+ return error(_("'%s' requires 0 arguments"),
+ "git bisect next");
+ get_terms(&terms);
+ res = bisect_next(&terms, prefix);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__log(int argc UNUSED, const char **argv UNUSED, const char *prefix UNUSED)
+{
+ return bisect_log();
+}
+
+static int cmd_bisect__replay(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (argc != 1)
+ return error(_("no logfile given"));
+ set_terms(&terms, "bad", "good");
+ res = bisect_replay(&terms, argv[0]);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__skip(int argc, const char **argv, const char *prefix UNUSED)
{
- enum {
- BISECT_RESET = 1,
- BISECT_NEXT_CHECK,
- BISECT_TERMS,
- BISECT_START,
- BISECT_AUTOSTART,
- BISECT_NEXT,
- BISECT_STATE,
- BISECT_LOG,
- BISECT_REPLAY,
- BISECT_SKIP,
- BISECT_VISUALIZE,
- BISECT_RUN,
- } cmdmode = 0;
- int res = 0, nolog = 0;
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ set_terms(&terms, "bad", "good");
+ get_terms(&terms);
+ res = bisect_skip(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__visualize(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ get_terms(&terms);
+ res = bisect_visualize(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__run(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (!argc)
+ return error(_("'%s' failed: no command provided."), "git bisect run");
+ get_terms(&terms);
+ res = bisect_run(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+int cmd_bisect(int argc, const char **argv, const char *prefix)
+{
+ int res = 0;
+ parse_opt_subcommand_fn *fn = NULL;
struct option options[] = {
- OPT_CMDMODE(0, "bisect-reset", &cmdmode,
- N_("reset the bisection state"), BISECT_RESET),
- OPT_CMDMODE(0, "bisect-next-check", &cmdmode,
- N_("check whether bad or good terms exist"), BISECT_NEXT_CHECK),
- OPT_CMDMODE(0, "bisect-terms", &cmdmode,
- N_("print out the bisect terms"), BISECT_TERMS),
- OPT_CMDMODE(0, "bisect-start", &cmdmode,
- N_("start the bisect session"), BISECT_START),
- OPT_CMDMODE(0, "bisect-next", &cmdmode,
- N_("find the next bisection commit"), BISECT_NEXT),
- OPT_CMDMODE(0, "bisect-state", &cmdmode,
- N_("mark the state of ref (or refs)"), BISECT_STATE),
- OPT_CMDMODE(0, "bisect-log", &cmdmode,
- N_("list the bisection steps so far"), BISECT_LOG),
- OPT_CMDMODE(0, "bisect-replay", &cmdmode,
- N_("replay the bisection process from the given file"), BISECT_REPLAY),
- OPT_CMDMODE(0, "bisect-skip", &cmdmode,
- N_("skip some commits for checkout"), BISECT_SKIP),
- OPT_CMDMODE(0, "bisect-visualize", &cmdmode,
- N_("visualize the bisection"), BISECT_VISUALIZE),
- OPT_CMDMODE(0, "bisect-run", &cmdmode,
- N_("use <cmd>... to automatically bisect"), BISECT_RUN),
- OPT_BOOL(0, "no-log", &nolog,
- N_("no log for BISECT_WRITE")),
+ OPT_SUBCOMMAND("reset", &fn, cmd_bisect__reset),
+ OPT_SUBCOMMAND("terms", &fn, cmd_bisect__terms),
+ OPT_SUBCOMMAND("start", &fn, cmd_bisect__start),
+ OPT_SUBCOMMAND("next", &fn, cmd_bisect__next),
+ OPT_SUBCOMMAND("log", &fn, cmd_bisect__log),
+ OPT_SUBCOMMAND("replay", &fn, cmd_bisect__replay),
+ OPT_SUBCOMMAND("skip", &fn, cmd_bisect__skip),
+ OPT_SUBCOMMAND("visualize", &fn, cmd_bisect__visualize),
+ OPT_SUBCOMMAND("view", &fn, cmd_bisect__visualize),
+ OPT_SUBCOMMAND("run", &fn, cmd_bisect__run),
OPT_END()
};
- struct bisect_terms terms = { .term_good = NULL, .term_bad = NULL };
+ argc = parse_options(argc, argv, prefix, options, git_bisect_usage,
+ PARSE_OPT_SUBCOMMAND_OPTIONAL);
- argc = parse_options(argc, argv, prefix, options,
- git_bisect_helper_usage,
- PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_UNKNOWN_OPT);
+ if (!fn) {
+ struct bisect_terms terms = { 0 };
- if (!cmdmode)
- usage_with_options(git_bisect_helper_usage, options);
+ if (!argc)
+ usage_msg_opt(_("need a command"), git_bisect_usage, options);
- switch (cmdmode) {
- case BISECT_RESET:
- if (argc > 1)
- return error(_("--bisect-reset requires either no argument or a commit"));
- res = bisect_reset(argc ? argv[0] : NULL);
- break;
- case BISECT_TERMS:
- if (argc > 1)
- return error(_("--bisect-terms requires 0 or 1 argument"));
- res = bisect_terms(&terms, argc == 1 ? argv[0] : NULL);
- break;
- case BISECT_START:
- set_terms(&terms, "bad", "good");
- res = bisect_start(&terms, argv, argc);
- break;
- case BISECT_NEXT:
- if (argc)
- return error(_("--bisect-next requires 0 arguments"));
- get_terms(&terms);
- res = bisect_next(&terms, prefix);
- break;
- case BISECT_STATE:
set_terms(&terms, "bad", "good");
get_terms(&terms);
+ if (check_and_set_terms(&terms, argv[0]))
+ usage_msg_optf(_("unknown command: '%s'"), git_bisect_usage,
+ options, argv[0]);
res = bisect_state(&terms, argv, argc);
- break;
- case BISECT_LOG:
- if (argc)
- return error(_("--bisect-log requires 0 arguments"));
- res = bisect_log();
- break;
- case BISECT_REPLAY:
- if (argc != 1)
- return error(_("no logfile given"));
- set_terms(&terms, "bad", "good");
- res = bisect_replay(&terms, argv[0]);
- break;
- case BISECT_SKIP:
- set_terms(&terms, "bad", "good");
- get_terms(&terms);
- res = bisect_skip(&terms, argv, argc);
- break;
- case BISECT_VISUALIZE:
- get_terms(&terms);
- res = bisect_visualize(&terms, argv, argc);
- break;
- case BISECT_RUN:
- if (!argc)
- return error(_("bisect run failed: no command provided."));
- get_terms(&terms);
- res = bisect_run(&terms, argv, argc);
- break;
- default:
- BUG("unknown subcommand %d", cmdmode);
+ free_terms(&terms);
+ } else {
+ argc--;
+ argv++;
+ res = fn(argc, argv, prefix);
}
- free_terms(&terms);
/*
* Handle early success
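The rewrite above replaces the OPT_CMDMODE flag enum with parse-options subcommand dispatch. A compact sketch of that pattern, assuming the OPT_SUBCOMMAND / parse_opt_subcommand_fn machinery from parse-options.h; the "frob" command and its single subcommand are made up for illustration.

#include "builtin.h"
#include "parse-options.h"

static const char * const frob_usage[] = {
	"git frob list",
	NULL
};

static int cmd_frob__list(int argc UNUSED, const char **argv UNUSED,
			  const char *prefix UNUSED)
{
	return 0;
}

int cmd_frob(int argc, const char **argv, const char *prefix)
{
	parse_opt_subcommand_fn *fn = NULL;
	struct option options[] = {
		OPT_SUBCOMMAND("list", &fn, cmd_frob__list),
		OPT_END()
	};

	argc = parse_options(argc, argv, prefix, options, frob_usage,
			     PARSE_OPT_SUBCOMMAND_OPTIONAL);
	if (!fn)
		usage_msg_opt(_("need a subcommand"), frob_usage, options);

	/*
	 * With PARSE_OPT_SUBCOMMAND_OPTIONAL the matched word is still
	 * argv[0], so skip it before handing off, as cmd_bisect() does.
	 */
	argc--;
	argv++;
	return fn(argc, argv, prefix);
}

The optional flag is what lets cmd_bisect() fall back to treating an unmatched first word as a state term ("good", "bad", ...) rather than erroring out.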
diff --git a/builtin/blame.c b/builtin/blame.c
index a9fe8cf7a6..71f925e456 100644
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -30,6 +30,7 @@
#include "tag.h"
static char blame_usage[] = N_("git blame [<options>] [<rev-opts>] [<rev>] [--] <file>");
+static char annotate_usage[] = N_("git annotate [<options>] [<rev-opts>] [<rev>] [--] <file>");
static const char *blame_opt_usage[] = {
blame_usage,
@@ -38,6 +39,13 @@ static const char *blame_opt_usage[] = {
NULL
};
+static const char *annotate_opt_usage[] = {
+ annotate_usage,
+ "",
+ N_("<rev-opts> are documented in git-rev-list(1)"),
+ NULL
+};
+
static int longest_file;
static int longest_author;
static int max_orig_digits;
@@ -899,6 +907,8 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
long anchor;
const int hexsz = the_hash_algo->hexsz;
long num_lines = 0;
+ const char *str_usage = cmd_is_annotate ? annotate_usage : blame_usage;
+ const char **opt_usage = cmd_is_annotate ? annotate_opt_usage : blame_opt_usage;
setup_default_color_by_age();
git_config(git_blame_config, &output_option);
@@ -914,7 +924,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
parse_options_start(&ctx, argc, argv, prefix, options,
PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0);
for (;;) {
- switch (parse_options_step(&ctx, options, blame_opt_usage)) {
+ switch (parse_options_step(&ctx, options, opt_usage)) {
case PARSE_OPT_NON_OPTION:
case PARSE_OPT_UNKNOWN:
break;
@@ -934,7 +944,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
ctx.argv[0] = "--children";
reverse = 1;
}
- parse_revision_opt(&revs, &ctx, options, blame_opt_usage);
+ parse_revision_opt(&revs, &ctx, options, opt_usage);
}
parse_done:
revision_opts_finish(&revs);
@@ -1040,7 +1050,7 @@ parse_done:
switch (argc - dashdash_pos - 1) {
case 2: /* (1b) */
if (argc != 4)
- usage_with_options(blame_opt_usage, options);
+ usage_with_options(opt_usage, options);
/* reorder for the new way: <rev> -- <path> */
argv[1] = argv[3];
argv[3] = argv[2];
@@ -1051,11 +1061,11 @@ parse_done:
argv[argc] = NULL;
break;
default:
- usage_with_options(blame_opt_usage, options);
+ usage_with_options(opt_usage, options);
}
} else {
if (argc < 2)
- usage_with_options(blame_opt_usage, options);
+ usage_with_options(opt_usage, options);
if (argc == 3 && is_a_rev(argv[argc - 1])) { /* (2b) */
path = add_prefix(prefix, argv[1]);
argv[1] = argv[2];
@@ -1113,7 +1123,7 @@ parse_done:
nth_line_cb, &sb, lno, anchor,
&bottom, &top, sb.path,
the_repository->index))
- usage(blame_usage);
+ usage(str_usage);
if ((!lno && (top || bottom)) || lno < bottom)
die(Q_("file %s has only %lu line",
"file %s has only %lu lines",
diff --git a/builtin/branch.c b/builtin/branch.c
index 55cd9a6e99..0879cc655e 100644
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -40,7 +40,6 @@ static const char * const builtin_branch_usage[] = {
static const char *head;
static struct object_id head_oid;
static int recurse_submodules = 0;
-static int submodule_propagate_branches = 0;
static int branch_use_color = -1;
static char branch_colors[][COLOR_MAXLEN] = {
@@ -106,10 +105,6 @@ static int git_branch_config(const char *var, const char *value, void *cb)
recurse_submodules = git_config_bool(var, value);
return 0;
}
- if (!strcasecmp(var, "submodule.propagateBranches")) {
- submodule_propagate_branches = git_config_bool(var, value);
- return 0;
- }
return git_color_default_config(var, value, cb);
}
@@ -150,7 +145,7 @@ static int branch_merged(int kind, const char *name,
if (!reference_rev)
reference_rev = head_rev;
- merged = in_merge_bases(rev, reference_rev);
+ merged = reference_rev ? in_merge_bases(rev, reference_rev) : 0;
/*
* After the safety valve is fully redefined to "check with
@@ -160,7 +155,7 @@ static int branch_merged(int kind, const char *name,
* a gentle reminder is in order.
*/
if ((head_rev != reference_rev) &&
- in_merge_bases(rev, head_rev) != merged) {
+ (head_rev ? in_merge_bases(rev, head_rev) : 0) != merged) {
if (merged)
warning(_("deleting branch '%s' that has been merged to\n"
" '%s', but not yet merged to HEAD."),
@@ -235,11 +230,8 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
}
branch_name_pos = strcspn(fmt, "%");
- if (!force) {
+ if (!force)
head_rev = lookup_commit_reference(the_repository, &head_oid);
- if (!head_rev)
- die(_("Couldn't look up commit object for HEAD"));
- }
for (i = 0; i < argc; i++, strbuf_reset(&bname)) {
char *target = NULL;
@@ -520,13 +512,6 @@ static void copy_or_rename_branch(const char *oldname, const char *newname, int
const char *interpreted_newname = NULL;
int recovery = 0;
- if (!oldname) {
- if (copy)
- die(_("cannot copy the current branch while not on any."));
- else
- die(_("cannot rename the current branch while not on any."));
- }
-
if (strbuf_check_branch_ref(&oldref, oldname)) {
/*
* Bad name --- this could be an attempt to rename a
@@ -538,6 +523,13 @@ static void copy_or_rename_branch(const char *oldname, const char *newname, int
die(_("Invalid branch name: '%s'"), oldname);
}
+ if ((copy || strcmp(head, oldname)) && !ref_exists(oldref.buf)) {
+ if (copy && !strcmp(head, oldname))
+ die(_("No commit on branch '%s' yet."), oldname);
+ else
+ die(_("No branch named '%s'."), oldname);
+ }
+
/*
* A command like "git branch -M currentbranch currentbranch" cannot
* cause the worktree to become inconsistent with HEAD, so allow it.
@@ -599,10 +591,11 @@ static GIT_PATH_FUNC(edit_description, "EDIT_DESCRIPTION")
static int edit_branch_description(const char *branch_name)
{
+ int exists;
struct strbuf buf = STRBUF_INIT;
struct strbuf name = STRBUF_INIT;
- read_branch_desc(&buf, branch_name);
+ exists = !read_branch_desc(&buf, branch_name);
if (!buf.len || buf.buf[buf.len-1] != '\n')
strbuf_addch(&buf, '\n');
strbuf_commented_addf(&buf,
@@ -619,7 +612,8 @@ static int edit_branch_description(const char *branch_name)
strbuf_stripspace(&buf, 1);
strbuf_addf(&name, "branch.%s.description", branch_name);
- git_config_set(name.buf, buf.len ? buf.buf : NULL);
+ if (buf.len || exists)
+ git_config_set(name.buf, buf.len ? buf.buf : NULL);
strbuf_release(&name);
strbuf_release(&buf);
@@ -714,7 +708,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options, builtin_branch_usage,
0);
-
+ prepare_repo_settings(the_repository);
if (!delete && !rename && !copy && !edit_description && !new_upstream &&
!show_current && !unset_upstream && argc == 0)
list = 1;
@@ -730,7 +724,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
usage_with_options(builtin_branch_usage, options);
if (recurse_submodules_explicit) {
- if (!submodule_propagate_branches)
+ if (!the_repository->settings.submodule_propagate_branches)
die(_("branch with --recurse-submodules can only be used if submodule.propagateBranches is enabled"));
if (noncreate_actions)
die(_("--recurse-submodules can only be used to create branches"));
@@ -738,7 +732,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
recurse_submodules =
(recurse_submodules || recurse_submodules_explicit) &&
- submodule_propagate_branches;
+ the_repository->settings.submodule_propagate_branches;
if (filter.abbrev == -1)
filter.abbrev = DEFAULT_ABBREV;
@@ -791,53 +785,56 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
} else if (edit_description) {
const char *branch_name;
struct strbuf branch_ref = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ int ret = 1; /* assume failure */
if (!argc) {
if (filter.detached)
die(_("Cannot give description to detached HEAD"));
branch_name = head;
- } else if (argc == 1)
- branch_name = argv[0];
- else
+ } else if (argc == 1) {
+ strbuf_branchname(&buf, argv[0], INTERPRET_BRANCH_LOCAL);
+ branch_name = buf.buf;
+ } else {
die(_("cannot edit description of more than one branch"));
+ }
strbuf_addf(&branch_ref, "refs/heads/%s", branch_name);
- if (!ref_exists(branch_ref.buf)) {
- strbuf_release(&branch_ref);
-
- if (!argc)
- return error(_("No commit on branch '%s' yet."),
- branch_name);
- else
- return error(_("No branch named '%s'."),
- branch_name);
- }
+ if (!ref_exists(branch_ref.buf))
+ error((!argc || !strcmp(head, branch_name))
+ ? _("No commit on branch '%s' yet.")
+ : _("No branch named '%s'."),
+ branch_name);
+ else if (!edit_branch_description(branch_name))
+ ret = 0; /* happy */
+
strbuf_release(&branch_ref);
+ strbuf_release(&buf);
- if (edit_branch_description(branch_name))
- return 1;
- } else if (copy) {
- if (!argc)
- die(_("branch name required"));
- else if (argc == 1)
- copy_or_rename_branch(head, argv[0], 1, copy > 1);
- else if (argc == 2)
- copy_or_rename_branch(argv[0], argv[1], 1, copy > 1);
- else
- die(_("too many branches for a copy operation"));
- } else if (rename) {
+ return ret;
+ } else if (copy || rename) {
if (!argc)
die(_("branch name required"));
+ else if ((argc == 1) && filter.detached)
+ die(copy? _("cannot copy the current branch while not on any.")
+ : _("cannot rename the current branch while not on any."));
else if (argc == 1)
- copy_or_rename_branch(head, argv[0], 0, rename > 1);
+ copy_or_rename_branch(head, argv[0], copy, copy + rename > 1);
else if (argc == 2)
- copy_or_rename_branch(argv[0], argv[1], 0, rename > 1);
+ copy_or_rename_branch(argv[0], argv[1], copy, copy + rename > 1);
else
- die(_("too many arguments for a rename operation"));
+ die(copy? _("too many branches for a copy operation")
+ : _("too many arguments for a rename operation"));
} else if (new_upstream) {
- struct branch *branch = branch_get(argv[0]);
+ struct branch *branch;
+ struct strbuf buf = STRBUF_INIT;
- if (argc > 1)
+ if (!argc)
+ branch = branch_get(NULL);
+ else if (argc == 1) {
+ strbuf_branchname(&buf, argv[0], INTERPRET_BRANCH_LOCAL);
+ branch = branch_get(buf.buf);
+ } else
die(_("too many arguments to set new upstream"));
if (!branch) {
@@ -848,17 +845,26 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
die(_("no such branch '%s'"), argv[0]);
}
- if (!ref_exists(branch->refname))
+ if (!ref_exists(branch->refname)) {
+ if (!argc || !strcmp(head, branch->name))
+ die(_("No commit on branch '%s' yet."), branch->name);
die(_("branch '%s' does not exist"), branch->name);
+ }
dwim_and_setup_tracking(the_repository, branch->name,
new_upstream, BRANCH_TRACK_OVERRIDE,
quiet);
+ strbuf_release(&buf);
} else if (unset_upstream) {
- struct branch *branch = branch_get(argv[0]);
+ struct branch *branch;
struct strbuf buf = STRBUF_INIT;
- if (argc > 1)
+ if (!argc)
+ branch = branch_get(NULL);
+ else if (argc == 1) {
+ strbuf_branchname(&buf, argv[0], INTERPRET_BRANCH_LOCAL);
+ branch = branch_get(buf.buf);
+ } else
die(_("too many arguments to unset upstream"));
if (!branch) {
@@ -871,6 +877,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
if (!branch_has_merge_config(branch))
die(_("Branch '%s' has no upstream information"), branch->name);
+ strbuf_reset(&buf);
strbuf_addf(&buf, "branch.%s.remote", branch->name);
git_config_set_multivar(buf.buf, NULL, NULL, CONFIG_FLAGS_MULTI_REPLACE);
strbuf_reset(&buf);
diff --git a/builtin/bugreport.c b/builtin/bugreport.c
index 530895be55..5bc254be80 100644
--- a/builtin/bugreport.c
+++ b/builtin/bugreport.c
@@ -60,7 +60,8 @@ static void get_populated_hooks(struct strbuf *hook_info, int nongit)
}
static const char * const bugreport_usage[] = {
- N_("git bugreport [-o|--output-directory <file>] [-s|--suffix <format>] [--diagnose[=<mode>]"),
+ N_("git bugreport [(-o | --output-directory) <path>] [(-s | --suffix) <format>]\n"
+ " [--diagnose[=<mode>]]"),
NULL
};
@@ -105,6 +106,7 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix)
const char *user_relative_path = NULL;
char *prefixed_filename;
size_t output_path_len;
+ int ret;
const struct option bugreport_options[] = {
OPT_CALLBACK_F(0, "diagnose", &diagnose, N_("mode"),
@@ -181,7 +183,9 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix)
user_relative_path);
free(prefixed_filename);
- UNLEAK(buffer);
- UNLEAK(report_path);
- return !!launch_editor(report_path.buf, NULL, NULL);
+ strbuf_release(&buffer);
+
+ ret = !!launch_editor(report_path.buf, NULL, NULL);
+ strbuf_release(&report_path);
+ return ret;
}
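A short sketch of the leak-plugging pattern used in the hunk above and in the commit.c hunks further down: once the return value no longer needs the buffer, release it instead of tagging it with UNLEAK(). The helper report_path_demo() is a made-up stand-in.

#include "git-compat-util.h"
#include "strbuf.h"

static int report_path_demo(const char *dir)
{
	struct strbuf path = STRBUF_INIT;
	int ret;

	strbuf_addf(&path, "%s/report.txt", dir);
	ret = !!access(path.buf, F_OK);
	strbuf_release(&path);		/* was: UNLEAK(path); */
	return ret;
}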
diff --git a/builtin/bundle.c b/builtin/bundle.c
index e80efce3a4..c12c09f854 100644
--- a/builtin/bundle.c
+++ b/builtin/bundle.c
@@ -11,32 +11,42 @@
* bundle supporting "fetch", "pull", and "ls-remote".
*/
-static const char * const builtin_bundle_usage[] = {
- N_("git bundle create [<options>] <file> <git-rev-list args>"),
- N_("git bundle verify [<options>] <file>"),
- N_("git bundle list-heads <file> [<refname>...]"),
- N_("git bundle unbundle <file> [<refname>...]"),
- NULL
+#define BUILTIN_BUNDLE_CREATE_USAGE \
+ N_("git bundle create [-q | --quiet | --progress | --all-progress] [--all-progress-implied]\n" \
+ " [--version=<version>] <file> <git-rev-list-args>")
+#define BUILTIN_BUNDLE_VERIFY_USAGE \
+ N_("git bundle verify [-q | --quiet] <file>")
+#define BUILTIN_BUNDLE_LIST_HEADS_USAGE \
+ N_("git bundle list-heads <file> [<refname>...]")
+#define BUILTIN_BUNDLE_UNBUNDLE_USAGE \
+ N_("git bundle unbundle [--progress] <file> [<refname>...]")
+
+static char const * const builtin_bundle_usage[] = {
+ BUILTIN_BUNDLE_CREATE_USAGE,
+ BUILTIN_BUNDLE_VERIFY_USAGE,
+ BUILTIN_BUNDLE_LIST_HEADS_USAGE,
+ BUILTIN_BUNDLE_UNBUNDLE_USAGE,
+ NULL,
};
static const char * const builtin_bundle_create_usage[] = {
- N_("git bundle create [<options>] <file> <git-rev-list args>"),
- NULL
+ BUILTIN_BUNDLE_CREATE_USAGE,
+ NULL
};
static const char * const builtin_bundle_verify_usage[] = {
- N_("git bundle verify [<options>] <file>"),
- NULL
+ BUILTIN_BUNDLE_VERIFY_USAGE,
+ NULL
};
static const char * const builtin_bundle_list_heads_usage[] = {
- N_("git bundle list-heads <file> [<refname>...]"),
- NULL
+ BUILTIN_BUNDLE_LIST_HEADS_USAGE,
+ NULL
};
static const char * const builtin_bundle_unbundle_usage[] = {
- N_("git bundle unbundle <file> [<refname>...]"),
- NULL
+ BUILTIN_BUNDLE_UNBUNDLE_USAGE,
+ NULL
};
static int parse_options_cmd_bundle(int argc,
@@ -119,7 +129,8 @@ static int cmd_bundle_verify(int argc, const char **argv, const char *prefix) {
goto cleanup;
}
close(bundle_fd);
- if (verify_bundle(the_repository, &header, !quiet)) {
+ if (verify_bundle(the_repository, &header,
+ quiet ? VERIFY_BUNDLE_QUIET : VERIFY_BUNDLE_VERBOSE)) {
ret = 1;
goto cleanup;
}
@@ -185,7 +196,7 @@ static int cmd_bundle_unbundle(int argc, const char **argv, const char *prefix)
strvec_pushl(&extra_index_pack_args, "-v", "--progress-title",
_("Unbundling objects"), NULL);
ret = !!unbundle(the_repository, &header, bundle_fd,
- &extra_index_pack_args) ||
+ &extra_index_pack_args, 0) ||
list_bundle_refs(&header, argc, argv);
bundle_header_release(&header);
cleanup:
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 989eee0bb4..fa7bd89169 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -893,7 +893,7 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix)
N_("git cat-file (-t | -s) [--allow-unknown-type] <object>"),
N_("git cat-file (--batch | --batch-check | --batch-command) [--batch-all-objects]\n"
" [--buffer] [--follow-symlinks] [--unordered]\n"
- " [--textconv | --filters]"),
+ " [--textconv | --filters] [-z]"),
N_("git cat-file (--textconv | --filters)\n"
" [<rev>:<path|tree-ish> | --path=<path|tree-ish> <rev>]"),
NULL
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 2a132392fb..659dd5c430 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -1470,6 +1470,8 @@ static void die_if_some_operation_in_progress(void)
"or \"git worktree add\"."));
if (state.bisect_in_progress)
warning(_("you are switching branch while bisecting"));
+
+ wt_status_state_free_buffers(&state);
}
static int checkout_branch(struct checkout_opts *opts,
diff --git a/builtin/clean.c b/builtin/clean.c
index 5466636e66..40ff2c578d 100644
--- a/builtin/clean.c
+++ b/builtin/clean.c
@@ -26,7 +26,7 @@ static struct string_list del_list = STRING_LIST_INIT_DUP;
static unsigned int colopts;
static const char *const builtin_clean_usage[] = {
- N_("git clean [-d] [-f] [-i] [-n] [-q] [-e <pattern>] [-x | -X] [--] <paths>..."),
+ N_("git clean [-d] [-f] [-i] [-n] [-q] [-e <pattern>] [-x | -X] [--] [<pathspec>...]"),
NULL
};
diff --git a/builtin/clone.c b/builtin/clone.c
index 5d7affc29a..894be8eda4 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -78,6 +78,7 @@ static int option_filter_submodules = -1; /* unspecified */
static int config_filter_submodules = -1; /* unspecified */
static struct string_list server_options = STRING_LIST_INIT_NODUP;
static int option_remote_submodules;
+static int option_detach;
static const char *bundle_uri;
static int recurse_submodules_cb(const struct option *opt,
@@ -162,6 +163,8 @@ static struct option builtin_clone_options[] = {
N_("any cloned submodules will use their remote-tracking branch")),
OPT_BOOL(0, "sparse", &option_sparse_checkout,
N_("initialize sparse-checkout file to include only files at root")),
+ OPT_BOOL(0, "detach", &option_detach,
+ N_("detach HEAD and don't create a local branch")),
OPT_STRING(0, "bundle-uri", &bundle_uri,
N_("uri"), N_("a URI for downloading bundles before fetching from origin remote")),
OPT_END()
@@ -613,10 +616,12 @@ static void update_remote_refs(const struct ref *refs,
}
static void update_head(const struct ref *our, const struct ref *remote,
- const char *unborn, const char *msg)
+ const char *unborn, int should_detach,
+ const char *msg)
{
const char *head;
- if (our && skip_prefix(our->name, "refs/heads/", &head)) {
+ if (our && !should_detach &&
+ skip_prefix(our->name, "refs/heads/", &head)) {
/* Local default branch link */
if (create_symref("HEAD", our->name, NULL) < 0)
die(_("unable to update HEAD"));
@@ -653,9 +658,9 @@ static void update_head(const struct ref *our, const struct ref *remote,
static int git_sparse_checkout_init(const char *repo)
{
- struct strvec argv = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
int result = 0;
- strvec_pushl(&argv, "-C", repo, "sparse-checkout", "set", NULL);
+ strvec_pushl(&cmd.args, "-C", repo, "sparse-checkout", "set", NULL);
/*
* We must apply the setting in the current process
@@ -663,12 +668,12 @@ static int git_sparse_checkout_init(const char *repo)
*/
core_apply_sparse_checkout = 1;
- if (run_command_v_opt(argv.v, RUN_GIT_CMD)) {
+ cmd.git_cmd = 1;
+ if (run_command(&cmd)) {
error(_("failed to initialize sparse-checkout"));
result = 1;
}
- strvec_clear(&argv);
return result;
}
@@ -733,37 +738,38 @@ static int checkout(int submodule_progress, int filter_submodules)
oid_to_hex(&oid), "1", NULL);
if (!err && (option_recurse_submodules.nr > 0)) {
- struct strvec args = STRVEC_INIT;
- strvec_pushl(&args, "submodule", "update", "--require-init", "--recursive", NULL);
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ strvec_pushl(&cmd.args, "submodule", "update", "--require-init",
+ "--recursive", NULL);
if (option_shallow_submodules == 1)
- strvec_push(&args, "--depth=1");
+ strvec_push(&cmd.args, "--depth=1");
if (max_jobs != -1)
- strvec_pushf(&args, "--jobs=%d", max_jobs);
+ strvec_pushf(&cmd.args, "--jobs=%d", max_jobs);
if (submodule_progress)
- strvec_push(&args, "--progress");
+ strvec_push(&cmd.args, "--progress");
if (option_verbosity < 0)
- strvec_push(&args, "--quiet");
+ strvec_push(&cmd.args, "--quiet");
if (option_remote_submodules) {
- strvec_push(&args, "--remote");
- strvec_push(&args, "--no-fetch");
+ strvec_push(&cmd.args, "--remote");
+ strvec_push(&cmd.args, "--no-fetch");
}
if (filter_submodules && filter_options.choice)
- strvec_pushf(&args, "--filter=%s",
+ strvec_pushf(&cmd.args, "--filter=%s",
expand_list_objects_filter_spec(&filter_options));
if (option_single_branch >= 0)
- strvec_push(&args, option_single_branch ?
+ strvec_push(&cmd.args, option_single_branch ?
"--single-branch" :
"--no-single-branch");
- err = run_command_v_opt(args.v, RUN_GIT_CMD);
- strvec_clear(&args);
+ cmd.git_cmd = 1;
+ err = run_command(&cmd);
}
return err;
@@ -864,11 +870,15 @@ static void write_refspec_config(const char *src_ref_prefix,
static void dissociate_from_references(void)
{
- static const char* argv[] = { "repack", "-a", "-d", NULL };
char *alternates = git_pathdup("objects/info/alternates");
if (!access(alternates, F_OK)) {
- if (run_command_v_opt(argv, RUN_GIT_CMD|RUN_COMMAND_NO_STDIN))
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ cmd.no_stdin = 1;
+ strvec_pushl(&cmd.args, "repack", "-a", "-d", NULL);
+ if (run_command(&cmd))
die(_("cannot repack to clean up"));
if (unlink(alternates) && errno != ENOENT)
die_errno(_("cannot unlink temporary alternates file"));
@@ -931,9 +941,6 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
option_bare = 1;
if (option_bare) {
- if (option_origin)
- die(_("options '%s' and '%s %s' cannot be used together"),
- "--bare", "--origin", option_origin);
if (real_git_dir)
die(_("options '%s' and '%s' cannot be used together"), "--bare", "--separate-git-dir");
option_no_checkout = 1;
@@ -1360,7 +1367,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
branch_top.buf, reflog_msg.buf, transport,
!is_local);
- update_head(our_head_points_at, remote_head, unborn_head, reflog_msg.buf);
+ update_head(our_head_points_at, remote_head, unborn_head,
+ option_detach, reflog_msg.buf);
/*
* We want to show progress for recursive submodule clones iff
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
index 51557fe786..e8f77f535f 100644
--- a/builtin/commit-graph.c
+++ b/builtin/commit-graph.c
@@ -10,13 +10,13 @@
#include "tag.h"
#define BUILTIN_COMMIT_GRAPH_VERIFY_USAGE \
- N_("git commit-graph verify [--object-dir <objdir>] [--shallow] [--[no-]progress]")
+ N_("git commit-graph verify [--object-dir <dir>] [--shallow] [--[no-]progress]")
#define BUILTIN_COMMIT_GRAPH_WRITE_USAGE \
- N_("git commit-graph write [--object-dir <objdir>] [--append] " \
- "[--split[=<strategy>]] [--reachable|--stdin-packs|--stdin-commits] " \
- "[--changed-paths] [--[no-]max-new-filters <n>] [--[no-]progress] " \
- "<split options>")
+ N_("git commit-graph write [--object-dir <dir>] [--append]\n" \
+ " [--split[=<strategy>]] [--reachable | --stdin-packs | --stdin-commits]\n" \
+ " [--changed-paths] [--[no-]max-new-filters <n>] [--[no-]progress]\n" \
+ " <split options>")
static const char * builtin_commit_graph_verify_usage[] = {
BUILTIN_COMMIT_GRAPH_VERIFY_USAGE,
diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c
index 63ea322933..cc8d584be2 100644
--- a/builtin/commit-tree.c
+++ b/builtin/commit-tree.c
@@ -15,8 +15,9 @@
#include "parse-options.h"
static const char * const commit_tree_usage[] = {
- N_("git commit-tree [(-p <parent>)...] [-S[<keyid>]] [(-m <message>)...] "
- "[(-F <file>)...] <tree>"),
+ N_("git commit-tree <tree> [(-p <parent>)...]"),
+ N_("git commit-tree [(-p <parent>)...] [-S[<keyid>]] [(-m <message>)...]\n"
+ " [(-F <file>)...] <tree>"),
NULL
};
diff --git a/builtin/commit.c b/builtin/commit.c
index fcf9c85947..f88a29167f 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -40,12 +40,19 @@
#include "pretty.h"
static const char * const builtin_commit_usage[] = {
- N_("git commit [<options>] [--] <pathspec>..."),
+ N_("git commit [-a | --interactive | --patch] [-s] [-v] [-u<mode>] [--amend]\n"
+ " [--dry-run] [(-c | -C | --squash) <commit> | --fixup [(amend|reword):]<commit>)]\n"
+ " [-F <file> | -m <msg>] [--reset-author] [--allow-empty]\n"
+ " [--allow-empty-message] [--no-verify] [-e] [--author=<author>]\n"
+ " [--date=<date>] [--cleanup=<mode>] [--[no-]status]\n"
+ " [-i | -o] [--pathspec-from-file=<file> [--pathspec-file-nul]]\n"
+ " [(--trailer <token>[(=|:)<value>])...] [-S[<keyid>]]\n"
+ " [--] [<pathspec>...]"),
NULL
};
static const char * const builtin_status_usage[] = {
- N_("git status [<options>] [--] <pathspec>..."),
+ N_("git status [<options>] [--] [<pathspec>...]"),
NULL
};
@@ -139,7 +146,7 @@ static int opt_pass_trailer(const struct option *opt, const char *arg, int unset
{
BUG_ON_OPT_NEG(unset);
- strvec_pushl(&trailer_args, "--trailer", arg, NULL);
+ strvec_pushl(opt->value, "--trailer", arg, NULL);
return 0;
}
@@ -980,8 +987,11 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
struct object_id oid;
const char *parent = "HEAD";
- if (!active_nr && read_cache() < 0)
- die(_("Cannot read index"));
+ if (!active_nr) {
+ discard_cache();
+ if (read_cache() < 0)
+ die(_("Cannot read index"));
+ }
if (amend)
parent = "HEAD^1";
@@ -1633,7 +1643,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
OPT_STRING(0, "fixup", &fixup_message, N_("[(amend|reword):]commit"), N_("use autosquash formatted message to fixup or amend/reword specified commit")),
OPT_STRING(0, "squash", &squash_message, N_("commit"), N_("use autosquash formatted message to squash specified commit")),
OPT_BOOL(0, "reset-author", &renew_authorship, N_("the commit is authored by me now (used with -C/-c/--amend)")),
- OPT_CALLBACK_F(0, "trailer", NULL, N_("trailer"), N_("add custom trailer(s)"), PARSE_OPT_NONEG, opt_pass_trailer),
+ OPT_CALLBACK_F(0, "trailer", &trailer_args, N_("trailer"), N_("add custom trailer(s)"), PARSE_OPT_NONEG, opt_pass_trailer),
OPT_BOOL('s', "signoff", &signoff, N_("add a Signed-off-by trailer")),
OPT_FILENAME('t', "template", &template_file, N_("use specified template file")),
OPT_BOOL('e', "edit", &edit_flag, N_("force edit of commit")),
@@ -1864,8 +1874,8 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
apply_autostash(git_path_merge_autostash(the_repository));
cleanup:
- UNLEAK(author_ident);
- UNLEAK(err);
- UNLEAK(sb);
+ strbuf_release(&author_ident);
+ strbuf_release(&err);
+ strbuf_release(&sb);
return ret;
}
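The --trailer change above routes the repeatable option through opt->value instead of a file-scope global. A hedged sketch of that wiring, assuming the OPT_CALLBACK_F and strvec APIs shown in the hunk; opt_collect(), "--thing", and parse_things() are illustrative names, not part of git.

#include "parse-options.h"
#include "strvec.h"

static const char * const things_usage[] = {
	"frobnicate [--thing <value>]...",
	NULL
};

/* Append each --thing argument to the strvec passed via opt->value. */
static int opt_collect(const struct option *opt, const char *arg, int unset)
{
	struct strvec *values = opt->value;

	BUG_ON_OPT_NEG(unset);
	strvec_push(values, arg);
	return 0;
}

static int parse_things(int argc, const char **argv, const char *prefix,
			struct strvec *values)
{
	struct option options[] = {
		OPT_CALLBACK_F(0, "thing", values, N_("value"),
			       N_("collect a repeatable value"),
			       PARSE_OPT_NONEG, opt_collect),
		OPT_END()
	};

	return parse_options(argc, argv, prefix, options, things_usage, 0);
}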
diff --git a/builtin/config.c b/builtin/config.c
index 753e5fac29..060cf9f3e0 100644
--- a/builtin/config.c
+++ b/builtin/config.c
@@ -639,8 +639,9 @@ static char *default_user_config(void)
int cmd_config(int argc, const char **argv, const char *prefix)
{
int nongit = !startup_info->have_repository;
- char *value;
+ char *value = NULL;
int flags = 0;
+ int ret = 0;
given_config_source.file = xstrdup_or_null(getenv(CONFIG_ENVIRONMENT));
@@ -856,44 +857,38 @@ int cmd_config(int argc, const char **argv, const char *prefix)
free(config_file);
}
else if (actions == ACTION_SET) {
- int ret;
check_write();
check_argc(argc, 2, 2);
value = normalize_value(argv[0], argv[1]);
- UNLEAK(value);
ret = git_config_set_in_file_gently(given_config_source.file, argv[0], value);
if (ret == CONFIG_NOTHING_SET)
error(_("cannot overwrite multiple values with a single value\n"
" Use a regexp, --add or --replace-all to change %s."), argv[0]);
- return ret;
}
else if (actions == ACTION_SET_ALL) {
check_write();
check_argc(argc, 2, 3);
value = normalize_value(argv[0], argv[1]);
- UNLEAK(value);
- return git_config_set_multivar_in_file_gently(given_config_source.file,
- argv[0], value, argv[2],
- flags);
+ ret = git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value, argv[2],
+ flags);
}
else if (actions == ACTION_ADD) {
check_write();
check_argc(argc, 2, 2);
value = normalize_value(argv[0], argv[1]);
- UNLEAK(value);
- return git_config_set_multivar_in_file_gently(given_config_source.file,
- argv[0], value,
- CONFIG_REGEX_NONE,
- flags);
+ ret = git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value,
+ CONFIG_REGEX_NONE,
+ flags);
}
else if (actions == ACTION_REPLACE_ALL) {
check_write();
check_argc(argc, 2, 3);
value = normalize_value(argv[0], argv[1]);
- UNLEAK(value);
- return git_config_set_multivar_in_file_gently(given_config_source.file,
- argv[0], value, argv[2],
- flags | CONFIG_FLAGS_MULTI_REPLACE);
+ ret = git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value, argv[2],
+ flags | CONFIG_FLAGS_MULTI_REPLACE);
}
else if (actions == ACTION_GET) {
check_argc(argc, 1, 2);
@@ -934,26 +929,28 @@ int cmd_config(int argc, const char **argv, const char *prefix)
flags | CONFIG_FLAGS_MULTI_REPLACE);
}
else if (actions == ACTION_RENAME_SECTION) {
- int ret;
check_write();
check_argc(argc, 2, 2);
ret = git_config_rename_section_in_file(given_config_source.file,
argv[0], argv[1]);
if (ret < 0)
return ret;
- if (ret == 0)
+ else if (!ret)
die(_("no such section: %s"), argv[0]);
+ else
+ ret = 0;
}
else if (actions == ACTION_REMOVE_SECTION) {
- int ret;
check_write();
check_argc(argc, 1, 1);
ret = git_config_rename_section_in_file(given_config_source.file,
argv[0], NULL);
if (ret < 0)
return ret;
- if (ret == 0)
+ else if (!ret)
die(_("no such section: %s"), argv[0]);
+ else
+ ret = 0;
}
else if (actions == ACTION_GET_COLOR) {
check_argc(argc, 1, 2);
@@ -966,5 +963,6 @@ int cmd_config(int argc, const char **argv, const char *prefix)
return get_colorbool(argv[0], argc == 2);
}
- return 0;
+ free(value);
+ return ret;
}
diff --git a/builtin/credential-cache--daemon.c b/builtin/credential-cache--daemon.c
index 4c6c89ab0d..f3c89831d4 100644
--- a/builtin/credential-cache--daemon.c
+++ b/builtin/credential-cache--daemon.c
@@ -267,7 +267,7 @@ int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix)
const char *socket_path;
int ignore_sighup = 0;
static const char *usage[] = {
- "git-credential-cache--daemon [opts] <socket_path>",
+ "git credential-cache--daemon [--debug] <socket-path>",
NULL
};
int debug = 0;
@@ -305,7 +305,7 @@ int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix)
int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix)
{
const char * const usage[] = {
- "git credential-cache--daemon [options] <action>",
+ "git credential-cache--daemon [--debug] <socket-path>",
"",
"credential-cache--daemon is disabled in this build of Git",
NULL
diff --git a/builtin/describe.c b/builtin/describe.c
index e17c4b4c69..23e3f05fb1 100644
--- a/builtin/describe.c
+++ b/builtin/describe.c
@@ -23,8 +23,9 @@
define_commit_slab(commit_names, struct commit_name *);
static const char * const describe_usage[] = {
- N_("git describe [<options>] [<commit-ish>...]"),
- N_("git describe [<options>] --dirty"),
+ N_("git describe [--all] [--tags] [--contains] [--abbrev=<n>] [<commit-ish>...]"),
+ N_("git describe [--all] [--tags] [--contains] [--abbrev=<n>] --dirty[=<mark>]"),
+ N_("git describe <blob>"),
NULL
};
diff --git a/builtin/diagnose.c b/builtin/diagnose.c
index 576e0e8e38..d52015c67a 100644
--- a/builtin/diagnose.c
+++ b/builtin/diagnose.c
@@ -3,7 +3,8 @@
#include "diagnose.h"
static const char * const diagnose_usage[] = {
- N_("git diagnose [-o|--output-directory <path>] [-s|--suffix <format>] [--mode=<mode>]"),
+ N_("git diagnose [(-o | --output-directory) <path>] [(-s | --suffix) <format>]\n"
+ " [--mode=<mode>]"),
NULL
};
diff --git a/builtin/diff-files.c b/builtin/diff-files.c
index 92cf6e1e92..096ea2fedb 100644
--- a/builtin/diff-files.c
+++ b/builtin/diff-files.c
@@ -15,6 +15,7 @@
static const char diff_files_usage[] =
"git diff-files [-q] [-0 | -1 | -2 | -3 | -c | --cc] [<common-diff-options>] [<path>...]"
+"\n"
COMMON_DIFF_OPTIONS_HELP;
int cmd_diff_files(int argc, const char **argv, const char *prefix)
diff --git a/builtin/diff-index.c b/builtin/diff-index.c
index 7d158af6b6..aea139b9d8 100644
--- a/builtin/diff-index.c
+++ b/builtin/diff-index.c
@@ -9,8 +9,9 @@
#include "submodule.h"
static const char diff_cache_usage[] =
-"git diff-index [-m] [--cached] "
+"git diff-index [-m] [--cached] [--merge-base] "
"[<common-diff-options>] <tree-ish> [<path>...]"
+"\n"
COMMON_DIFF_OPTIONS_HELP;
int cmd_diff_index(int argc, const char **argv, const char *prefix)
diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c
index 116097a404..85e8c81e59 100644
--- a/builtin/diff-tree.c
+++ b/builtin/diff-tree.c
@@ -83,8 +83,10 @@ static int diff_tree_stdin(char *line)
}
static const char diff_tree_usage[] =
-"git diff-tree [--stdin] [-m] [-c | --cc] [-s] [-v] [--pretty] [-t] [-r] [--root] "
-"[<common-diff-options>] <tree-ish> [<tree-ish>] [<path>...]\n"
+"git diff-tree [--stdin] [-m] [-s] [-v] [--no-commit-id] [--pretty]\n"
+" [-t] [-r] [-c | --cc] [--combined-all-paths] [--root] [--merge-base]\n"
+" [<common-diff-options>] <tree-ish> [<tree-ish>] [<path>...]\n"
+"\n"
" -r diff recursively\n"
" -c show combined diff for merge commits\n"
" --cc show combined diff for merge commits removing uninteresting hunks\n"
diff --git a/builtin/diff.c b/builtin/diff.c
index 54bb3de964..cb63f157dd 100644
--- a/builtin/diff.c
+++ b/builtin/diff.c
@@ -30,7 +30,8 @@ static const char builtin_diff_usage[] =
" or: git diff [<options>] [--merge-base] <commit> [<commit>...] <commit> [--] [<path>...]\n"
" or: git diff [<options>] <commit>...<commit> [--] [<path>...]\n"
" or: git diff [<options>] <blob> <blob>\n"
-" or: git diff [<options>] --no-index [--] <path> <path>\n"
+" or: git diff [<options>] --no-index [--] <path> <path>"
+"\n"
COMMON_DIFF_OPTIONS_HELP;
static const char *blob_path(struct object_array_entry *entry)
@@ -209,7 +210,7 @@ static int builtin_diff_tree(struct rev_info *revs,
static int builtin_diff_combined(struct rev_info *revs,
int argc, const char **argv,
struct object_array_entry *ent,
- int ents)
+ int ents, int first_non_parent)
{
struct oid_array parents = OID_ARRAY_INIT;
int i;
@@ -217,11 +218,18 @@ static int builtin_diff_combined(struct rev_info *revs,
if (argc > 1)
usage(builtin_diff_usage);
+ if (first_non_parent < 0)
+ die(_("no merge given, only parents."));
+ if (first_non_parent >= ents)
+ BUG("first_non_parent out of range: %d", first_non_parent);
+
diff_merges_set_dense_combined_if_unset(revs);
- for (i = 1; i < ents; i++)
- oid_array_append(&parents, &ent[i].item->oid);
- diff_tree_combined(&ent[0].item->oid, &parents, revs);
+ for (i = 0; i < ents; i++) {
+ if (i != first_non_parent)
+ oid_array_append(&parents, &ent[i].item->oid);
+ }
+ diff_tree_combined(&ent[first_non_parent].item->oid, &parents, revs);
oid_array_clear(&parents);
return 0;
}
@@ -385,6 +393,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
int i;
struct rev_info rev;
struct object_array ent = OBJECT_ARRAY_INIT;
+ int first_non_parent = -1;
int blobs = 0, paths = 0;
struct object_array_entry *blob[2];
int nongit = 0, no_index = 0;
@@ -543,6 +552,10 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
continue;
obj->flags |= flags;
add_object_array(obj, name, &ent);
+ if (first_non_parent < 0 &&
+ (i >= rev.cmdline.nr || /* HEAD by hand. */
+ rev.cmdline.rev[i].whence != REV_CMD_PARENTS_ONLY))
+ first_non_parent = ent.nr - 1;
} else if (obj->type == OBJ_BLOB) {
if (2 <= blobs)
die(_("more than two blobs given: '%s'"), name);
@@ -590,12 +603,13 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
&ent.objects[0], &ent.objects[1]);
} else
result = builtin_diff_combined(&rev, argc, argv,
- ent.objects, ent.nr);
+ ent.objects, ent.nr,
+ first_non_parent);
result = diff_result_code(&rev.diffopt, result);
if (1 < rev.diffopt.skip_stat_unmatch)
refresh_index_quietly();
release_revisions(&rev);
- UNLEAK(ent);
+ object_array_clear(&ent);
UNLEAK(blob);
return result;
}
diff --git a/builtin/difftool.c b/builtin/difftool.c
index 4b10ad1a36..d7f08c8a7f 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -44,8 +44,11 @@ static int difftool_config(const char *var, const char *value, void *cb)
static int print_tool_help(void)
{
- const char *argv[] = { "mergetool", "--tool-help=diff", NULL };
- return run_command_v_opt(argv, RUN_GIT_CMD);
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "mergetool", "--tool-help=diff", NULL);
+ return run_command(&cmd);
}
static int parse_index_info(char *p, int *mode1, int *mode2,
@@ -360,8 +363,8 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
struct pair_entry *entry;
struct index_state wtindex;
struct checkout lstate, rstate;
- int flags = RUN_GIT_CMD, err = 0;
- const char *helper_argv[] = { "difftool--helper", NULL, NULL, NULL };
+ int err = 0;
+ struct child_process cmd = CHILD_PROCESS_INIT;
struct hashmap wt_modified, tmp_modified;
int indices_loaded = 0;
@@ -563,16 +566,17 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
}
strbuf_setlen(&ldir, ldir_len);
- helper_argv[1] = ldir.buf;
strbuf_setlen(&rdir, rdir_len);
- helper_argv[2] = rdir.buf;
if (extcmd) {
- helper_argv[0] = extcmd;
- flags = 0;
- } else
+ strvec_push(&cmd.args, extcmd);
+ } else {
+ strvec_push(&cmd.args, "difftool--helper");
+ cmd.git_cmd = 1;
setenv("GIT_DIFFTOOL_DIRDIFF", "true", 1);
- ret = run_command_v_opt(helper_argv, flags);
+ }
+ strvec_pushl(&cmd.args, ldir.buf, rdir.buf, NULL);
+ ret = run_command(&cmd);
/* TODO: audit for interaction with sparse-index. */
ensure_full_index(&wtindex);
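
The difftool hunks above follow the conversion that recurs throughout this series: drop
run_command_v_opt() and set up an explicit struct child_process instead. A minimal sketch
restating the print_tool_help() change with comments added; it uses only the calls already
visible in the hunk and is not part of the patch itself:

    static int print_tool_help(void)
    {
        struct child_process cmd = CHILD_PROCESS_INIT;

        cmd.git_cmd = 1;    /* replaces the old RUN_GIT_CMD flag */
        strvec_pushl(&cmd.args, "mergetool", "--tool-help=diff", NULL);

        /* run_command() clears cmd.args once the child has finished */
        return run_command(&cmd);
    }
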
diff --git a/builtin/fetch.c b/builtin/fetch.c
index a0fca93bb6..7378cafeec 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -122,6 +122,8 @@ static int git_fetch_config(const char *k, const char *v, void *cb)
fetch_parallel_config = git_config_int(k, v);
if (fetch_parallel_config < 0)
die(_("fetch.parallel cannot be negative"));
+ if (!fetch_parallel_config)
+ fetch_parallel_config = online_cpus();
return 0;
}
@@ -1951,28 +1953,36 @@ static int fetch_multiple(struct string_list *list, int max_children)
if (max_children != 1 && list->nr != 1) {
struct parallel_fetch_state state = { argv.v, list, 0, 0 };
+ const struct run_process_parallel_opts opts = {
+ .tr2_category = "fetch",
+ .tr2_label = "parallel/fetch",
+
+ .processes = max_children,
+
+ .get_next_task = &fetch_next_remote,
+ .start_failure = &fetch_failed_to_start,
+ .task_finished = &fetch_finished,
+ .data = &state,
+ };
strvec_push(&argv, "--end-of-options");
- result = run_processes_parallel_tr2(max_children,
- &fetch_next_remote,
- &fetch_failed_to_start,
- &fetch_finished,
- &state,
- "fetch", "parallel/fetch");
-
- if (!result)
- result = state.result;
+
+ run_processes_parallel(&opts);
+ result = state.result;
} else
for (i = 0; i < list->nr; i++) {
const char *name = list->items[i].string;
- strvec_push(&argv, name);
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_pushv(&cmd.args, argv.v);
+ strvec_push(&cmd.args, name);
if (verbosity >= 0)
printf(_("Fetching %s\n"), name);
- if (run_command_v_opt(argv.v, RUN_GIT_CMD)) {
+ cmd.git_cmd = 1;
+ if (run_command(&cmd)) {
error(_("could not fetch %s"), name);
result = 1;
}
- strvec_pop(&argv);
}
strvec_clear(&argv);
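
The parallel-fetch hunk above switches run_processes_parallel() to a single options struct in
place of the long positional argument list of run_processes_parallel_tr2(). Restated as a
sketch with comments; every field name is taken from the hunk above, nothing new is assumed:

    const struct run_process_parallel_opts opts = {
        .tr2_category = "fetch",                  /* trace2 region, formerly the */
        .tr2_label = "parallel/fetch",            /* trailing _tr2() arguments   */

        .processes = max_children,                /* number of concurrent jobs   */

        .get_next_task = &fetch_next_remote,      /* hands out the next remote   */
        .start_failure = &fetch_failed_to_start,
        .task_finished = &fetch_finished,
        .data = &state,                           /* shared parallel_fetch_state */
    };

    run_processes_parallel(&opts);
    result = state.result;                        /* per-remote failures land here */
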
diff --git a/builtin/for-each-repo.c b/builtin/for-each-repo.c
index fd86e5a861..6aeac37148 100644
--- a/builtin/for-each-repo.c
+++ b/builtin/for-each-repo.c
@@ -6,7 +6,7 @@
#include "string-list.h"
static const char * const for_each_repo_usage[] = {
- N_("git for-each-repo --config=<config> <command-args>"),
+ N_("git for-each-repo --config=<config> [--] <arguments>"),
NULL
};
@@ -14,13 +14,16 @@ static int run_command_on_repo(const char *path, int argc, const char ** argv)
{
int i;
struct child_process child = CHILD_PROCESS_INIT;
+ char *abspath = interpolate_path(path, 0);
child.git_cmd = 1;
- strvec_pushl(&child.args, "-C", path, NULL);
+ strvec_pushl(&child.args, "-C", abspath, NULL);
for (i = 0; i < argc; i++)
strvec_push(&child.args, argv[i]);
+ free(abspath);
+
return run_command(&child);
}
diff --git a/builtin/fsck.c b/builtin/fsck.c
index f7916f06ed..7436e1a68e 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -228,6 +228,8 @@ static void mark_unreachable_referents(const struct object_id *oid)
options.walk = mark_used;
fsck_walk(obj, NULL, &options);
+ if (obj->type == OBJ_TREE)
+ free_tree_buffer((struct tree *)obj);
}
static int mark_loose_unreachable_referents(const struct object_id *oid,
@@ -437,9 +439,6 @@ static int fsck_obj(struct object *obj, void *buffer, unsigned long size)
out:
if (obj->type == OBJ_TREE)
free_tree_buffer((struct tree *)obj);
- if (obj->type == OBJ_COMMIT)
- free_commit_buffer(the_repository->parsed_objects,
- (struct commit *)obj);
return err;
}
@@ -821,7 +820,10 @@ static int mark_packed_for_connectivity(const struct object_id *oid,
}
static char const * const fsck_usage[] = {
- N_("git fsck [<options>] [<object>...]"),
+ N_("git fsck [--tags] [--root] [--unreachable] [--cache] [--no-reflogs]\n"
+ " [--[no-]full] [--strict] [--verbose] [--lost-found]\n"
+ " [--[no-]dangling] [--[no-]progress] [--connectivity-only]\n"
+ " [--[no-]name-objects] [<object>...]"),
NULL
};
@@ -853,6 +855,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
errors_found = 0;
read_replace_refs = 0;
+ save_commit_buffer = 0;
argc = parse_options(argc, argv, prefix, fsck_opts, fsck_usage, 0);
diff --git a/builtin/fsmonitor--daemon.c b/builtin/fsmonitor--daemon.c
index 2c109cf8b3..6f30a4f93a 100644
--- a/builtin/fsmonitor--daemon.c
+++ b/builtin/fsmonitor--daemon.c
@@ -3,6 +3,7 @@
#include "parse-options.h"
#include "fsmonitor.h"
#include "fsmonitor-ipc.h"
+#include "fsmonitor-path-utils.h"
#include "compat/fsmonitor/fsm-health.h"
#include "compat/fsmonitor/fsm-listen.h"
#include "fsmonitor--daemon.h"
@@ -13,8 +14,8 @@
static const char * const builtin_fsmonitor__daemon_usage[] = {
N_("git fsmonitor--daemon start [<options>]"),
N_("git fsmonitor--daemon run [<options>]"),
- N_("git fsmonitor--daemon stop"),
- N_("git fsmonitor--daemon status"),
+ "git fsmonitor--daemon stop",
+ "git fsmonitor--daemon status",
NULL
};
@@ -1282,6 +1283,11 @@ static int fsmonitor_run_daemon(void)
strbuf_addstr(&state.path_worktree_watch, absolute_path(get_git_work_tree()));
state.nr_paths_watching = 1;
+ strbuf_init(&state.alias.alias, 0);
+ strbuf_init(&state.alias.points_to, 0);
+ if ((err = fsmonitor__get_alias(state.path_worktree_watch.buf, &state.alias)))
+ goto done;
+
/*
* We create and delete cookie files somewhere inside the .git
* directory to help us keep sync with the file system. If
@@ -1343,7 +1349,8 @@ static int fsmonitor_run_daemon(void)
* directory.)
*/
strbuf_init(&state.path_ipc, 0);
- strbuf_addstr(&state.path_ipc, absolute_path(fsmonitor_ipc__get_path()));
+ strbuf_addstr(&state.path_ipc,
+ absolute_path(fsmonitor_ipc__get_path(the_repository)));
/*
* Confirm that we can create platform-specific resources for the
@@ -1390,6 +1397,8 @@ done:
strbuf_release(&state.path_gitdir_watch);
strbuf_release(&state.path_cookie_prefix);
strbuf_release(&state.path_ipc);
+ strbuf_release(&state.alias.alias);
+ strbuf_release(&state.alias.points_to);
return err;
}
diff --git a/builtin/gc.c b/builtin/gc.c
index 2753bd15a5..02455fdcd7 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -42,7 +42,7 @@ static const char * const builtin_gc_usage[] = {
static int pack_refs = 1;
static int prune_reflogs = 1;
-static int cruft_packs = 0;
+static int cruft_packs = -1;
static int aggressive_depth = 50;
static int aggressive_window = 250;
static int gc_auto_threshold = 6700;
@@ -167,16 +167,11 @@ static void gc_config(void)
struct maintenance_run_opts;
static int maintenance_task_pack_refs(MAYBE_UNUSED struct maintenance_run_opts *opts)
{
- struct strvec pack_refs_cmd = STRVEC_INIT;
- int ret;
+ struct child_process cmd = CHILD_PROCESS_INIT;
- strvec_pushl(&pack_refs_cmd, "pack-refs", "--all", "--prune", NULL);
-
- ret = run_command_v_opt(pack_refs_cmd.v, RUN_GIT_CMD);
-
- strvec_clear(&pack_refs_cmd);
-
- return ret;
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "pack-refs", "--all", "--prune", NULL);
+ return run_command(&cmd);
}
static int too_many_loose_objects(void)
@@ -329,7 +324,7 @@ static uint64_t estimate_repack_memory(struct packed_git *pack)
return os_cache + heap;
}
-static int keep_one_pack(struct string_list_item *item, void *data)
+static int keep_one_pack(struct string_list_item *item, void *data UNUSED)
{
strvec_pushf(&repack, "--keep-pack=%s", basename(item->string));
return 0;
@@ -542,8 +537,14 @@ static void gc_before_repack(void)
if (pack_refs && maintenance_task_pack_refs(NULL))
die(FAILED_RUN, "pack-refs");
- if (prune_reflogs && run_command_v_opt(reflog.v, RUN_GIT_CMD))
- die(FAILED_RUN, reflog.v[0]);
+ if (prune_reflogs) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ strvec_pushv(&cmd.args, reflog.v);
+ if (run_command(&cmd))
+ die(FAILED_RUN, reflog.v[0]);
+ }
}
int cmd_gc(int argc, const char **argv, const char *prefix)
@@ -557,6 +558,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
int daemonized = 0;
int keep_largest_pack = -1;
timestamp_t dummy;
+ struct child_process rerere_cmd = CHILD_PROCESS_INIT;
struct option builtin_gc_options[] = {
OPT__QUIET(&quiet, N_("suppress progress reporting")),
@@ -600,6 +602,10 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
if (prune_expire && parse_expiry_date(prune_expire, &dummy))
die(_("failed to parse prune expiry value %s"), prune_expire);
+ prepare_repo_settings(the_repository);
+ if (cruft_packs < 0)
+ cruft_packs = the_repository->settings.gc_cruft_packs;
+
if (aggressive) {
strvec_push(&repack, "-f");
if (aggressive_depth > 0)
@@ -678,11 +684,17 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
gc_before_repack();
if (!repository_format_precious_objects) {
- if (run_command_v_opt(repack.v,
- RUN_GIT_CMD | RUN_CLOSE_OBJECT_STORE))
+ struct child_process repack_cmd = CHILD_PROCESS_INIT;
+
+ repack_cmd.git_cmd = 1;
+ repack_cmd.close_object_store = 1;
+ strvec_pushv(&repack_cmd.args, repack.v);
+ if (run_command(&repack_cmd))
die(FAILED_RUN, repack.v[0]);
if (prune_expire) {
+ struct child_process prune_cmd = CHILD_PROCESS_INIT;
+
/* run `git prune` even if using cruft packs */
strvec_push(&prune, prune_expire);
if (quiet)
@@ -690,18 +702,26 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
if (has_promisor_remote())
strvec_push(&prune,
"--exclude-promisor-objects");
- if (run_command_v_opt(prune.v, RUN_GIT_CMD))
+ prune_cmd.git_cmd = 1;
+ strvec_pushv(&prune_cmd.args, prune.v);
+ if (run_command(&prune_cmd))
die(FAILED_RUN, prune.v[0]);
}
}
if (prune_worktrees_expire) {
+ struct child_process prune_worktrees_cmd = CHILD_PROCESS_INIT;
+
strvec_push(&prune_worktrees, prune_worktrees_expire);
- if (run_command_v_opt(prune_worktrees.v, RUN_GIT_CMD))
+ prune_worktrees_cmd.git_cmd = 1;
+ strvec_pushv(&prune_worktrees_cmd.args, prune_worktrees.v);
+ if (run_command(&prune_worktrees_cmd))
die(FAILED_RUN, prune_worktrees.v[0]);
}
- if (run_command_v_opt(rerere.v, RUN_GIT_CMD))
+ rerere_cmd.git_cmd = 1;
+ strvec_pushv(&rerere_cmd.args, rerere.v);
+ if (run_command(&rerere_cmd))
die(FAILED_RUN, rerere.v[0]);
report_garbage = report_pack_garbage;
@@ -711,7 +731,6 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
clean_pack_garbage();
}
- prepare_repo_settings(the_repository);
if (the_repository->settings.gc_write_commit_graph == 1)
write_commit_graph_reachable(the_repository->objects->odb,
!quiet && !daemonized ? COMMIT_GRAPH_WRITE_PROGRESS : 0,
@@ -1461,20 +1480,23 @@ static char *get_maintpath(void)
}
static char const * const builtin_maintenance_register_usage[] = {
- "git maintenance register",
+ "git maintenance register [--config-file <path>]",
NULL
};
static int maintenance_register(int argc, const char **argv, const char *prefix)
{
+ char *config_file = NULL;
struct option options[] = {
+ OPT_STRING(0, "config-file", &config_file, N_("file"), N_("use given config file")),
OPT_END(),
};
- int rc;
+ int found = 0;
+ const char *key = "maintenance.repo";
char *config_value;
- struct child_process config_set = CHILD_PROCESS_INIT;
- struct child_process config_get = CHILD_PROCESS_INIT;
char *maintpath = get_maintpath();
+ struct string_list_item *item;
+ const struct string_list *list;
argc = parse_options(argc, argv, prefix, options,
builtin_maintenance_register_usage, 0);
@@ -1491,46 +1513,63 @@ static int maintenance_register(int argc, const char **argv, const char *prefix)
else
git_config_set("maintenance.strategy", "incremental");
- config_get.git_cmd = 1;
- strvec_pushl(&config_get.args, "config", "--global", "--get",
- "--fixed-value", "maintenance.repo", maintpath, NULL);
- config_get.out = -1;
-
- if (start_command(&config_get)) {
- rc = error(_("failed to run 'git config'"));
- goto done;
- }
-
- /* We already have this value in our config! */
- if (!finish_command(&config_get)) {
- rc = 0;
- goto done;
+ list = git_config_get_value_multi(key);
+ if (list) {
+ for_each_string_list_item(item, list) {
+ if (!strcmp(maintpath, item->string)) {
+ found = 1;
+ break;
+ }
+ }
}
- config_set.git_cmd = 1;
- strvec_pushl(&config_set.args, "config", "--add", "--global", "maintenance.repo",
- maintpath, NULL);
+ if (!found) {
+ int rc;
+ char *user_config = NULL, *xdg_config = NULL;
- rc = run_command(&config_set);
+ if (!config_file) {
+ git_global_config(&user_config, &xdg_config);
+ config_file = user_config;
+ if (!user_config)
+ die(_("$HOME not set"));
+ }
+ rc = git_config_set_multivar_in_file_gently(
+ config_file, "maintenance.repo", maintpath,
+ CONFIG_REGEX_NONE, 0);
+ free(user_config);
+ free(xdg_config);
+
+ if (rc)
+ die(_("unable to add '%s' value of '%s'"),
+ key, maintpath);
+ }
-done:
free(maintpath);
- return rc;
+ return 0;
}
static char const * const builtin_maintenance_unregister_usage[] = {
- "git maintenance unregister",
+ "git maintenance unregister [--config-file <path>] [--force]",
NULL
};
static int maintenance_unregister(int argc, const char **argv, const char *prefix)
{
+ int force = 0;
+ char *config_file = NULL;
struct option options[] = {
+ OPT_STRING(0, "config-file", &config_file, N_("file"), N_("use given config file")),
+ OPT__FORCE(&force,
+ N_("return success even if repository was not registered"),
+ PARSE_OPT_NOCOMPLETE),
OPT_END(),
};
- int rc;
- struct child_process config_unset = CHILD_PROCESS_INIT;
+ const char *key = "maintenance.repo";
char *maintpath = get_maintpath();
+ int found = 0;
+ struct string_list_item *item;
+ const struct string_list *list;
+ struct config_set cs = { { 0 } };
argc = parse_options(argc, argv, prefix, options,
builtin_maintenance_unregister_usage, 0);
@@ -1538,13 +1577,48 @@ static int maintenance_unregister(int argc, const char **argv, const char *prefi
usage_with_options(builtin_maintenance_unregister_usage,
options);
- config_unset.git_cmd = 1;
- strvec_pushl(&config_unset.args, "config", "--global", "--unset",
- "--fixed-value", "maintenance.repo", maintpath, NULL);
+ if (config_file) {
+ git_configset_init(&cs);
+ git_configset_add_file(&cs, config_file);
+ list = git_configset_get_value_multi(&cs, key);
+ } else {
+ list = git_config_get_value_multi(key);
+ }
+ if (list) {
+ for_each_string_list_item(item, list) {
+ if (!strcmp(maintpath, item->string)) {
+ found = 1;
+ break;
+ }
+ }
+ }
- rc = run_command(&config_unset);
+ if (found) {
+ int rc;
+ char *user_config = NULL, *xdg_config = NULL;
+ if (!config_file) {
+ git_global_config(&user_config, &xdg_config);
+ config_file = user_config;
+ if (!user_config)
+ die(_("$HOME not set"));
+ }
+ rc = git_config_set_multivar_in_file_gently(
+ config_file, key, NULL, maintpath,
+ CONFIG_FLAGS_MULTI_REPLACE | CONFIG_FLAGS_FIXED_VALUE);
+ free(user_config);
+ free(xdg_config);
+
+ if (rc &&
+ (!force || rc == CONFIG_NOTHING_SET))
+ die(_("unable to unset '%s' value of '%s'"),
+ key, maintpath);
+ } else if (!force) {
+ die(_("repository '%s' is not registered"), maintpath);
+ }
+
+ git_configset_clear(&cs);
free(maintpath);
- return rc;
+ return 0;
}
static const char *get_frequency(enum schedule_priority schedule)
@@ -1881,20 +1955,16 @@ static char *schtasks_task_name(const char *frequency)
static int schtasks_remove_task(enum schedule_priority schedule)
{
const char *cmd = "schtasks";
- int result;
- struct strvec args = STRVEC_INIT;
+ struct child_process child = CHILD_PROCESS_INIT;
const char *frequency = get_frequency(schedule);
char *name = schtasks_task_name(frequency);
get_schedule_cmd(&cmd, NULL);
- strvec_split(&args, cmd);
- strvec_pushl(&args, "/delete", "/tn", name, "/f", NULL);
-
- result = run_command_v_opt(args.v, 0);
-
- strvec_clear(&args);
+ strvec_split(&child.args, cmd);
+ strvec_pushl(&child.args, "/delete", "/tn", name, "/f", NULL);
free(name);
- return result;
+
+ return run_command(&child);
}
static int schtasks_remove_tasks(void)
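
The register/unregister rewrite above drops the "git config" subprocesses in favour of the
in-process config API. As a sketch, the duplicate check reduces to scanning the multi-valued
key; only calls already used in the hunks above appear here:

    const char *key = "maintenance.repo";
    const struct string_list *list = git_config_get_value_multi(key);
    struct string_list_item *item;
    int found = 0;

    if (list)
        for_each_string_list_item(item, list)
            if (!strcmp(maintpath, item->string)) {
                found = 1;    /* this repository is already registered */
                break;
            }

The new --config-file option targets a specific config file instead of the global one, and
--force lets "git maintenance unregister" succeed even when the repository was never registered.
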
diff --git a/builtin/grep.c b/builtin/grep.c
index e6bcdf860c..5fa927d4e2 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -458,6 +458,33 @@ static int grep_submodule(struct grep_opt *opt,
* subrepo's odbs to the in-memory alternates list.
*/
obj_read_lock();
+
+ /*
+ * NEEDSWORK: when reading a submodule, the sparsity settings in the
+ * superproject are incorrectly forgotten or misused. For example:
+ *
+ * 1. "command_requires_full_index"
+ * When this setting is turned on for `grep`, only the superproject
+ * knows it. All the submodules are read with their own configs
+ * and get prepare_repo_settings()'d. Therefore, these submodules
+ * "forget" the sparse-index feature switch. As a result, the index
+ * of these submodules are expanded unexpectedly.
+ *
+ * 2. "core_apply_sparse_checkout"
+ * When running `grep` in the superproject, this setting is
+ * populated using the superproject's configs. However, once
+ * initialized, this config is globally accessible and is read by
+ * prepare_repo_settings() for the submodules. For instance, if a
+ * submodule is using a sparse-checkout but the superproject is
+ * not, the result is that the config from the superproject will
+ * dictate the behavior for the submodule, making it "forget" its
+ * sparse-checkout state.
+ *
+ * 3. "core_sparse_checkout_cone"
+ * ditto.
+ *
+ * Note that this list is not exhaustive.
+ */
repo_read_gitmodules(subrepo, 0);
/*
@@ -520,8 +547,6 @@ static int grep_cache(struct grep_opt *opt,
if (repo_read_index(repo) < 0)
die(_("index file corrupt"));
- /* TODO: audit for interaction with sparse-index. */
- ensure_full_index(repo->index);
for (nr = 0; nr < repo->index->cache_nr; nr++) {
const struct cache_entry *ce = repo->index->cache[nr];
@@ -530,8 +555,20 @@ static int grep_cache(struct grep_opt *opt,
strbuf_setlen(&name, name_base_len);
strbuf_addstr(&name, ce->name);
+ if (S_ISSPARSEDIR(ce->ce_mode)) {
+ enum object_type type;
+ struct tree_desc tree;
+ void *data;
+ unsigned long size;
- if (S_ISREG(ce->ce_mode) &&
+ data = read_object_file(&ce->oid, &type, &size);
+ init_tree_desc(&tree, data, size);
+
+ hit |= grep_tree(opt, pathspec, &tree, &name, 0, 0);
+ strbuf_setlen(&name, name_base_len);
+ strbuf_addstr(&name, ce->name);
+ free(data);
+ } else if (S_ISREG(ce->ce_mode) &&
match_pathspec(repo->index, pathspec, name.buf, name.len, 0, NULL,
S_ISDIR(ce->ce_mode) ||
S_ISGITLINK(ce->ce_mode))) {
@@ -984,6 +1021,11 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
PARSE_OPT_KEEP_DASHDASH |
PARSE_OPT_STOP_AT_NON_OPTION);
+ if (the_repository->gitdir) {
+ prepare_repo_settings(the_repository);
+ the_repository->settings.command_requires_full_index = 0;
+ }
+
if (use_index && !startup_info->have_repository) {
int fallback = 0;
git_config_get_bool("grep.fallbacktonoindex", &fallback);
diff --git a/builtin/hash-object.c b/builtin/hash-object.c
index fbae878c2b..b506381502 100644
--- a/builtin/hash-object.c
+++ b/builtin/hash-object.c
@@ -80,8 +80,9 @@ static void hash_stdin_paths(const char *type, int no_filters, unsigned flags,
int cmd_hash_object(int argc, const char **argv, const char *prefix)
{
static const char * const hash_object_usage[] = {
- N_("git hash-object [-t <type>] [-w] [--path=<file> | --no-filters] [--stdin] [--] <file>..."),
- "git hash-object --stdin-paths",
+ N_("git hash-object [-t <type>] [-w] [--path=<file> | --no-filters]\n"
+ " [--stdin [--literally]] [--] <file>..."),
+ N_("git hash-object [-t <type>] [-w] --stdin-paths [--no-filters]"),
NULL
};
const char *type = blob_type;
diff --git a/builtin/help.c b/builtin/help.c
index 6f2796f211..53f2812dfb 100644
--- a/builtin/help.c
+++ b/builtin/help.c
@@ -88,7 +88,7 @@ static struct option builtin_help_options[] = {
};
static const char * const builtin_help_usage[] = {
- "git help [-a|--all] [--[no-]verbose]] [--[no-]external-commands] [--[no-]aliases]",
+ "git help [-a|--all] [--[no-]verbose] [--[no-]external-commands] [--[no-]aliases]",
N_("git help [[-i|--info] [-m|--man] [-w|--web]] [<command>|<doc>]"),
"git help [-g|--guides]",
"git help [-c|--config]",
diff --git a/builtin/init-db.c b/builtin/init-db.c
index 546f9c595e..dcaaf102ea 100644
--- a/builtin/init-db.c
+++ b/builtin/init-db.c
@@ -515,7 +515,10 @@ static int shared_callback(const struct option *opt, const char *arg, int unset)
}
static const char *const init_db_usage[] = {
- N_("git init [-q | --quiet] [--bare] [--template=<template-directory>] [--shared[=<permissions>]] [<directory>]"),
+ N_("git init [-q | --quiet] [--bare] [--template=<template-directory>]\n"
+ " [--separate-git-dir <git-dir>] [--object-format=<format>]\n"
+ " [-b <branch-name> | --initial-branch=<branch-name>]\n"
+ " [--shared[=<permissions>]] [<directory>]"),
NULL
};
diff --git a/builtin/interpret-trailers.c b/builtin/interpret-trailers.c
index 84748eafc0..e58627c72a 100644
--- a/builtin/interpret-trailers.c
+++ b/builtin/interpret-trailers.c
@@ -13,7 +13,9 @@
#include "config.h"
static const char * const git_interpret_trailers_usage[] = {
- N_("git interpret-trailers [--in-place] [--trim-empty] [(--trailer <token>[(=|:)<value>])...] [<file>...]"),
+ N_("git interpret-trailers [--in-place] [--trim-empty]\n"
+ " [(--trailer <token>[(=|:)<value>])...]\n"
+ " [--parse] [<file>...]"),
NULL
};
diff --git a/builtin/log.c b/builtin/log.c
index ee19dc5d45..5eafcf26b4 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -1334,6 +1334,7 @@ static void make_cover_letter(struct rev_info *rev, int use_separate_file,
log.in2 = 4;
log.file = rev->diffopt.file;
log.groups = SHORTLOG_GROUP_AUTHOR;
+ shortlog_finish_setup(&log);
for (i = 0; i < nr; i++)
shortlog_add_commit(&log, list[i]);
@@ -1763,7 +1764,7 @@ static void prepare_bases(struct base_tree_info *bases,
struct object_id *patch_id;
if (*commit_base_at(&commit_base, commit))
continue;
- if (commit_patch_id(commit, &diffopt, &oid, 0, 1))
+ if (commit_patch_id(commit, &diffopt, &oid, 0))
die(_("cannot get patch id"));
ALLOC_GROW(bases->patch_id, bases->nr_patch_id + 1, bases->alloc_patch_id);
patch_id = bases->patch_id + bases->nr_patch_id;
diff --git a/builtin/ls-files.c b/builtin/ls-files.c
index 4cf8a23648..a03b559eca 100644
--- a/builtin/ls-files.c
+++ b/builtin/ls-files.c
@@ -613,6 +613,7 @@ void overlay_tree_on_index(struct index_state *istate,
if (!fn)
fn = read_one_entry_quick;
err = read_tree(the_repository, tree, &pathspec, fn, istate);
+ clear_pathspec(&pathspec);
if (err)
die("unable to read tree entries %s", tree_name);
diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c
index df44e5cc0d..5d5ac03871 100644
--- a/builtin/ls-remote.c
+++ b/builtin/ls-remote.c
@@ -7,7 +7,7 @@
static const char * const ls_remote_usage[] = {
N_("git ls-remote [--heads] [--tags] [--refs] [--upload-pack=<exec>]\n"
- " [-q | --quiet] [--exit-code] [--get-url]\n"
+ " [-q | --quiet] [--exit-code] [--get-url] [--sort=<key>]\n"
" [--symref] [<repository> [<refs>...]]"),
NULL
};
diff --git a/builtin/merge-base.c b/builtin/merge-base.c
index a11f8c6e4b..6f3941f2a4 100644
--- a/builtin/merge-base.c
+++ b/builtin/merge-base.c
@@ -31,8 +31,8 @@ static int show_merge_base(struct commit **rev, int rev_nr, int show_all)
static const char * const merge_base_usage[] = {
N_("git merge-base [-a | --all] <commit> <commit>..."),
N_("git merge-base [-a | --all] --octopus <commit>..."),
- N_("git merge-base --independent <commit>..."),
N_("git merge-base --is-ancestor <commit> <commit>"),
+ N_("git merge-base --independent <commit>..."),
N_("git merge-base --fork-point <ref> [<commit>]"),
NULL
};
diff --git a/builtin/merge-index.c b/builtin/merge-index.c
index c0383fe9df..012f52bd00 100644
--- a/builtin/merge-index.c
+++ b/builtin/merge-index.c
@@ -12,6 +12,7 @@ static int merge_entry(int pos, const char *path)
const char *arguments[] = { pgm, "", "", "", path, "", "", "", NULL };
char hexbuf[4][GIT_MAX_HEXSZ + 1];
char ownbuf[4][60];
+ struct child_process cmd = CHILD_PROCESS_INIT;
if (pos >= active_nr)
die("git merge-index: %s not in the cache", path);
@@ -31,7 +32,8 @@ static int merge_entry(int pos, const char *path)
if (!found)
die("git merge-index: %s not in the cache", path);
- if (run_command_v_opt(arguments, 0)) {
+ strvec_pushv(&cmd.args, arguments);
+ if (run_command(&cmd)) {
if (one_shot)
err++;
else {
diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c
index ae5782917b..330f779e8b 100644
--- a/builtin/merge-tree.c
+++ b/builtin/merge-tree.c
@@ -3,6 +3,7 @@
#include "tree-walk.h"
#include "xdiff-interface.h"
#include "help.h"
+#include "commit.h"
#include "commit-reach.h"
#include "merge-ort.h"
#include "object-store.h"
@@ -402,9 +403,11 @@ struct merge_tree_options {
int allow_unrelated_histories;
int show_messages;
int name_only;
+ int use_stdin;
};
static int real_merge(struct merge_tree_options *o,
+ const char *merge_base,
const char *branch1, const char *branch2,
const char *prefix)
{
@@ -412,6 +415,7 @@ static int real_merge(struct merge_tree_options *o,
struct commit_list *merge_bases = NULL;
struct merge_options opt;
struct merge_result result = { 0 };
+ int show_messages = o->show_messages;
parent1 = get_merge_parent(branch1);
if (!parent1)
@@ -430,22 +434,39 @@ static int real_merge(struct merge_tree_options *o,
opt.branch1 = branch1;
opt.branch2 = branch2;
- /*
- * Get the merge bases, in reverse order; see comment above
- * merge_incore_recursive in merge-ort.h
- */
- merge_bases = get_merge_bases(parent1, parent2);
- if (!merge_bases && !o->allow_unrelated_histories)
- die(_("refusing to merge unrelated histories"));
- merge_bases = reverse_commit_list(merge_bases);
+ if (merge_base) {
+ struct commit *base_commit;
+ struct tree *base_tree, *parent1_tree, *parent2_tree;
+
+ base_commit = lookup_commit_reference_by_name(merge_base);
+ if (!base_commit)
+ die(_("could not lookup commit %s"), merge_base);
+
+ opt.ancestor = merge_base;
+ base_tree = get_commit_tree(base_commit);
+ parent1_tree = get_commit_tree(parent1);
+ parent2_tree = get_commit_tree(parent2);
+ merge_incore_nonrecursive(&opt, base_tree, parent1_tree, parent2_tree, &result);
+ } else {
+ /*
+ * Get the merge bases, in reverse order; see comment above
+ * merge_incore_recursive in merge-ort.h
+ */
+ merge_bases = get_merge_bases(parent1, parent2);
+ if (!merge_bases && !o->allow_unrelated_histories)
+ die(_("refusing to merge unrelated histories"));
+ merge_bases = reverse_commit_list(merge_bases);
+ merge_incore_recursive(&opt, merge_bases, parent1, parent2, &result);
+ }
- merge_incore_recursive(&opt, merge_bases, parent1, parent2, &result);
if (result.clean < 0)
die(_("failure to merge"));
- if (o->show_messages == -1)
- o->show_messages = !result.clean;
+ if (show_messages == -1)
+ show_messages = !result.clean;
+ if (o->use_stdin)
+ printf("%d%c", result.clean, line_termination);
printf("%s%c", oid_to_hex(&result.tree->object.oid), line_termination);
if (!result.clean) {
struct string_list conflicted_files = STRING_LIST_INIT_NODUP;
@@ -467,11 +488,13 @@ static int real_merge(struct merge_tree_options *o,
}
string_list_clear(&conflicted_files, 1);
}
- if (o->show_messages) {
+ if (show_messages) {
putchar(line_termination);
merge_display_update_messages(&opt, line_termination == '\0',
&result);
}
+ if (o->use_stdin)
+ putchar(line_termination);
merge_finalize(&opt, &result);
return !result.clean; /* result.clean < 0 handled above */
}
@@ -481,6 +504,7 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
struct merge_tree_options o = { .show_messages = -1 };
int expected_remaining_argc;
int original_argc;
+ const char *merge_base = NULL;
const char * const merge_tree_usage[] = {
N_("git merge-tree [--write-tree] [<options>] <branch1> <branch2>"),
@@ -505,6 +529,14 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
&o.allow_unrelated_histories,
N_("allow merging unrelated histories"),
PARSE_OPT_NONEG),
+ OPT_BOOL_F(0, "stdin",
+ &o.use_stdin,
+ N_("perform multiple merges, one per line of input"),
+ PARSE_OPT_NONEG),
+ OPT_STRING(0, "merge-base",
+ &merge_base,
+ N_("commit"),
+ N_("specify a merge-base for the merge")),
OPT_END()
};
@@ -512,6 +544,51 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
original_argc = argc - 1; /* ignoring argv[0] */
argc = parse_options(argc, argv, prefix, mt_options,
merge_tree_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+
+ /* Handle --stdin */
+ if (o.use_stdin) {
+ struct strbuf buf = STRBUF_INIT;
+
+ if (o.mode == MODE_TRIVIAL)
+ die(_("--trivial-merge is incompatible with all other options"));
+ if (merge_base)
+ die(_("--merge-base is incompatible with --stdin"));
+ line_termination = '\0';
+ while (strbuf_getline_lf(&buf, stdin) != EOF) {
+ struct strbuf **split;
+ int result;
+ const char *input_merge_base = NULL;
+
+ split = strbuf_split(&buf, ' ');
+ if (!split[0] || !split[1])
+ die(_("malformed input line: '%s'."), buf.buf);
+ strbuf_rtrim(split[0]);
+ strbuf_rtrim(split[1]);
+
+ /* parse the merge-base */
+ if (!strcmp(split[1]->buf, "--")) {
+ input_merge_base = split[0]->buf;
+ }
+
+ if (input_merge_base && split[2] && split[3] && !split[4]) {
+ strbuf_rtrim(split[2]);
+ strbuf_rtrim(split[3]);
+ result = real_merge(&o, input_merge_base, split[2]->buf, split[3]->buf, prefix);
+ } else if (!input_merge_base && !split[2]) {
+ result = real_merge(&o, NULL, split[0]->buf, split[1]->buf, prefix);
+ } else {
+ die(_("malformed input line: '%s'."), buf.buf);
+ }
+
+ if (result < 0)
+ die(_("merging cannot continue; got unclean result of %d"), result);
+ strbuf_list_free(split);
+ }
+ strbuf_release(&buf);
+ return 0;
+ }
+
+ /* Figure out which mode to use */
switch (o.mode) {
default:
BUG("unexpected command mode %d", o.mode);
@@ -545,7 +622,7 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
/* Do the relevant type of merge */
if (o.mode == MODE_REAL)
- return real_merge(&o, argv[0], argv[1], prefix);
+ return real_merge(&o, merge_base, argv[0], argv[1], prefix);
else
return trivial_merge(argv[0], argv[1], argv[2]);
}
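
Going by the strbuf_split() loop above, each --stdin input line takes one of two shapes
(the names below are placeholders, not taken from the patch):

    <branch1> <branch2>
    <merge-base> -- <branch1> <branch2>

With --stdin the output records are NUL-terminated, since line_termination is set to '\0'
before the loop, and each record is prefixed with the 0/1 cleanliness flag printed by
real_merge().
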
diff --git a/builtin/merge.c b/builtin/merge.c
index 5900b81729..584234c50d 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -345,60 +345,49 @@ out:
return rc;
}
-static void read_empty(const struct object_id *oid, int verbose)
+static void read_empty(const struct object_id *oid)
{
- int i = 0;
- const char *args[7];
-
- args[i++] = "read-tree";
- if (verbose)
- args[i++] = "-v";
- args[i++] = "-m";
- args[i++] = "-u";
- args[i++] = empty_tree_oid_hex();
- args[i++] = oid_to_hex(oid);
- args[i] = NULL;
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&cmd.args, "read-tree", "-m", "-u", empty_tree_oid_hex(),
+ oid_to_hex(oid), NULL);
+ cmd.git_cmd = 1;
- if (run_command_v_opt(args, RUN_GIT_CMD))
+ if (run_command(&cmd))
die(_("read-tree failed"));
}
-static void reset_hard(const struct object_id *oid, int verbose)
+static void reset_hard(const struct object_id *oid)
{
- int i = 0;
- const char *args[6];
-
- args[i++] = "read-tree";
- if (verbose)
- args[i++] = "-v";
- args[i++] = "--reset";
- args[i++] = "-u";
- args[i++] = oid_to_hex(oid);
- args[i] = NULL;
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&cmd.args, "read-tree", "-v", "--reset", "-u",
+ oid_to_hex(oid), NULL);
+ cmd.git_cmd = 1;
- if (run_command_v_opt(args, RUN_GIT_CMD))
+ if (run_command(&cmd))
die(_("read-tree failed"));
}
static void restore_state(const struct object_id *head,
const struct object_id *stash)
{
- struct strvec args = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
- reset_hard(head, 1);
+ reset_hard(head);
if (is_null_oid(stash))
goto refresh_cache;
- strvec_pushl(&args, "stash", "apply", "--index", "--quiet", NULL);
- strvec_push(&args, oid_to_hex(stash));
+ strvec_pushl(&cmd.args, "stash", "apply", "--index", "--quiet", NULL);
+ strvec_push(&cmd.args, oid_to_hex(stash));
/*
* It is OK to ignore error here, for example when there was
* nothing to restore.
*/
- run_command_v_opt(args.v, RUN_GIT_CMD);
- strvec_clear(&args);
+ cmd.git_cmd = 1;
+ run_command(&cmd);
refresh_cache:
if (discard_cache() < 0 || read_cache() < 0)
@@ -1470,7 +1459,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
check_trust_level);
remote_head_oid = &remoteheads->item->object.oid;
- read_empty(remote_head_oid, 0);
+ read_empty(remote_head_oid);
update_ref("initial pull", "HEAD", remote_head_oid, NULL, 0,
UPDATE_REFS_DIE_ON_ERR);
goto done;
@@ -1794,5 +1783,6 @@ done:
}
strbuf_release(&buf);
free(branch_to_free);
+ discard_index(&the_index);
return ret;
}
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 9b126d6ce0..9a18a82b05 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -56,11 +56,12 @@ static struct opts_multi_pack_index {
static int parse_object_dir(const struct option *opt, const char *arg,
int unset)
{
- free(opts.object_dir);
+ char **value = opt->value;
+ free(*value);
if (unset)
- opts.object_dir = xstrdup(get_object_directory());
+ *value = xstrdup(get_object_directory());
else
- opts.object_dir = real_pathdup(arg, 1);
+ *value = real_pathdup(arg, 1);
return 0;
}
diff --git a/builtin/notes.c b/builtin/notes.c
index be51f69225..902418df3f 100644
--- a/builtin/notes.c
+++ b/builtin/notes.c
@@ -181,7 +181,7 @@ static void prepare_note_data(const struct object_id *object, struct note_data *
strbuf_addch(&buf, '\n');
strbuf_add_commented_lines(&buf, "\n", strlen("\n"));
strbuf_add_commented_lines(&buf, _(note_template), strlen(_(note_template)));
- strbuf_addch(&buf, '\n');
+ strbuf_add_commented_lines(&buf, "\n", strlen("\n"));
write_or_die(fd, buf.buf, buf.len);
write_commented_object(fd, object);
@@ -562,13 +562,14 @@ out:
static int append_edit(int argc, const char **argv, const char *prefix)
{
int allow_empty = 0;
+ int blankline = 1;
const char *object_ref;
struct notes_tree *t;
struct object_id object, new_note;
const struct object_id *note;
- char *logmsg;
+ char *logmsg = NULL;
const char * const *usage;
- struct note_data d = { 0, 0, NULL, STRBUF_INIT };
+ struct note_data d = { .buf = STRBUF_INIT };
struct option options[] = {
OPT_CALLBACK_F('m', "message", &d, N_("message"),
N_("note contents as a string"), PARSE_OPT_NONEG,
@@ -584,6 +585,8 @@ static int append_edit(int argc, const char **argv, const char *prefix)
parse_reuse_arg),
OPT_BOOL(0, "allow-empty", &allow_empty,
N_("allow storing empty note")),
+ OPT_BOOL(0, "blank-line", &blankline,
+ N_("insert paragraph break before appending to an existing note")),
OPT_END()
};
int edit = !strcmp(argv[0], "edit");
@@ -618,8 +621,7 @@ static int append_edit(int argc, const char **argv, const char *prefix)
enum object_type type;
char *prev_buf = read_object_file(note, &type, &size);
- strbuf_grow(&d.buf, size + 1);
- if (d.buf.len && prev_buf && size)
+ if (blankline && d.buf.len && prev_buf && size)
strbuf_insertstr(&d.buf, 0, "\n");
if (prev_buf && size)
strbuf_insert(&d.buf, 0, prev_buf, size);
@@ -631,13 +633,11 @@ static int append_edit(int argc, const char **argv, const char *prefix)
if (add_note(t, &object, &new_note, combine_notes_overwrite))
BUG("combine_notes_overwrite failed");
logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]);
- } else {
- fprintf(stderr, _("Removing note for object %s\n"),
+ commit_notes(the_repository, t, logmsg);
+ } else if (!d.buf.len && !note)
+ fprintf(stderr,
+ _("Both original and appended notes are empty in %s, do nothing\n"),
oid_to_hex(&object));
- remove_note(t, object.hash);
- logmsg = xstrfmt("Notes removed by 'git notes %s'", argv[0]);
- }
- commit_notes(the_repository, t, logmsg);
free(logmsg);
free_note_data(&d);
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 3658c05caf..573d0b20b7 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -180,8 +180,8 @@ static inline void oe_set_delta_size(struct packing_data *pack,
#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
static const char *pack_usage[] = {
- N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
- N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
+ N_("git pack-objects --stdout [<options>] [< <ref-list> | < <object-list>]"),
+ N_("git pack-objects [<options>] <base-name> [< <ref-list> | < <object-list>]"),
NULL
};
diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c
index ed9b9013a5..ecd49ca268 100644
--- a/builtin/pack-redundant.c
+++ b/builtin/pack-redundant.c
@@ -14,7 +14,7 @@
#define BLKSIZE 512
static const char pack_redundant_usage[] =
-"git pack-redundant [--verbose] [--alt-odb] (--all | <filename.pack>...)";
+"git pack-redundant [--verbose] [--alt-odb] (--all | <pack-filename>...)";
static int load_all_packs, verbose, alt_odb;
diff --git a/builtin/pack-refs.c b/builtin/pack-refs.c
index cfbd5c36c7..27c2ca06ac 100644
--- a/builtin/pack-refs.c
+++ b/builtin/pack-refs.c
@@ -5,7 +5,7 @@
#include "repository.h"
static char const * const pack_refs_usage[] = {
- N_("git pack-refs [<options>]"),
+ N_("git pack-refs [--all] [--no-prune]"),
NULL
};
diff --git a/builtin/patch-id.c b/builtin/patch-id.c
index 881fcf3273..f840fbf1c7 100644
--- a/builtin/patch-id.c
+++ b/builtin/patch-id.c
@@ -2,6 +2,7 @@
#include "builtin.h"
#include "config.h"
#include "diff.h"
+#include "parse-options.h"
static void flush_current_id(int patchlen, struct object_id *id, struct object_id *result)
{
@@ -57,10 +58,12 @@ static int scan_hunk_header(const char *p, int *p_before, int *p_after)
}
static int get_one_patchid(struct object_id *next_oid, struct object_id *result,
- struct strbuf *line_buf, int stable)
+ struct strbuf *line_buf, int stable, int verbatim)
{
int patchlen = 0, found_next = 0;
int before = -1, after = -1;
+ int diff_is_binary = 0;
+ char pre_oid_str[GIT_MAX_HEXSZ + 1], post_oid_str[GIT_MAX_HEXSZ + 1];
git_hash_ctx ctx;
the_hash_algo->init_fn(&ctx);
@@ -71,11 +74,14 @@ static int get_one_patchid(struct object_id *next_oid, struct object_id *result,
const char *p = line;
int len;
- if (!skip_prefix(line, "diff-tree ", &p) &&
- !skip_prefix(line, "commit ", &p) &&
+ /* Possibly skip over the prefix added by "log" or "format-patch" */
+ if (!skip_prefix(line, "commit ", &p) &&
!skip_prefix(line, "From ", &p) &&
- starts_with(line, "\\ ") && 12 < strlen(line))
+ starts_with(line, "\\ ") && 12 < strlen(line)) {
+ if (verbatim)
+ the_hash_algo->update_fn(&ctx, line, strlen(line));
continue;
+ }
if (!get_oid_hex(p, next_oid)) {
found_next = 1;
@@ -88,14 +94,44 @@ static int get_one_patchid(struct object_id *next_oid, struct object_id *result,
/* Parsing diff header? */
if (before == -1) {
- if (starts_with(line, "index "))
+ if (starts_with(line, "GIT binary patch") ||
+ starts_with(line, "Binary files")) {
+ diff_is_binary = 1;
+ before = 0;
+ the_hash_algo->update_fn(&ctx, pre_oid_str,
+ strlen(pre_oid_str));
+ the_hash_algo->update_fn(&ctx, post_oid_str,
+ strlen(post_oid_str));
+ if (stable)
+ flush_one_hunk(result, &ctx);
+ continue;
+ } else if (skip_prefix(line, "index ", &p)) {
+ char *oid1_end = strstr(line, "..");
+ char *oid2_end = NULL;
+ if (oid1_end)
+ oid2_end = strstr(oid1_end, " ");
+ if (!oid2_end)
+ oid2_end = line + strlen(line) - 1;
+ if (oid1_end != NULL && oid2_end != NULL) {
+ *oid1_end = *oid2_end = '\0';
+ strlcpy(pre_oid_str, p, GIT_MAX_HEXSZ + 1);
+ strlcpy(post_oid_str, oid1_end + 2, GIT_MAX_HEXSZ + 1);
+ }
continue;
- else if (starts_with(line, "--- "))
+ } else if (starts_with(line, "--- "))
before = after = 1;
else if (!isalpha(line[0]))
break;
}
+ if (diff_is_binary) {
+ if (starts_with(line, "diff ")) {
+ diff_is_binary = 0;
+ before = -1;
+ }
+ continue;
+ }
+
/* Looking for a valid hunk header? */
if (before == 0 && after == 0) {
if (starts_with(line, "@@ -")) {
@@ -120,8 +156,8 @@ static int get_one_patchid(struct object_id *next_oid, struct object_id *result,
if (line[0] == '+' || line[0] == ' ')
after--;
- /* Compute the sha without whitespace */
- len = remove_space(line);
+ /* Add line to hash algo (possibly removing whitespace) */
+ len = verbatim ? strlen(line) : remove_space(line);
patchlen += len;
the_hash_algo->update_fn(&ctx, line, len);
}
@@ -134,7 +170,7 @@ static int get_one_patchid(struct object_id *next_oid, struct object_id *result,
return patchlen;
}
-static void generate_id_list(int stable)
+static void generate_id_list(int stable, int verbatim)
{
struct object_id oid, n, result;
int patchlen;
@@ -142,21 +178,32 @@ static void generate_id_list(int stable)
oidclr(&oid);
while (!feof(stdin)) {
- patchlen = get_one_patchid(&n, &result, &line_buf, stable);
+ patchlen = get_one_patchid(&n, &result, &line_buf, stable, verbatim);
flush_current_id(patchlen, &oid, &result);
oidcpy(&oid, &n);
}
strbuf_release(&line_buf);
}
-static const char patch_id_usage[] = "git patch-id [--stable | --unstable]";
+static const char *const patch_id_usage[] = {
+ N_("git patch-id [--stable | --unstable | --verbatim]"), NULL
+};
+
+struct patch_id_opts {
+ int stable;
+ int verbatim;
+};
static int git_patch_id_config(const char *var, const char *value, void *cb)
{
- int *stable = cb;
+ struct patch_id_opts *opts = cb;
if (!strcmp(var, "patchid.stable")) {
- *stable = git_config_bool(var, value);
+ opts->stable = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "patchid.verbatim")) {
+ opts->verbatim = git_config_bool(var, value);
return 0;
}
@@ -165,21 +212,29 @@ static int git_patch_id_config(const char *var, const char *value, void *cb)
int cmd_patch_id(int argc, const char **argv, const char *prefix)
{
- int stable = -1;
-
- git_config(git_patch_id_config, &stable);
-
- /* If nothing is set, default to unstable. */
- if (stable < 0)
- stable = 0;
-
- if (argc == 2 && !strcmp(argv[1], "--stable"))
- stable = 1;
- else if (argc == 2 && !strcmp(argv[1], "--unstable"))
- stable = 0;
- else if (argc != 1)
- usage(patch_id_usage);
-
- generate_id_list(stable);
+ /* if nothing is set, default to unstable */
+ struct patch_id_opts config = {0, 0};
+ int opts = 0;
+ struct option builtin_patch_id_options[] = {
+ OPT_CMDMODE(0, "unstable", &opts,
+ N_("use the unstable patch-id algorithm"), 1),
+ OPT_CMDMODE(0, "stable", &opts,
+ N_("use the stable patch-id algorithm"), 2),
+ OPT_CMDMODE(0, "verbatim", &opts,
+ N_("don't strip whitespace from the patch"), 3),
+ OPT_END()
+ };
+
+ git_config(git_patch_id_config, &config);
+
+ /* verbatim implies stable */
+ if (config.verbatim)
+ config.stable = 1;
+
+ argc = parse_options(argc, argv, prefix, builtin_patch_id_options,
+ patch_id_usage, 0);
+
+ generate_id_list(opts ? opts > 1 : config.stable,
+ opts ? opts == 3 : config.verbatim);
return 0;
}
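
With the parse_options() conversion above, --stable, --unstable and --verbatim are mutually
exclusive OPT_CMDMODE flags, and --verbatim implies the stable algorithm (opts == 3 selects
both in the generate_id_list() call). A typical invocation still reads the patch from stdin,
e.g. something like "git diff | git patch-id --verbatim".
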
diff --git a/builtin/pull.c b/builtin/pull.c
index 403a24d7ca..b21edd767a 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -515,76 +515,75 @@ static void parse_repo_refspecs(int argc, const char **argv, const char **repo,
*/
static int run_fetch(const char *repo, const char **refspecs)
{
- struct strvec args = STRVEC_INIT;
- int ret;
+ struct child_process cmd = CHILD_PROCESS_INIT;
- strvec_pushl(&args, "fetch", "--update-head-ok", NULL);
+ strvec_pushl(&cmd.args, "fetch", "--update-head-ok", NULL);
/* Shared options */
- argv_push_verbosity(&args);
+ argv_push_verbosity(&cmd.args);
if (opt_progress)
- strvec_push(&args, opt_progress);
+ strvec_push(&cmd.args, opt_progress);
/* Options passed to git-fetch */
if (opt_all)
- strvec_push(&args, opt_all);
+ strvec_push(&cmd.args, opt_all);
if (opt_append)
- strvec_push(&args, opt_append);
+ strvec_push(&cmd.args, opt_append);
if (opt_upload_pack)
- strvec_push(&args, opt_upload_pack);
- argv_push_force(&args);
+ strvec_push(&cmd.args, opt_upload_pack);
+ argv_push_force(&cmd.args);
if (opt_tags)
- strvec_push(&args, opt_tags);
+ strvec_push(&cmd.args, opt_tags);
if (opt_prune)
- strvec_push(&args, opt_prune);
+ strvec_push(&cmd.args, opt_prune);
if (recurse_submodules_cli != RECURSE_SUBMODULES_DEFAULT)
switch (recurse_submodules_cli) {
case RECURSE_SUBMODULES_ON:
- strvec_push(&args, "--recurse-submodules=on");
+ strvec_push(&cmd.args, "--recurse-submodules=on");
break;
case RECURSE_SUBMODULES_OFF:
- strvec_push(&args, "--recurse-submodules=no");
+ strvec_push(&cmd.args, "--recurse-submodules=no");
break;
case RECURSE_SUBMODULES_ON_DEMAND:
- strvec_push(&args, "--recurse-submodules=on-demand");
+ strvec_push(&cmd.args, "--recurse-submodules=on-demand");
break;
default:
BUG("submodule recursion option not understood");
}
if (max_children)
- strvec_push(&args, max_children);
+ strvec_push(&cmd.args, max_children);
if (opt_dry_run)
- strvec_push(&args, "--dry-run");
+ strvec_push(&cmd.args, "--dry-run");
if (opt_keep)
- strvec_push(&args, opt_keep);
+ strvec_push(&cmd.args, opt_keep);
if (opt_depth)
- strvec_push(&args, opt_depth);
+ strvec_push(&cmd.args, opt_depth);
if (opt_unshallow)
- strvec_push(&args, opt_unshallow);
+ strvec_push(&cmd.args, opt_unshallow);
if (opt_update_shallow)
- strvec_push(&args, opt_update_shallow);
+ strvec_push(&cmd.args, opt_update_shallow);
if (opt_refmap)
- strvec_push(&args, opt_refmap);
+ strvec_push(&cmd.args, opt_refmap);
if (opt_ipv4)
- strvec_push(&args, opt_ipv4);
+ strvec_push(&cmd.args, opt_ipv4);
if (opt_ipv6)
- strvec_push(&args, opt_ipv6);
+ strvec_push(&cmd.args, opt_ipv6);
if (opt_show_forced_updates > 0)
- strvec_push(&args, "--show-forced-updates");
+ strvec_push(&cmd.args, "--show-forced-updates");
else if (opt_show_forced_updates == 0)
- strvec_push(&args, "--no-show-forced-updates");
+ strvec_push(&cmd.args, "--no-show-forced-updates");
if (set_upstream)
- strvec_push(&args, set_upstream);
- strvec_pushv(&args, opt_fetch.v);
+ strvec_push(&cmd.args, set_upstream);
+ strvec_pushv(&cmd.args, opt_fetch.v);
if (repo) {
- strvec_push(&args, repo);
- strvec_pushv(&args, refspecs);
+ strvec_push(&cmd.args, repo);
+ strvec_pushv(&cmd.args, refspecs);
} else if (*refspecs)
BUG("refspecs without repo?");
- ret = run_command_v_opt(args.v, RUN_GIT_CMD | RUN_CLOSE_OBJECT_STORE);
- strvec_clear(&args);
- return ret;
+ cmd.git_cmd = 1;
+ cmd.close_object_store = 1;
+ return run_command(&cmd);
}
/**
@@ -653,52 +652,50 @@ static int update_submodules(void)
*/
static int run_merge(void)
{
- int ret;
- struct strvec args = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
- strvec_pushl(&args, "merge", NULL);
+ strvec_pushl(&cmd.args, "merge", NULL);
/* Shared options */
- argv_push_verbosity(&args);
+ argv_push_verbosity(&cmd.args);
if (opt_progress)
- strvec_push(&args, opt_progress);
+ strvec_push(&cmd.args, opt_progress);
/* Options passed to git-merge */
if (opt_diffstat)
- strvec_push(&args, opt_diffstat);
+ strvec_push(&cmd.args, opt_diffstat);
if (opt_log)
- strvec_push(&args, opt_log);
+ strvec_push(&cmd.args, opt_log);
if (opt_signoff)
- strvec_push(&args, opt_signoff);
+ strvec_push(&cmd.args, opt_signoff);
if (opt_squash)
- strvec_push(&args, opt_squash);
+ strvec_push(&cmd.args, opt_squash);
if (opt_commit)
- strvec_push(&args, opt_commit);
+ strvec_push(&cmd.args, opt_commit);
if (opt_edit)
- strvec_push(&args, opt_edit);
+ strvec_push(&cmd.args, opt_edit);
if (cleanup_arg)
- strvec_pushf(&args, "--cleanup=%s", cleanup_arg);
+ strvec_pushf(&cmd.args, "--cleanup=%s", cleanup_arg);
if (opt_ff)
- strvec_push(&args, opt_ff);
+ strvec_push(&cmd.args, opt_ff);
if (opt_verify)
- strvec_push(&args, opt_verify);
+ strvec_push(&cmd.args, opt_verify);
if (opt_verify_signatures)
- strvec_push(&args, opt_verify_signatures);
- strvec_pushv(&args, opt_strategies.v);
- strvec_pushv(&args, opt_strategy_opts.v);
+ strvec_push(&cmd.args, opt_verify_signatures);
+ strvec_pushv(&cmd.args, opt_strategies.v);
+ strvec_pushv(&cmd.args, opt_strategy_opts.v);
if (opt_gpg_sign)
- strvec_push(&args, opt_gpg_sign);
+ strvec_push(&cmd.args, opt_gpg_sign);
if (opt_autostash == 0)
- strvec_push(&args, "--no-autostash");
+ strvec_push(&cmd.args, "--no-autostash");
else if (opt_autostash == 1)
- strvec_push(&args, "--autostash");
+ strvec_push(&cmd.args, "--autostash");
if (opt_allow_unrelated_histories > 0)
- strvec_push(&args, "--allow-unrelated-histories");
+ strvec_push(&cmd.args, "--allow-unrelated-histories");
- strvec_push(&args, "FETCH_HEAD");
- ret = run_command_v_opt(args.v, RUN_GIT_CMD);
- strvec_clear(&args);
- return ret;
+ strvec_push(&cmd.args, "FETCH_HEAD");
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
}
/**
@@ -879,43 +876,41 @@ static int get_rebase_newbase_and_upstream(struct object_id *newbase,
static int run_rebase(const struct object_id *newbase,
const struct object_id *upstream)
{
- int ret;
- struct strvec args = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
- strvec_push(&args, "rebase");
+ strvec_push(&cmd.args, "rebase");
/* Shared options */
- argv_push_verbosity(&args);
+ argv_push_verbosity(&cmd.args);
/* Options passed to git-rebase */
if (opt_rebase == REBASE_MERGES)
- strvec_push(&args, "--rebase-merges");
+ strvec_push(&cmd.args, "--rebase-merges");
else if (opt_rebase == REBASE_INTERACTIVE)
- strvec_push(&args, "--interactive");
+ strvec_push(&cmd.args, "--interactive");
if (opt_diffstat)
- strvec_push(&args, opt_diffstat);
- strvec_pushv(&args, opt_strategies.v);
- strvec_pushv(&args, opt_strategy_opts.v);
+ strvec_push(&cmd.args, opt_diffstat);
+ strvec_pushv(&cmd.args, opt_strategies.v);
+ strvec_pushv(&cmd.args, opt_strategy_opts.v);
if (opt_gpg_sign)
- strvec_push(&args, opt_gpg_sign);
+ strvec_push(&cmd.args, opt_gpg_sign);
if (opt_signoff)
- strvec_push(&args, opt_signoff);
+ strvec_push(&cmd.args, opt_signoff);
if (opt_autostash == 0)
- strvec_push(&args, "--no-autostash");
+ strvec_push(&cmd.args, "--no-autostash");
else if (opt_autostash == 1)
- strvec_push(&args, "--autostash");
+ strvec_push(&cmd.args, "--autostash");
if (opt_verify_signatures &&
!strcmp(opt_verify_signatures, "--verify-signatures"))
warning(_("ignoring --verify-signatures for rebase"));
- strvec_push(&args, "--onto");
- strvec_push(&args, oid_to_hex(newbase));
+ strvec_push(&cmd.args, "--onto");
+ strvec_push(&cmd.args, oid_to_hex(newbase));
- strvec_push(&args, oid_to_hex(upstream));
+ strvec_push(&cmd.args, oid_to_hex(upstream));
- ret = run_command_v_opt(args.v, RUN_GIT_CMD);
- strvec_clear(&args);
- return ret;
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
}
static int get_can_ff(struct object_id *orig_head,
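The pull.c hunks above (and the remote.c ones further down) all follow the same conversion: instead of collecting arguments in a standalone strvec and handing it to run_command_v_opt(), the arguments are pushed directly into a struct child_process, cmd.git_cmd selects the "git" wrapper, and run_command() releases the strvec when the child finishes. A minimal sketch of that pattern, not part of the patch, using only the run-command and strvec calls that appear above:

#include "cache.h"
#include "run-command.h"
#include "strvec.h"

/*
 * Sketch of the conversion pattern: build the command line in
 * cmd.args, mark it as a git command, and let run_command() clean
 * up the strvec once the child process has finished.
 */
static int run_git_fetch(const char *remote, int verbose)
{
        struct child_process cmd = CHILD_PROCESS_INIT;

        strvec_push(&cmd.args, "fetch");
        if (verbose)
                strvec_push(&cmd.args, "-v");
        strvec_push(&cmd.args, remote);
        cmd.git_cmd = 1;

        return run_command(&cmd);
}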
diff --git a/builtin/push.c b/builtin/push.c
index df0d68e599..60ac8017e5 100644
--- a/builtin/push.c
+++ b/builtin/push.c
@@ -169,8 +169,8 @@ static NORETURN void die_push_simple(struct branch *branch,
if (git_branch_track != BRANCH_TRACK_SIMPLE)
advice_automergesimple_maybe = _("\n"
"To avoid automatically configuring "
- "upstream branches when their name\n"
- "doesn't match the local branch, see option "
+ "an upstream branch when its name\n"
+ "won't match the local branch, see option "
"'simple' of branch.autoSetupMerge\n"
"in 'git help config'.\n");
die(_("The upstream branch of your current branch does not match\n"
@@ -466,8 +466,16 @@ static int option_parse_recurse_submodules(const struct option *opt,
if (unset)
*recurse_submodules = RECURSE_SUBMODULES_OFF;
- else
- *recurse_submodules = parse_push_recurse_submodules_arg(opt->long_name, arg);
+ else {
+ if (!strcmp(arg, "only-is-on-demand")) {
+ if (*recurse_submodules == RECURSE_SUBMODULES_ONLY) {
+ warning(_("recursing into submodule with push.recurseSubmodules=only; using on-demand instead"));
+ *recurse_submodules = RECURSE_SUBMODULES_ON_DEMAND;
+ }
+ } else {
+ *recurse_submodules = parse_push_recurse_submodules_arg(opt->long_name, arg);
+ }
+ }
return 0;
}
diff --git a/builtin/read-tree.c b/builtin/read-tree.c
index 9f1f33e954..45c6652444 100644
--- a/builtin/read-tree.c
+++ b/builtin/read-tree.c
@@ -38,7 +38,9 @@ static int list_tree(struct object_id *oid)
}
static const char * const read_tree_usage[] = {
- N_("git read-tree [(-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>) [-u | -i]] [--no-sparse-checkout] [--index-output=<file>] (--empty | <tree-ish1> [<tree-ish2> [<tree-ish3>]])"),
+ N_("git read-tree [(-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>)\n"
+ " [-u | -i]] [--index-output=<file>] [--no-sparse-checkout]\n"
+ " (--empty | <tree-ish1> [<tree-ish2> [<tree-ish3>]])"),
NULL
};
@@ -247,6 +249,10 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
if (opts.debug_unpack)
opts.fn = debug_merge;
+ /* If we're going to prime_cache_tree later, skip cache tree update */
+ if (nr_trees == 1 && !opts.prefix)
+ opts.skip_cache_tree_update = 1;
+
cache_tree_free(&active_cache_tree);
for (i = 0; i < nr_trees; i++) {
struct tree *tree = trees[i];
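The read-tree hunk above (and the matching reset.c hunk further down) set the new skip_cache_tree_update bit when prime_cache_tree() is going to rebuild the cache tree right afterwards, so unpack_trees() does not waste time maintaining it incrementally. A rough, hypothetical sketch of that single-tree path, assuming the unpack-trees, cache-tree and tree-walk APIs behave as in the surrounding hunks:

#include "cache.h"
#include "tree.h"
#include "tree-walk.h"
#include "unpack-trees.h"
#include "cache-tree.h"

/*
 * Rough sketch: read a single tree into the index, skipping the
 * incremental cache-tree update because prime_cache_tree() rebuilds
 * the whole cache tree from the same tree immediately afterwards.
 */
static int read_single_tree(struct tree *tree)
{
        struct unpack_trees_options opts = { 0 };
        struct tree_desc desc;

        opts.head_idx = -1;
        opts.src_index = &the_index;
        opts.dst_index = &the_index;
        opts.skip_cache_tree_update = 1;        /* new bit from this series */

        parse_tree(tree);
        init_tree_desc(&desc, tree->buffer, tree->size);
        cache_tree_free(&active_cache_tree);
        if (unpack_trees(1, &desc, &opts))
                return -1;

        prime_cache_tree(the_repository, &the_index, tree);
        return 0;
}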
diff --git a/builtin/rebase.c b/builtin/rebase.c
index 56e4214b44..3f360eb2f3 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -30,8 +30,6 @@
#include "reset.h"
#include "hook.h"
-#define DEFAULT_REFLOG_ACTION "rebase"
-
static char const * const builtin_rebase_usage[] = {
N_("git rebase [-i] [options] [--exec <cmd>] "
"[--onto <newbase> | --keep-base] [<upstream> [<branch>]]"),
@@ -59,6 +57,26 @@ enum empty_type {
EMPTY_ASK
};
+enum action {
+ ACTION_NONE = 0,
+ ACTION_CONTINUE,
+ ACTION_SKIP,
+ ACTION_ABORT,
+ ACTION_QUIT,
+ ACTION_EDIT_TODO,
+ ACTION_SHOW_CURRENT_PATCH
+};
+
+static const char *action_names[] = {
+ "undefined",
+ "continue",
+ "skip",
+ "abort",
+ "quit",
+ "edit_todo",
+ "show_current_patch"
+};
+
struct rebase_options {
enum rebase_type type;
enum empty_type empty;
@@ -68,7 +86,7 @@ struct rebase_options {
const char *upstream_name;
const char *upstream_arg;
char *head_name;
- struct object_id orig_head;
+ struct commit *orig_head;
struct commit *onto;
const char *onto_name;
const char *revisions;
@@ -85,7 +103,8 @@ struct rebase_options {
REBASE_INTERACTIVE_EXPLICIT = 1<<4,
} flags;
struct strvec git_am_opts;
- const char *action;
+ enum action action;
+ char *reflog_action;
int signoff;
int allow_rerere_autoupdate;
int keep_empty;
@@ -139,6 +158,7 @@ static struct replay_opts get_replay_opts(const struct rebase_options *opts)
opts->committer_date_is_author_date;
replay.ignore_date = opts->ignore_date;
replay.gpg_sign = xstrdup_or_null(opts->gpg_sign_opt);
+ replay.reflog_action = xstrdup(opts->reflog_action);
if (opts->strategy)
replay.strategy = xstrdup_or_null(opts->strategy);
else if (!replay.strategy && replay.default_strategy) {
@@ -157,24 +177,6 @@ static struct replay_opts get_replay_opts(const struct rebase_options *opts)
return replay;
}
-enum action {
- ACTION_NONE = 0,
- ACTION_CONTINUE,
- ACTION_SKIP,
- ACTION_ABORT,
- ACTION_QUIT,
- ACTION_EDIT_TODO,
- ACTION_SHOW_CURRENT_PATCH
-};
-
-static const char *action_names[] = { "undefined",
- "continue",
- "skip",
- "abort",
- "quit",
- "edit_todo",
- "show_current_patch" };
-
static int edit_todo_file(unsigned flags)
{
const char *todo_file = rebase_path_todo();
@@ -261,13 +263,13 @@ static int do_interactive_rebase(struct rebase_options *opts, unsigned flags)
struct replay_opts replay = get_replay_opts(opts);
struct string_list commands = STRING_LIST_INIT_DUP;
- if (get_revision_ranges(opts->upstream, opts->onto, &opts->orig_head,
+ if (get_revision_ranges(opts->upstream, opts->onto, &opts->orig_head->object.oid,
&revisions, &shortrevisions))
return -1;
if (init_basic_state(&replay,
opts->head_name ? opts->head_name : "detached HEAD",
- opts->onto, &opts->orig_head)) {
+ opts->onto, &opts->orig_head->object.oid)) {
free(revisions);
free(shortrevisions);
@@ -298,9 +300,8 @@ static int do_interactive_rebase(struct rebase_options *opts, unsigned flags)
split_exec_commands(opts->cmd, &commands);
ret = complete_action(the_repository, &replay, flags,
shortrevisions, opts->onto_name, opts->onto,
- &opts->orig_head, &commands, opts->autosquash,
- opts->update_refs,
- &todo_list);
+ &opts->orig_head->object.oid, &commands,
+ opts->autosquash, opts->update_refs, &todo_list);
}
string_list_clear(&commands, 0);
@@ -312,8 +313,7 @@ static int do_interactive_rebase(struct rebase_options *opts, unsigned flags)
return ret;
}
-static int run_sequencer_rebase(struct rebase_options *opts,
- enum action command)
+static int run_sequencer_rebase(struct rebase_options *opts)
{
unsigned flags = 0;
int abbreviate_commands = 0, ret = 0;
@@ -328,7 +328,7 @@ static int run_sequencer_rebase(struct rebase_options *opts,
flags |= opts->reapply_cherry_picks ? TODO_LIST_REAPPLY_CHERRY_PICKS : 0;
flags |= opts->flags & REBASE_NO_QUIET ? TODO_LIST_WARN_SKIPPED_CHERRY_PICKS : 0;
- switch (command) {
+ switch (opts->action) {
case ACTION_NONE: {
if (!opts->onto && !opts->upstream)
die(_("a base commit must be provided with --upstream or --onto"));
@@ -361,7 +361,7 @@ static int run_sequencer_rebase(struct rebase_options *opts,
break;
}
default:
- BUG("invalid command '%d'", command);
+ BUG("invalid command '%d'", opts->action);
}
return ret;
@@ -431,9 +431,9 @@ static int read_basic_state(struct rebase_options *opts)
opts->head_name = starts_with(head_name.buf, "refs/") ?
xstrdup(head_name.buf) : NULL;
strbuf_release(&head_name);
- if (get_oid(buf.buf, &oid))
- return error(_("could not get 'onto': '%s'"), buf.buf);
- opts->onto = lookup_commit_or_die(&oid, buf.buf);
+ if (get_oid_hex(buf.buf, &oid) ||
+ !(opts->onto = lookup_commit_object(the_repository, &oid)))
+ return error(_("invalid onto: '%s'"), buf.buf);
/*
* We always write to orig-head, but interactive rebase used to write to
@@ -448,7 +448,8 @@ static int read_basic_state(struct rebase_options *opts)
} else if (!read_oneliner(&buf, state_dir_path("head", opts),
READ_ONELINER_WARN_MISSING))
return -1;
- if (get_oid(buf.buf, &opts->orig_head))
+ if (get_oid_hex(buf.buf, &oid) ||
+ !(opts->orig_head = lookup_commit_object(the_repository, &oid)))
return error(_("invalid orig-head: '%s'"), buf.buf);
if (file_exists(state_dir_path("quiet", opts)))
@@ -517,7 +518,7 @@ static int rebase_write_basic_state(struct rebase_options *opts)
write_file(state_dir_path("onto", opts), "%s",
opts->onto ? oid_to_hex(&opts->onto->object.oid) : "");
write_file(state_dir_path("orig-head", opts), "%s",
- oid_to_hex(&opts->orig_head));
+ oid_to_hex(&opts->orig_head->object.oid));
if (!(opts->flags & REBASE_NO_QUIET))
write_file(state_dir_path("quiet", opts), "%s", "");
if (opts->flags & REBASE_VERBOSE)
@@ -583,10 +584,11 @@ static int move_to_original_branch(struct rebase_options *opts)
if (!opts->onto)
BUG("move_to_original_branch without onto");
- strbuf_addf(&branch_reflog, "rebase finished: %s onto %s",
+ strbuf_addf(&branch_reflog, "%s (finish): %s onto %s",
+ opts->reflog_action,
opts->head_name, oid_to_hex(&opts->onto->object.oid));
- strbuf_addf(&head_reflog, "rebase finished: returning to %s",
- opts->head_name);
+ strbuf_addf(&head_reflog, "%s (finish): returning to %s",
+ opts->reflog_action, opts->head_name);
ropts.branch = opts->head_name;
ropts.flags = RESET_HEAD_REFS_ONLY;
ropts.branch_msg = branch_reflog.buf;
@@ -615,8 +617,9 @@ static int run_am(struct rebase_options *opts)
am.git_cmd = 1;
strvec_push(&am.args, "am");
-
- if (opts->action && !strcmp("continue", opts->action)) {
+ strvec_pushf(&am.env, GIT_REFLOG_ACTION_ENVIRONMENT "=%s (pick)",
+ opts->reflog_action);
+ if (opts->action == ACTION_CONTINUE) {
strvec_push(&am.args, "--resolved");
strvec_pushf(&am.args, "--resolvemsg=%s", resolvemsg);
if (opts->gpg_sign_opt)
@@ -627,7 +630,7 @@ static int run_am(struct rebase_options *opts)
return move_to_original_branch(opts);
}
- if (opts->action && !strcmp("skip", opts->action)) {
+ if (opts->action == ACTION_SKIP) {
strvec_push(&am.args, "--skip");
strvec_pushf(&am.args, "--resolvemsg=%s", resolvemsg);
status = run_command(&am);
@@ -636,7 +639,7 @@ static int run_am(struct rebase_options *opts)
return move_to_original_branch(opts);
}
- if (opts->action && !strcmp("show-current-patch", opts->action)) {
+ if (opts->action == ACTION_SHOW_CURRENT_PATCH) {
strvec_push(&am.args, "--show-current-patch");
return run_command(&am);
}
@@ -646,7 +649,7 @@ static int run_am(struct rebase_options *opts)
/* this is now equivalent to !opts->upstream */
&opts->onto->object.oid :
&opts->upstream->object.oid),
- oid_to_hex(&opts->orig_head));
+ oid_to_hex(&opts->orig_head->object.oid));
rebased_patches = xstrdup(git_path("rebased-patches"));
format_patch.out = open(rebased_patches,
@@ -680,9 +683,9 @@ static int run_am(struct rebase_options *opts)
free(rebased_patches);
strvec_clear(&am.args);
- ropts.oid = &opts->orig_head;
+ ropts.oid = &opts->orig_head->object.oid;
ropts.branch = opts->head_name;
- ropts.default_reflog_action = DEFAULT_REFLOG_ACTION;
+ ropts.default_reflog_action = opts->reflog_action;
reset_head(the_repository, &ropts);
error(_("\ngit encountered an error while preparing the "
"patches to replay\n"
@@ -729,7 +732,7 @@ static int run_am(struct rebase_options *opts)
return status;
}
-static int run_specific_rebase(struct rebase_options *opts, enum action action)
+static int run_specific_rebase(struct rebase_options *opts)
{
int status;
@@ -747,7 +750,7 @@ static int run_specific_rebase(struct rebase_options *opts, enum action action)
opts->gpg_sign_opt = tmp;
}
- status = run_sequencer_rebase(opts, action);
+ status = run_sequencer_rebase(opts);
} else if (opts->type == REBASE_APPLY)
status = run_am(opts);
else
@@ -831,9 +834,8 @@ static int checkout_up_to_date(struct rebase_options *options)
int ret = 0;
strbuf_addf(&buf, "%s: checkout %s",
- getenv(GIT_REFLOG_ACTION_ENVIRONMENT),
- options->switch_to);
- ropts.oid = &options->orig_head;
+ options->reflog_action, options->switch_to);
+ ropts.oid = &options->orig_head->object.oid;
ropts.branch = options->head_name;
ropts.flags = RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
if (!ropts.branch)
@@ -866,32 +868,23 @@ static int is_linear_history(struct commit *from, struct commit *to)
static int can_fast_forward(struct commit *onto, struct commit *upstream,
struct commit *restrict_revision,
- struct object_id *head_oid, struct object_id *merge_base)
+ struct commit *head, struct object_id *branch_base)
{
- struct commit *head = lookup_commit(the_repository, head_oid);
struct commit_list *merge_bases = NULL;
int res = 0;
- if (!head)
- goto done;
+ if (is_null_oid(branch_base))
+ goto done; /* fill_branch_base() found multiple merge bases */
- merge_bases = get_merge_bases(onto, head);
- if (!merge_bases || merge_bases->next) {
- oidcpy(merge_base, null_oid());
+ if (!oideq(branch_base, &onto->object.oid))
goto done;
- }
- oidcpy(merge_base, &merge_bases->item->object.oid);
- if (!oideq(merge_base, &onto->object.oid))
- goto done;
-
- if (restrict_revision && !oideq(&restrict_revision->object.oid, merge_base))
+ if (restrict_revision && !oideq(&restrict_revision->object.oid, branch_base))
goto done;
if (!upstream)
goto done;
- free_commit_list(merge_bases);
merge_bases = get_merge_bases(upstream, head);
if (!merge_bases || merge_bases->next)
goto done;
@@ -906,6 +899,20 @@ done:
return res && is_linear_history(onto, head);
}
+static void fill_branch_base(struct rebase_options *options,
+ struct object_id *branch_base)
+{
+ struct commit_list *merge_bases = NULL;
+
+ merge_bases = get_merge_bases(options->onto, options->orig_head);
+ if (!merge_bases || merge_bases->next)
+ oidcpy(branch_base, null_oid());
+ else
+ oidcpy(branch_base, &merge_bases->item->object.oid);
+
+ free_commit_list(merge_bases);
+}
+
static int parse_opt_am(const struct option *opt, const char *arg, int unset)
{
struct rebase_options *opts = opt->value;
@@ -1000,23 +1007,6 @@ static void NORETURN error_on_missing_default_upstream(void)
exit(1);
}
-static void set_reflog_action(struct rebase_options *options)
-{
- const char *env;
- struct strbuf buf = STRBUF_INIT;
-
- if (!is_merge(options))
- return;
-
- env = getenv(GIT_REFLOG_ACTION_ENVIRONMENT);
- if (env && strcmp("rebase", env))
- return; /* only override it if it is "rebase" */
-
- strbuf_addf(&buf, "rebase (%s)", options->action);
- setenv(GIT_REFLOG_ACTION_ENVIRONMENT, buf.buf, 1);
- strbuf_release(&buf);
-}
-
static int check_exec_cmd(const char *cmd)
{
if (strchr(cmd, '\n'))
@@ -1039,9 +1029,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
struct strbuf msg = STRBUF_INIT;
struct strbuf revisions = STRBUF_INIT;
struct strbuf buf = STRBUF_INIT;
- struct object_id merge_base;
+ struct object_id branch_base;
int ignore_whitespace = 0;
- enum action action = ACTION_NONE;
const char *gpg_sign = NULL;
struct string_list exec = STRING_LIST_INIT_NODUP;
const char *rebase_merges = NULL;
@@ -1090,18 +1079,18 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
OPT_BIT(0, "no-ff", &options.flags,
N_("cherry-pick all commits, even if unchanged"),
REBASE_FORCE),
- OPT_CMDMODE(0, "continue", &action, N_("continue"),
+ OPT_CMDMODE(0, "continue", &options.action, N_("continue"),
ACTION_CONTINUE),
- OPT_CMDMODE(0, "skip", &action,
+ OPT_CMDMODE(0, "skip", &options.action,
N_("skip current patch and continue"), ACTION_SKIP),
- OPT_CMDMODE(0, "abort", &action,
+ OPT_CMDMODE(0, "abort", &options.action,
N_("abort and check out the original branch"),
ACTION_ABORT),
- OPT_CMDMODE(0, "quit", &action,
+ OPT_CMDMODE(0, "quit", &options.action,
N_("abort but keep HEAD where it is"), ACTION_QUIT),
- OPT_CMDMODE(0, "edit-todo", &action, N_("edit the todo list "
+ OPT_CMDMODE(0, "edit-todo", &options.action, N_("edit the todo list "
"during an interactive rebase"), ACTION_EDIT_TODO),
- OPT_CMDMODE(0, "show-current-patch", &action,
+ OPT_CMDMODE(0, "show-current-patch", &options.action,
N_("show the patch file being applied or merged"),
ACTION_SHOW_CURRENT_PATCH),
OPT_CALLBACK_F(0, "apply", &options, NULL,
@@ -1175,6 +1164,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
prepare_repo_settings(the_repository);
the_repository->settings.command_requires_full_index = 0;
+ options.reapply_cherry_picks = -1;
options.allow_empty_message = 1;
git_config(rebase_config, &options);
/* options.gpg_sign_opt will be either "-S" or NULL */
@@ -1192,7 +1182,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
} else if (is_directory(merge_dir())) {
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/rewritten", merge_dir());
- if (!(action == ACTION_ABORT) && is_directory(buf.buf)) {
+ if (!(options.action == ACTION_ABORT) && is_directory(buf.buf)) {
die(_("`rebase --preserve-merges` (-p) is no longer supported.\n"
"Use `git rebase --abort` to terminate current rebase.\n"
"Or downgrade to v2.33, or earlier, to complete the rebase."));
@@ -1219,7 +1209,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
"Note: Your `pull.rebase` configuration may also be set to 'preserve',\n"
"which is no longer supported; use 'merges' instead"));
- if (action != ACTION_NONE && total_argc != 2) {
+ if (options.action != ACTION_NONE && total_argc != 2) {
usage_with_options(builtin_rebase_usage,
builtin_rebase_options);
}
@@ -1233,16 +1223,27 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
die(_("options '%s' and '%s' cannot be used together"), "--keep-base", "--onto");
if (options.root)
die(_("options '%s' and '%s' cannot be used together"), "--keep-base", "--root");
+ /*
+ * --keep-base defaults to --no-fork-point to keep the
+ * base the same.
+ */
+ if (options.fork_point < 0)
+ options.fork_point = 0;
}
+ /*
+ * --keep-base defaults to --reapply-cherry-picks to avoid losing
+ * commits when using this option.
+ */
+ if (options.reapply_cherry_picks < 0)
+ options.reapply_cherry_picks = keep_base;
if (options.root && options.fork_point > 0)
die(_("options '%s' and '%s' cannot be used together"), "--root", "--fork-point");
- if (action != ACTION_NONE && !in_progress)
+ if (options.action != ACTION_NONE && !in_progress)
die(_("No rebase in progress?"));
- setenv(GIT_REFLOG_ACTION_ENVIRONMENT, "rebase", 0);
- if (action == ACTION_EDIT_TODO && !is_merge(&options))
+ if (options.action == ACTION_EDIT_TODO && !is_merge(&options))
die(_("The --edit-todo action can only be used during "
"interactive rebase."));
@@ -1252,18 +1253,19 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
else if (exec.nr)
trace2_cmd_mode("interactive-exec");
else
- trace2_cmd_mode(action_names[action]);
+ trace2_cmd_mode(action_names[options.action]);
}
- switch (action) {
+ options.reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT);
+ options.reflog_action =
+ xstrdup(options.reflog_action ? options.reflog_action : "rebase");
+
+ switch (options.action) {
case ACTION_CONTINUE: {
struct object_id head;
struct lock_file lock_file = LOCK_INIT;
int fd;
- options.action = "continue";
- set_reflog_action(&options);
-
/* Sanity check */
if (get_oid("HEAD", &head))
die(_("Cannot read HEAD"));
@@ -1289,9 +1291,6 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
case ACTION_SKIP: {
struct string_list merge_rr = STRING_LIST_INIT_DUP;
- options.action = "skip";
- set_reflog_action(&options);
-
rerere_clear(the_repository, &merge_rr);
string_list_clear(&merge_rr, 1);
ropts.flags = RESET_HEAD_HARD;
@@ -1304,21 +1303,26 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
}
case ACTION_ABORT: {
struct string_list merge_rr = STRING_LIST_INIT_DUP;
- options.action = "abort";
- set_reflog_action(&options);
+ struct strbuf head_msg = STRBUF_INIT;
rerere_clear(the_repository, &merge_rr);
string_list_clear(&merge_rr, 1);
if (read_basic_state(&options))
exit(1);
- ropts.oid = &options.orig_head;
+
+ strbuf_addf(&head_msg, "%s (abort): returning to %s",
+ options.reflog_action,
+ options.head_name ? options.head_name
+ : oid_to_hex(&options.orig_head->object.oid));
+ ropts.oid = &options.orig_head->object.oid;
+ ropts.head_msg = head_msg.buf;
ropts.branch = options.head_name;
ropts.flags = RESET_HEAD_HARD;
- ropts.default_reflog_action = DEFAULT_REFLOG_ACTION;
if (reset_head(the_repository, &ropts) < 0)
die(_("could not move back to %s"),
- oid_to_hex(&options.orig_head));
+ oid_to_hex(&options.orig_head->object.oid));
+ strbuf_release(&head_msg);
remove_branch_state(the_repository, 0);
ret = finish_rebase(&options);
goto cleanup;
@@ -1341,17 +1345,15 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
goto cleanup;
}
case ACTION_EDIT_TODO:
- options.action = "edit-todo";
options.dont_finish_rebase = 1;
goto run_rebase;
case ACTION_SHOW_CURRENT_PATCH:
- options.action = "show-current-patch";
options.dont_finish_rebase = 1;
goto run_rebase;
case ACTION_NONE:
break;
default:
- BUG("action: %d", action);
+ BUG("action: %d", options.action);
}
/* Make sure no rebase is in progress */
@@ -1375,7 +1377,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
}
if ((options.flags & REBASE_INTERACTIVE_EXPLICIT) ||
- (action != ACTION_NONE) ||
+ (options.action != ACTION_NONE) ||
(exec.nr > 0) ||
options.autosquash) {
allow_preemptive_ff = 0;
@@ -1410,7 +1412,11 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
if (options.empty != EMPTY_UNSPECIFIED)
imply_merge(&options, "--empty");
- if (options.reapply_cherry_picks)
+ /*
+ * --keep-base implements --reapply-cherry-picks by altering upstream so
+ * it works with both backends.
+ */
+ if (options.reapply_cherry_picks && !keep_base)
imply_merge(&options, "--reapply-cherry-picks");
if (gpg_sign)
@@ -1604,25 +1610,27 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
*/
if (argc == 1) {
/* Is it "rebase other branchname" or "rebase other commit"? */
+ struct object_id branch_oid;
branch_name = argv[0];
options.switch_to = argv[0];
/* Is it a local branch? */
strbuf_reset(&buf);
strbuf_addf(&buf, "refs/heads/%s", branch_name);
- if (!read_ref(buf.buf, &options.orig_head)) {
+ if (!read_ref(buf.buf, &branch_oid)) {
die_if_checked_out(buf.buf, 1);
options.head_name = xstrdup(buf.buf);
+ options.orig_head =
+ lookup_commit_object(the_repository,
+ &branch_oid);
/* If not is it a valid ref (branch or commit)? */
} else {
- struct commit *commit =
+ options.orig_head =
lookup_commit_reference_by_name(branch_name);
- if (!commit)
- die(_("no such branch/commit '%s'"),
- branch_name);
- oidcpy(&options.orig_head, &commit->object.oid);
options.head_name = NULL;
}
+ if (!options.orig_head)
+ die(_("no such branch/commit '%s'"), branch_name);
} else if (argc == 0) {
/* Do not need to switch branches, we are already on it. */
options.head_name =
@@ -1639,8 +1647,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
FREE_AND_NULL(options.head_name);
branch_name = "HEAD";
}
- if (get_oid("HEAD", &options.orig_head))
- die(_("Could not resolve HEAD to a revision"));
+ options.orig_head = lookup_commit_reference_by_name("HEAD");
+ if (!options.orig_head)
+ die(_("Could not resolve HEAD to a commit"));
} else
BUG("unexpected number of arguments left to parse");
@@ -1654,7 +1663,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
} else if (!options.onto_name)
options.onto_name = options.upstream_name;
if (strstr(options.onto_name, "...")) {
- if (get_oid_mb(options.onto_name, &merge_base) < 0) {
+ if (get_oid_mb(options.onto_name, &branch_base) < 0) {
if (keep_base)
die(_("'%s': need exactly one merge base with branch"),
options.upstream_name);
@@ -1662,7 +1671,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
die(_("'%s': need exactly one merge base"),
options.onto_name);
}
- options.onto = lookup_commit_or_die(&merge_base,
+ options.onto = lookup_commit_or_die(&branch_base,
options.onto_name);
} else {
options.onto =
@@ -1670,15 +1679,15 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
if (!options.onto)
die(_("Does not point to a valid commit '%s'"),
options.onto_name);
+ fill_branch_base(&options, &branch_base);
}
- if (options.fork_point > 0) {
- struct commit *head =
- lookup_commit_reference(the_repository,
- &options.orig_head);
+ if (keep_base && options.reapply_cherry_picks)
+ options.upstream = options.onto;
+
+ if (options.fork_point > 0)
options.restrict_revision =
- get_fork_point(options.upstream_name, head);
- }
+ get_fork_point(options.upstream_name, options.orig_head);
if (repo_read_index(the_repository) < 0)
die(_("could not read index"));
@@ -1703,13 +1712,10 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
* Check if we are already based on onto with linear history,
* in which case we could fast-forward without replacing the commits
* with new commits recreated by replaying their changes.
- *
- * Note that can_fast_forward() initializes merge_base, so we have to
- * call it before checking allow_preemptive_ff.
*/
- if (can_fast_forward(options.onto, options.upstream, options.restrict_revision,
- &options.orig_head, &merge_base) &&
- allow_preemptive_ff) {
+ if (allow_preemptive_ff &&
+ can_fast_forward(options.onto, options.upstream, options.restrict_revision,
+ options.orig_head, &branch_base)) {
int flag;
if (!(options.flags & REBASE_FORCE)) {
@@ -1750,12 +1756,12 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
struct diff_options opts;
if (options.flags & REBASE_VERBOSE) {
- if (is_null_oid(&merge_base))
+ if (is_null_oid(&branch_base))
printf(_("Changes to %s:\n"),
oid_to_hex(&options.onto->object.oid));
else
printf(_("Changes from %s to %s:\n"),
- oid_to_hex(&merge_base),
+ oid_to_hex(&branch_base),
oid_to_hex(&options.onto->object.oid));
}
@@ -1767,8 +1773,8 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
opts.detect_rename = DIFF_DETECT_RENAME;
diff_setup_done(&opts);
- diff_tree_oid(is_null_oid(&merge_base) ?
- the_hash_algo->empty_tree : &merge_base,
+ diff_tree_oid(is_null_oid(&branch_base) ?
+ the_hash_algo->empty_tree : &branch_base,
&options.onto->object.oid, "", &opts);
diffcore_std(&opts);
diff_flush(&opts);
@@ -1782,14 +1788,14 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
printf(_("First, rewinding head to replay your work on top of "
"it...\n"));
- strbuf_addf(&msg, "%s: checkout %s",
- getenv(GIT_REFLOG_ACTION_ENVIRONMENT), options.onto_name);
+ strbuf_addf(&msg, "%s (start): checkout %s",
+ options.reflog_action, options.onto_name);
ropts.oid = &options.onto->object.oid;
- ropts.orig_head = &options.orig_head,
+ ropts.orig_head = &options.orig_head->object.oid,
ropts.flags = RESET_HEAD_DETACH | RESET_ORIG_HEAD |
RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
ropts.head_msg = msg.buf;
- ropts.default_reflog_action = DEFAULT_REFLOG_ACTION;
+ ropts.default_reflog_action = options.reflog_action;
if (reset_head(the_repository, &ropts))
die(_("Could not detach HEAD"));
strbuf_release(&msg);
@@ -1798,19 +1804,10 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
* If the onto is a proper descendant of the tip of the branch, then
* we just fast-forwarded.
*/
- strbuf_reset(&msg);
- if (oideq(&merge_base, &options.orig_head)) {
+ if (oideq(&branch_base, &options.orig_head->object.oid)) {
printf(_("Fast-forwarded %s to %s.\n"),
branch_name, options.onto_name);
- strbuf_addf(&msg, "rebase finished: %s onto %s",
- options.head_name ? options.head_name : "detached HEAD",
- oid_to_hex(&options.onto->object.oid));
- memset(&ropts, 0, sizeof(ropts));
- ropts.branch = options.head_name;
- ropts.flags = RESET_HEAD_REFS_ONLY;
- ropts.head_msg = msg.buf;
- reset_head(the_repository, &ropts);
- strbuf_release(&msg);
+ move_to_original_branch(&options);
ret = finish_rebase(&options);
goto cleanup;
}
@@ -1820,21 +1817,25 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
(options.restrict_revision ?
oid_to_hex(&options.restrict_revision->object.oid) :
oid_to_hex(&options.upstream->object.oid)),
- oid_to_hex(&options.orig_head));
+ oid_to_hex(&options.orig_head->object.oid));
options.revisions = revisions.buf;
run_rebase:
- ret = run_specific_rebase(&options, action);
+ ret = run_specific_rebase(&options);
cleanup:
strbuf_release(&buf);
strbuf_release(&revisions);
+ free(options.reflog_action);
free(options.head_name);
+ strvec_clear(&options.git_am_opts);
free(options.gpg_sign_opt);
free(options.cmd);
free(options.strategy);
strbuf_release(&options.git_format_patch_opt);
free(squash_onto_name);
+ string_list_clear(&exec, 0);
+ string_list_clear(&strategy_options, 0);
return !!ret;
}
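The rebase.c changes above replace the old DEFAULT_REFLOG_ACTION and set_reflog_action() dance with a single reflog_action field: GIT_REFLOG_ACTION is read once, defaulting to "rebase", and every reflog message is then formatted as "<action> (<phase>): ...". A small sketch of that convention, assuming GIT_REFLOG_ACTION_ENVIRONMENT expands to "GIT_REFLOG_ACTION" as in sequencer.h:

#include "cache.h"
#include "strbuf.h"
#include "sequencer.h"  /* for GIT_REFLOG_ACTION_ENVIRONMENT (assumed) */

/*
 * Sketch: capture the reflog action once, falling back to "rebase",
 * then derive per-phase messages such as
 *   "rebase (start): checkout <onto>"
 *   "rebase (finish): returning to refs/heads/topic"
 */
static char *reflog_message(const char *phase, const char *what)
{
        const char *action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT);
        struct strbuf msg = STRBUF_INIT;

        strbuf_addf(&msg, "%s (%s): %s",
                    action ? action : "rebase", phase, what);
        return strbuf_detach(&msg, NULL);
}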
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index 44bcea3a5b..a90af30363 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -80,6 +80,7 @@ static struct object_id push_cert_oid;
static struct signature_check sigcheck;
static const char *push_cert_nonce;
static const char *cert_nonce_seed;
+static struct string_list hidden_refs = STRING_LIST_INIT_DUP;
static const char *NONCE_UNSOLICITED = "UNSOLICITED";
static const char *NONCE_BAD = "BAD";
@@ -130,7 +131,7 @@ static enum deny_action parse_deny_action(const char *var, const char *value)
static int receive_pack_config(const char *var, const char *value, void *cb)
{
- int status = parse_hide_refs_config(var, value, "receive");
+ int status = parse_hide_refs_config(var, value, "receive", &hidden_refs);
if (status)
return status;
@@ -296,7 +297,7 @@ static int show_ref_cb(const char *path_full, const struct object_id *oid,
struct oidset *seen = data;
const char *path = strip_namespace(path_full);
- if (ref_is_hidden(path, path_full))
+ if (ref_is_hidden(path, path_full, &hidden_refs))
return 0;
/*
@@ -1794,7 +1795,7 @@ static void reject_updates_to_hidden(struct command *commands)
strbuf_setlen(&refname_full, prefix_len);
strbuf_addstr(&refname_full, cmd->ref_name);
- if (!ref_is_hidden(cmd->ref_name, refname_full.buf))
+ if (!ref_is_hidden(cmd->ref_name, refname_full.buf, &hidden_refs))
continue;
if (is_null_oid(&cmd->new_oid))
cmd->error_string = "deny deleting a hidden ref";
@@ -1928,6 +1929,8 @@ static void execute_commands(struct command *commands,
opt.err_fd = err_fd;
opt.progress = err_fd && !quiet;
opt.env = tmp_objdir_env(tmp_objdir);
+ opt.exclude_hidden_refs_section = "receive";
+
if (check_connected(iterate_receive_command_list, &data, &opt))
set_connectivity_errors(commands, si);
@@ -2591,6 +2594,7 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
packet_flush(1);
oid_array_clear(&shallow);
oid_array_clear(&ref);
+ string_list_clear(&hidden_refs, 0);
free((void *)push_cert_nonce);
return 0;
}
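In the receive-pack.c hunks above, the hidden-ref patterns move out of library-global state into a string_list owned by the caller: parse_hide_refs_config() appends matching config entries to it and ref_is_hidden() consults it. A compact sketch of that calling convention, using only the two functions with the signatures shown above:

#include "cache.h"
#include "refs.h"
#include "string-list.h"

static struct string_list hidden_refs = STRING_LIST_INIT_DUP;

/* config callback sketch: collect receive.hideRefs / transfer.hideRefs */
static int hide_refs_config(const char *var, const char *value, void *cb)
{
        return parse_hide_refs_config(var, value, "receive", &hidden_refs);
}

/* advertisement sketch: a ref is shown only if the collected list allows it */
static int should_advertise(const char *path, const char *path_full)
{
        return !ref_is_hidden(path, path_full, &hidden_refs);
}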
diff --git a/builtin/reflog.c b/builtin/reflog.c
index 57c5c0d061..270681dcdf 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -67,7 +67,8 @@ static int collect_reflog(const char *ref, const struct object_id *oid UNUSED,
* Avoid collecting the same shared ref multiple times because
* they are available via all worktrees.
*/
- if (!worktree->is_current && ref_type(ref) == REF_TYPE_NORMAL)
+ if (!worktree->is_current &&
+ parse_worktree_ref(ref, NULL, NULL, NULL) == REF_WORKTREE_SHARED)
return 0;
strbuf_worktree_ref(worktree, &newref, ref);
diff --git a/builtin/remote.c b/builtin/remote.c
index 985b845a18..729f6f3643 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -92,13 +92,15 @@ static int verbose;
static int fetch_remote(const char *name)
{
- const char *argv[] = { "fetch", name, NULL, NULL };
- if (verbose) {
- argv[1] = "-v";
- argv[2] = name;
- }
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_push(&cmd.args, "fetch");
+ if (verbose)
+ strvec_push(&cmd.args, "-v");
+ strvec_push(&cmd.args, name);
+ cmd.git_cmd = 1;
printf_ln(_("Updating %s"), name);
- if (run_command_v_opt(argv, RUN_GIT_CMD))
+ if (run_command(&cmd))
return error(_("Could not fetch %s"), name);
return 0;
}
@@ -733,29 +735,31 @@ static int mv(int argc, const char **argv, const char *prefix)
return error(_("Could not rename config section '%s' to '%s'"),
buf.buf, buf2.buf);
- strbuf_reset(&buf);
- strbuf_addf(&buf, "remote.%s.fetch", rename.new_name);
- git_config_set_multivar(buf.buf, NULL, NULL, CONFIG_FLAGS_MULTI_REPLACE);
- strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old_name);
- for (i = 0; i < oldremote->fetch.raw_nr; i++) {
- char *ptr;
-
- strbuf_reset(&buf2);
- strbuf_addstr(&buf2, oldremote->fetch.raw[i]);
- ptr = strstr(buf2.buf, old_remote_context.buf);
- if (ptr) {
- refspec_updated = 1;
- strbuf_splice(&buf2,
- ptr-buf2.buf + strlen(":refs/remotes/"),
- strlen(rename.old_name), rename.new_name,
- strlen(rename.new_name));
- } else
- warning(_("Not updating non-default fetch refspec\n"
- "\t%s\n"
- "\tPlease update the configuration manually if necessary."),
- buf2.buf);
-
- git_config_set_multivar(buf.buf, buf2.buf, "^$", 0);
+ if (oldremote->fetch.raw_nr) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "remote.%s.fetch", rename.new_name);
+ git_config_set_multivar(buf.buf, NULL, NULL, CONFIG_FLAGS_MULTI_REPLACE);
+ strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old_name);
+ for (i = 0; i < oldremote->fetch.raw_nr; i++) {
+ char *ptr;
+
+ strbuf_reset(&buf2);
+ strbuf_addstr(&buf2, oldremote->fetch.raw[i]);
+ ptr = strstr(buf2.buf, old_remote_context.buf);
+ if (ptr) {
+ refspec_updated = 1;
+ strbuf_splice(&buf2,
+ ptr-buf2.buf + strlen(":refs/remotes/"),
+ strlen(rename.old_name), rename.new_name,
+ strlen(rename.new_name));
+ } else
+ warning(_("Not updating non-default fetch refspec\n"
+ "\t%s\n"
+ "\tPlease update the configuration manually if necessary."),
+ buf2.buf);
+
+ git_config_set_multivar(buf.buf, buf2.buf, "^$", 0);
+ }
}
read_branches();
@@ -940,7 +944,7 @@ static int rm(int argc, const char **argv, const char *prefix)
return result;
}
-static void clear_push_info(void *util, const char *string)
+static void clear_push_info(void *util, const char *string UNUSED)
{
struct push_info *info = util;
free(info->dest);
@@ -1506,37 +1510,35 @@ static int update(int argc, const char **argv, const char *prefix)
N_("prune remotes after fetching")),
OPT_END()
};
- struct strvec fetch_argv = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
int default_defined = 0;
- int retval;
argc = parse_options(argc, argv, prefix, options,
builtin_remote_update_usage,
PARSE_OPT_KEEP_ARGV0);
- strvec_push(&fetch_argv, "fetch");
+ strvec_push(&cmd.args, "fetch");
if (prune != -1)
- strvec_push(&fetch_argv, prune ? "--prune" : "--no-prune");
+ strvec_push(&cmd.args, prune ? "--prune" : "--no-prune");
if (verbose)
- strvec_push(&fetch_argv, "-v");
- strvec_push(&fetch_argv, "--multiple");
+ strvec_push(&cmd.args, "-v");
+ strvec_push(&cmd.args, "--multiple");
if (argc < 2)
- strvec_push(&fetch_argv, "default");
+ strvec_push(&cmd.args, "default");
for (i = 1; i < argc; i++)
- strvec_push(&fetch_argv, argv[i]);
+ strvec_push(&cmd.args, argv[i]);
- if (strcmp(fetch_argv.v[fetch_argv.nr-1], "default") == 0) {
+ if (strcmp(cmd.args.v[cmd.args.nr-1], "default") == 0) {
git_config(get_remote_default, &default_defined);
if (!default_defined) {
- strvec_pop(&fetch_argv);
- strvec_push(&fetch_argv, "--all");
+ strvec_pop(&cmd.args);
+ strvec_push(&cmd.args, "--all");
}
}
- retval = run_command_v_opt(fetch_argv.v, RUN_GIT_CMD);
- strvec_clear(&fetch_argv);
- return retval;
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
}
static int remove_all_fetch_refspecs(const char *key)
diff --git a/builtin/repack.c b/builtin/repack.c
index a5bacc7797..c1402ad038 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -32,7 +32,6 @@ static int write_bitmaps = -1;
static int use_delta_islands;
static int run_update_server_info = 1;
static char *packdir, *packtmp_name, *packtmp;
-static char *cruft_expiration;
static const char *const git_repack_usage[] = {
N_("git repack [<options>]"),
@@ -92,44 +91,6 @@ static int repack_config(const char *var, const char *value, void *cb)
}
/*
- * Remove temporary $GIT_OBJECT_DIRECTORY/pack/.tmp-$$-pack-* files.
- */
-static void remove_temporary_files(void)
-{
- struct strbuf buf = STRBUF_INIT;
- size_t dirlen, prefixlen;
- DIR *dir;
- struct dirent *e;
-
- dir = opendir(packdir);
- if (!dir)
- return;
-
- /* Point at the slash at the end of ".../objects/pack/" */
- dirlen = strlen(packdir) + 1;
- strbuf_addstr(&buf, packtmp);
- /* Hold the length of ".tmp-%d-pack-" */
- prefixlen = buf.len - dirlen;
-
- while ((e = readdir(dir))) {
- if (strncmp(e->d_name, buf.buf + dirlen, prefixlen))
- continue;
- strbuf_setlen(&buf, dirlen);
- strbuf_addstr(&buf, e->d_name);
- unlink(buf.buf);
- }
- closedir(dir);
- strbuf_release(&buf);
-}
-
-static void remove_pack_on_signal(int signo)
-{
- remove_temporary_files();
- sigchain_pop(signo);
- raise(signo);
-}
-
-/*
* Adds all packs hex strings to either fname_nonkept_list or
* fname_kept_list based on whether each pack has a corresponding
* .keep file or not. Packs without a .keep file are not to be kept
@@ -188,7 +149,8 @@ static void remove_redundant_pack(const char *dir_name, const char *base_name)
}
static void prepare_pack_objects(struct child_process *cmd,
- const struct pack_objects_args *args)
+ const struct pack_objects_args *args,
+ const char *out)
{
strvec_push(&cmd->args, "pack-objects");
if (args->window)
@@ -211,7 +173,7 @@ static void prepare_pack_objects(struct child_process *cmd,
strvec_push(&cmd->args, "--quiet");
if (delta_base_offset)
strvec_push(&cmd->args, "--delta-base-offset");
- strvec_push(&cmd->args, packtmp);
+ strvec_push(&cmd->args, out);
cmd->git_cmd = 1;
cmd->out = -1;
}
@@ -247,11 +209,15 @@ static struct {
{".idx"},
};
-static unsigned populate_pack_exts(char *name)
+struct generated_pack_data {
+ struct tempfile *tempfiles[ARRAY_SIZE(exts)];
+};
+
+static struct generated_pack_data *populate_pack_exts(const char *name)
{
struct stat statbuf;
struct strbuf path = STRBUF_INIT;
- unsigned ret = 0;
+ struct generated_pack_data *data = xcalloc(1, sizeof(*data));
int i;
for (i = 0; i < ARRAY_SIZE(exts); i++) {
@@ -261,11 +227,11 @@ static unsigned populate_pack_exts(char *name)
if (stat(path.buf, &statbuf))
continue;
- ret |= (1 << i);
+ data->tempfiles[i] = register_tempfile(path.buf);
}
strbuf_release(&path);
- return ret;
+ return data;
}
static void repack_promisor_objects(const struct pack_objects_args *args,
@@ -275,7 +241,7 @@ static void repack_promisor_objects(const struct pack_objects_args *args,
FILE *out;
struct strbuf line = STRBUF_INIT;
- prepare_pack_objects(&cmd, args);
+ prepare_pack_objects(&cmd, args, packtmp);
cmd.in = -1;
/*
@@ -320,7 +286,7 @@ static void repack_promisor_objects(const struct pack_objects_args *args,
line.buf);
write_promisor_file(promisor_name, NULL, 0);
- item->util = (void *)(uintptr_t)populate_pack_exts(item->string);
+ item->util = populate_pack_exts(item->string);
free(promisor_name);
}
@@ -661,8 +627,39 @@ static int write_midx_included_packs(struct string_list *include,
return finish_command(&cmd);
}
+static void remove_redundant_bitmaps(struct string_list *include,
+ const char *packdir)
+{
+ struct strbuf path = STRBUF_INIT;
+ struct string_list_item *item;
+ size_t packdir_len;
+
+ strbuf_addstr(&path, packdir);
+ strbuf_addch(&path, '/');
+ packdir_len = path.len;
+
+ /*
+ * Remove any pack bitmaps corresponding to packs which are now
+ * included in the MIDX.
+ */
+ for_each_string_list_item(item, include) {
+ strbuf_addstr(&path, item->string);
+ strbuf_strip_suffix(&path, ".idx");
+ strbuf_addstr(&path, ".bitmap");
+
+ if (unlink(path.buf) && errno != ENOENT)
+ warning_errno(_("could not remove stale bitmap: %s"),
+ path.buf);
+
+ strbuf_setlen(&path, packdir_len);
+ }
+ strbuf_release(&path);
+}
+
static int write_cruft_pack(const struct pack_objects_args *args,
+ const char *destination,
const char *pack_prefix,
+ const char *cruft_expiration,
struct string_list *names,
struct string_list *existing_packs,
struct string_list *existing_kept_packs)
@@ -672,8 +669,10 @@ static int write_cruft_pack(const struct pack_objects_args *args,
struct string_list_item *item;
FILE *in, *out;
int ret;
+ const char *scratch;
+ int local = skip_prefix(destination, packdir, &scratch);
- prepare_pack_objects(&cmd, args);
+ prepare_pack_objects(&cmd, args, destination);
strvec_push(&cmd.args, "--cruft");
if (cruft_expiration)
@@ -698,6 +697,10 @@ static int write_cruft_pack(const struct pack_objects_args *args,
* By the time it is read here, it contains only the pack(s)
* that were just written, which is exactly the set of packs we
* want to consider kept.
+ *
+ * If `--expire-to` is given, the double-use served by `names`
+ * ensures that the pack written to `--expire-to` excludes any
+ * objects contained in the cruft pack.
*/
in = xfdopen(cmd.in, "w");
for_each_string_list_item(item, names)
@@ -710,10 +713,19 @@ static int write_cruft_pack(const struct pack_objects_args *args,
out = xfdopen(cmd.out, "r");
while (strbuf_getline_lf(&line, out) != EOF) {
+ struct string_list_item *item;
+
if (line.len != the_hash_algo->hexsz)
die(_("repack: Expecting full hex object ID lines only "
"from pack-objects."));
- string_list_append(names, line.buf);
+ /*
+ * avoid putting packs written outside of the repository in the
+ * list of names
+ */
+ if (local) {
+ item = string_list_append(names, line.buf);
+ item->util = populate_pack_exts(line.buf);
+ }
}
fclose(out);
@@ -745,6 +757,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
struct pack_objects_args cruft_po_args = {NULL};
int geometric_factor = 0;
int write_midx = 0;
+ const char *cruft_expiration = NULL;
+ const char *expire_to = NULL;
struct option builtin_repack_options[] = {
OPT_BIT('a', NULL, &pack_everything,
@@ -794,6 +808,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
N_("find a geometric progression with factor <N>")),
OPT_BOOL('m', "write-midx", &write_midx,
N_("write a multi-pack index of the resulting packs")),
+ OPT_STRING(0, "expire-to", &expire_to, N_("dir"),
+ N_("pack prefix to store a pack containing pruned objects")),
OPT_END()
};
@@ -859,9 +875,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
split_pack_geometry(geometry, geometric_factor);
}
- sigchain_push_common(remove_pack_on_signal);
-
- prepare_pack_objects(&cmd, &po_args);
+ prepare_pack_objects(&cmd, &po_args, packtmp);
show_progress = !po_args.quiet && isatty(2);
@@ -952,10 +966,14 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
out = xfdopen(cmd.out, "r");
while (strbuf_getline_lf(&line, out) != EOF) {
+ struct string_list_item *item;
+
if (line.len != the_hash_algo->hexsz)
die(_("repack: Expecting full hex object ID lines only from pack-objects."));
- string_list_append(&names, line.buf);
+ item = string_list_append(&names, line.buf);
+ item->util = populate_pack_exts(item->string);
}
+ strbuf_release(&line);
fclose(out);
ret = finish_command(&cmd);
if (ret)
@@ -984,49 +1002,81 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
cruft_po_args.local = po_args.local;
cruft_po_args.quiet = po_args.quiet;
- ret = write_cruft_pack(&cruft_po_args, pack_prefix, &names,
+ ret = write_cruft_pack(&cruft_po_args, packtmp, pack_prefix,
+ cruft_expiration, &names,
&existing_nonkept_packs,
&existing_kept_packs);
if (ret)
return ret;
+
+ if (delete_redundant && expire_to) {
+ /*
+ * If `--expire-to` is given with `-d`, it's possible
+ * that we're about to prune some objects. With cruft
+ * packs, pruning is implicit: any objects from existing
+ * packs that weren't picked up by new packs are removed
+ * when their packs are deleted.
+ *
+ * Generate an additional cruft pack, with one twist:
+ * `names` now includes the name of the cruft pack
+ * written in the previous step. So the contents of
+ * _this_ cruft pack exclude everything contained in the
+ * existing cruft pack (that is, all of the unreachable
+ * objects which are no older than
+ * `--cruft-expiration`).
+ *
+ * To make this work, cruft_expiration must become NULL
+ * so that this cruft pack doesn't actually prune any
+ * objects. If it were non-NULL, this call would always
+ * generate an empty pack (since every object not in the
+ * cruft pack generated above will have an mtime older
+ * than the expiration).
+ */
+ ret = write_cruft_pack(&cruft_po_args, expire_to,
+ pack_prefix,
+ NULL,
+ &names,
+ &existing_nonkept_packs,
+ &existing_kept_packs);
+ if (ret)
+ return ret;
+ }
}
string_list_sort(&names);
- for_each_string_list_item(item, &names) {
- item->util = (void *)(uintptr_t)populate_pack_exts(item->string);
- }
-
close_object_store(the_repository->objects);
/*
* Ok we have prepared all new packfiles.
*/
for_each_string_list_item(item, &names) {
+ struct generated_pack_data *data = item->util;
+
for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
- char *fname, *fname_old;
+ char *fname;
fname = mkpathdup("%s/pack-%s%s",
packdir, item->string, exts[ext].name);
- fname_old = mkpathdup("%s-%s%s",
- packtmp, item->string, exts[ext].name);
- if (((uintptr_t)item->util) & ((uintptr_t)1 << ext)) {
+ if (data->tempfiles[ext]) {
+ const char *fname_old = get_tempfile_path(data->tempfiles[ext]);
struct stat statbuffer;
+
if (!stat(fname_old, &statbuffer)) {
statbuffer.st_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
chmod(fname_old, statbuffer.st_mode);
}
- if (rename(fname_old, fname))
- die_errno(_("renaming '%s' failed"), fname_old);
+ if (rename_tempfile(&data->tempfiles[ext], fname))
+ die_errno(_("renaming pack to '%s' failed"), fname);
} else if (!exts[ext].optional)
- die(_("missing required file: %s"), fname_old);
+ die(_("pack-objects did not write a '%s' file for pack %s-%s"),
+ exts[ext].name, packtmp, item->string);
else if (unlink(fname) < 0 && errno != ENOENT)
die_errno(_("could not unlink: %s"), fname);
free(fname);
- free(fname_old);
}
}
/* End of pack replacement. */
@@ -1059,6 +1109,9 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
refs_snapshot ? get_tempfile_path(refs_snapshot) : NULL,
show_progress, write_bitmaps > 0);
+ if (!ret && write_bitmaps)
+ remove_redundant_bitmaps(&include, packdir);
+
string_list_clear(&include, 0);
if (ret)
@@ -1089,6 +1142,11 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
strbuf_addstr(&buf, pack_basename(p));
strbuf_strip_suffix(&buf, ".pack");
+ if ((p->pack_keep) ||
+ (string_list_has_string(&existing_kept_packs,
+ buf.buf)))
+ continue;
+
remove_redundant_pack(packdir, buf.buf);
}
strbuf_release(&buf);
@@ -1106,7 +1164,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (run_update_server_info)
update_server_info(0);
- remove_temporary_files();
if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0)) {
unsigned flags = 0;
@@ -1115,11 +1172,10 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
write_midx_file(get_object_directory(), NULL, NULL, flags);
}
- string_list_clear(&names, 0);
+ string_list_clear(&names, 1);
string_list_clear(&existing_nonkept_packs, 0);
string_list_clear(&existing_kept_packs, 0);
clear_pack_geometry(geometry);
- strbuf_release(&line);
return 0;
}
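The repack.c changes above can drop the hand-rolled remove_temporary_files() and remove_pack_on_signal() cleanup because each freshly written pack file is now registered through the tempfile API: registered paths are removed automatically on die() or on a signal, and rename_tempfile() moves a surviving file into place while dropping the registration. A minimal sketch of that lifecycle, assuming the tempfile API as it is used in populate_pack_exts() above:

#include "cache.h"
#include "tempfile.h"

/*
 * Sketch: register a freshly written temporary pack so it is cleaned
 * up automatically if we die or receive a signal, then move it to its
 * final name with rename_tempfile(), which also deregisters it.
 */
static void install_generated_file(const char *tmp_path, const char *final_path)
{
        struct tempfile *t = register_tempfile(tmp_path);

        /* ... validation that may die() without leaking tmp_path ... */

        if (rename_tempfile(&t, final_path))
                die_errno(_("renaming pack to '%s' failed"), final_path);
}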
diff --git a/builtin/rerere.c b/builtin/rerere.c
index 83d7a778e3..8b7392d5b4 100644
--- a/builtin/rerere.c
+++ b/builtin/rerere.c
@@ -10,7 +10,7 @@
#include "pathspec.h"
static const char * const rerere_usage[] = {
- N_("git rerere [clear | forget <path>... | status | remaining | diff | gc]"),
+ N_("git rerere [clear | forget <pathspec>... | diff | status | remaining | gc]"),
NULL,
};
diff --git a/builtin/reset.c b/builtin/reset.c
index fdce6f8c85..376ec95521 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -73,9 +73,11 @@ static int reset_index(const char *ref, const struct object_id *oid, int reset_t
case HARD:
opts.update = 1;
opts.reset = UNPACK_RESET_OVERWRITE_UNTRACKED;
+ opts.skip_cache_tree_update = 1;
break;
case MIXED:
opts.reset = UNPACK_RESET_PROTECT_UNTRACKED;
+ opts.skip_cache_tree_update = 1;
/* but opts.update=0, so working tree not updated */
break;
default:
@@ -481,5 +483,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
if (!pathspec.nr)
remove_branch_state(the_repository, 0);
+ discard_index(&the_index);
+
return update_ref_status;
}
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index fba6f5d51f..d42db0b0cc 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -20,7 +20,8 @@
#include "packfile.h"
static const char rev_list_usage[] =
-"git rev-list [<options>] <commit-id>... [-- <path>...]\n"
+"git rev-list [<options>] <commit>... [--] [<path>...]\n"
+"\n"
" limiting output:\n"
" --max-count=<n>\n"
" --max-age=<epoch>\n"
@@ -37,6 +38,7 @@ static const char rev_list_usage[] =
" --tags\n"
" --remotes\n"
" --stdin\n"
+" --exclude-hidden=[receive|uploadpack]\n"
" --quiet\n"
" ordering output:\n"
" --topo-order\n"
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 8f61050bde..334a9d4af2 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -39,7 +39,7 @@ static int abbrev_ref_strict;
static int output_sq;
static int stuck_long;
-static struct string_list *ref_excludes;
+static struct ref_exclusions ref_excludes = REF_EXCLUSIONS_INIT;
/*
* Some arguments are relevant "revision" arguments,
@@ -198,7 +198,7 @@ static int show_default(void)
static int show_reference(const char *refname, const struct object_id *oid,
int flag UNUSED, void *cb_data UNUSED)
{
- if (ref_excluded(ref_excludes, refname))
+ if (ref_excluded(&ref_excludes, refname))
return 0;
show_rev(NORMAL, oid, refname);
return 0;
@@ -530,6 +530,7 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix)
strbuf_addstr(&parsed, " --");
sq_quote_argv(&parsed, argv);
puts(parsed.buf);
+ strbuf_release(&parsed);
return 0;
}
@@ -585,7 +586,7 @@ static void handle_ref_opt(const char *pattern, const char *prefix)
for_each_glob_ref_in(show_reference, pattern, prefix, NULL);
else
for_each_ref_in(prefix, show_reference, NULL);
- clear_ref_exclusion(&ref_excludes);
+ clear_ref_exclusions(&ref_excludes);
}
enum format_type {
@@ -863,7 +864,7 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
}
if (!strcmp(arg, "--all")) {
for_each_ref(show_reference, NULL);
- clear_ref_exclusion(&ref_excludes);
+ clear_ref_exclusions(&ref_excludes);
continue;
}
if (skip_prefix(arg, "--disambiguate=", &arg)) {
@@ -876,10 +877,14 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
continue;
}
if (opt_with_value(arg, "--branches", &arg)) {
+ if (ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --branches"));
handle_ref_opt(arg, "refs/heads/");
continue;
}
if (opt_with_value(arg, "--tags", &arg)) {
+ if (ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --tags"));
handle_ref_opt(arg, "refs/tags/");
continue;
}
@@ -888,6 +893,8 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
continue;
}
if (opt_with_value(arg, "--remotes", &arg)) {
+ if (ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --remotes"));
handle_ref_opt(arg, "refs/remotes/");
continue;
}
@@ -895,6 +902,10 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
add_ref_exclusion(&ref_excludes, arg);
continue;
}
+ if (skip_prefix(arg, "--exclude-hidden=", &arg)) {
+ exclude_hidden_refs(&ref_excludes, arg);
+ continue;
+ }
if (!strcmp(arg, "--show-toplevel")) {
const char *work_tree = get_git_work_tree();
if (work_tree)
diff --git a/builtin/revert.c b/builtin/revert.c
index ee2a0807f0..8bc87e4c77 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -21,14 +21,15 @@
*/
static const char * const revert_usage[] = {
- N_("git revert [<options>] <commit-ish>..."),
- N_("git revert <subcommand>"),
+ N_("git revert [--[no-]edit] [-n] [-m parent-number] [-s] [-S[<keyid>]] <commit>..."),
+ N_("git revert (--continue | --skip | --abort | --quit)"),
NULL
};
static const char * const cherry_pick_usage[] = {
- N_("git cherry-pick [<options>] <commit-ish>..."),
- N_("git cherry-pick <subcommand>"),
+ N_("git cherry-pick [--edit] [-n] [-m <parent-number>] [-s] [-x] [--ff]\n"
+ " [-S[<keyid>]] <commit>..."),
+ N_("git cherry-pick (--continue | --skip | --abort | --quit)"),
NULL
};
@@ -220,6 +221,7 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
opts->strategy = xstrdup_or_null(opts->strategy);
if (!opts->strategy && getenv("GIT_TEST_MERGE_ALGORITHM"))
opts->strategy = xstrdup(getenv("GIT_TEST_MERGE_ALGORITHM"));
+ free(options);
if (cmd == 'q') {
int ret = sequencer_remove_state(opts);
@@ -260,6 +262,9 @@ int cmd_cherry_pick(int argc, const char **argv, const char *prefix)
opts.action = REPLAY_PICK;
sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
+ if (opts.revs)
+ release_revisions(opts.revs);
+ free(opts.revs);
if (res < 0)
die(_("cherry-pick failed"));
return res;
diff --git a/builtin/rm.c b/builtin/rm.c
index b6ba859fe4..05bfe20a46 100644
--- a/builtin/rm.c
+++ b/builtin/rm.c
@@ -17,7 +17,9 @@
#include "pathspec.h"
static const char * const builtin_rm_usage[] = {
- N_("git rm [<options>] [--] <file>..."),
+ N_("git rm [-f | --force] [-n] [-r] [--cached] [--ignore-unmatch]\n"
+ " [--quiet] [--pathspec-from-file=<file> [--pathspec-file-nul]]\n"
+ " [--] [<pathspec>...]"),
NULL
};
@@ -84,8 +86,7 @@ static void submodules_absorb_gitdir_if_needed(void)
continue;
if (!submodule_uses_gitfile(name))
- absorb_git_dir_into_superproject(name,
- ABSORB_GITDIR_RECURSE_SUBMODULES);
+ absorb_git_dir_into_superproject(name);
}
}
diff --git a/builtin/send-pack.c b/builtin/send-pack.c
index 64962be016..4c5d125fa0 100644
--- a/builtin/send-pack.c
+++ b/builtin/send-pack.c
@@ -20,6 +20,7 @@ static const char * const send_pack_usage[] = {
N_("git send-pack [--mirror] [--dry-run] [--force]\n"
" [--receive-pack=<git-receive-pack>]\n"
" [--verbose] [--thin] [--atomic]\n"
+ " [--[no-]signed | --signed=(true|false|if-asked)]\n"
" [<host>:]<directory> (--all | <ref>...)"),
NULL,
};
diff --git a/builtin/shortlog.c b/builtin/shortlog.c
index 7a1e1fe7c0..27a87167e1 100644
--- a/builtin/shortlog.c
+++ b/builtin/shortlog.c
@@ -132,7 +132,9 @@ static void read_from_stdin(struct shortlog *log)
match = committer_match;
break;
case SHORTLOG_GROUP_TRAILER:
- die(_("using --group=trailer with stdin is not supported"));
+ die(_("using %s with stdin is not supported"), "--group=trailer");
+ case SHORTLOG_GROUP_FORMAT:
+ die(_("using %s with stdin is not supported"), "--group=format");
default:
BUG("unhandled shortlog group");
}
@@ -170,6 +172,9 @@ static void insert_records_from_trailers(struct shortlog *log,
const char *commit_buffer, *body;
struct strbuf ident = STRBUF_INIT;
+ if (!log->trailers.nr)
+ return;
+
/*
* Using format_commit_message("%B") would be simpler here, but
* this saves us copying the message.
@@ -200,9 +205,34 @@ static void insert_records_from_trailers(struct shortlog *log,
unuse_commit_buffer(commit, commit_buffer);
}
+static int shortlog_needs_dedup(const struct shortlog *log)
+{
+ return HAS_MULTI_BITS(log->groups) || log->format.nr > 1 || log->trailers.nr;
+}
+
+static void insert_records_from_format(struct shortlog *log,
+ struct strset *dups,
+ struct commit *commit,
+ struct pretty_print_context *ctx,
+ const char *oneline)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, &log->format) {
+ strbuf_reset(&buf);
+
+ format_commit_message(commit, item->string, &buf, ctx);
+
+ if (!shortlog_needs_dedup(log) || strset_add(dups, buf.buf))
+ insert_one_record(log, buf.buf, oneline);
+ }
+
+ strbuf_release(&buf);
+}
+
void shortlog_add_commit(struct shortlog *log, struct commit *commit)
{
- struct strbuf ident = STRBUF_INIT;
struct strbuf oneline = STRBUF_INIT;
struct strset dups = STRSET_INIT;
struct pretty_print_context ctx = {0};
@@ -211,7 +241,7 @@ void shortlog_add_commit(struct shortlog *log, struct commit *commit)
ctx.fmt = CMIT_FMT_USERFORMAT;
ctx.abbrev = log->abbrev;
ctx.print_email_subject = 1;
- ctx.date_mode.type = DATE_NORMAL;
+ ctx.date_mode = log->date_mode;
ctx.output_encoding = get_log_output_encoding();
if (!log->summary) {
@@ -222,30 +252,10 @@ void shortlog_add_commit(struct shortlog *log, struct commit *commit)
}
oneline_str = oneline.len ? oneline.buf : "<none>";
- if (log->groups & SHORTLOG_GROUP_AUTHOR) {
- strbuf_reset(&ident);
- format_commit_message(commit,
- log->email ? "%aN <%aE>" : "%aN",
- &ident, &ctx);
- if (!HAS_MULTI_BITS(log->groups) ||
- strset_add(&dups, ident.buf))
- insert_one_record(log, ident.buf, oneline_str);
- }
- if (log->groups & SHORTLOG_GROUP_COMMITTER) {
- strbuf_reset(&ident);
- format_commit_message(commit,
- log->email ? "%cN <%cE>" : "%cN",
- &ident, &ctx);
- if (!HAS_MULTI_BITS(log->groups) ||
- strset_add(&dups, ident.buf))
- insert_one_record(log, ident.buf, oneline_str);
- }
- if (log->groups & SHORTLOG_GROUP_TRAILER) {
- insert_records_from_trailers(log, &dups, commit, &ctx, oneline_str);
- }
+ insert_records_from_trailers(log, &dups, commit, &ctx, oneline_str);
+ insert_records_from_format(log, &dups, commit, &ctx, oneline_str);
strset_clear(&dups);
- strbuf_release(&ident);
strbuf_release(&oneline);
}
@@ -314,6 +324,7 @@ static int parse_group_option(const struct option *opt, const char *arg, int uns
if (unset) {
log->groups = 0;
string_list_clear(&log->trailers, 0);
+ string_list_clear(&log->format, 0);
} else if (!strcasecmp(arg, "author"))
log->groups |= SHORTLOG_GROUP_AUTHOR;
else if (!strcasecmp(arg, "committer"))
@@ -321,8 +332,15 @@ static int parse_group_option(const struct option *opt, const char *arg, int uns
else if (skip_prefix(arg, "trailer:", &field)) {
log->groups |= SHORTLOG_GROUP_TRAILER;
string_list_append(&log->trailers, field);
- } else
+ } else if (skip_prefix(arg, "format:", &field)) {
+ log->groups |= SHORTLOG_GROUP_FORMAT;
+ string_list_append(&log->format, field);
+ } else if (strchr(arg, '%')) {
+ log->groups |= SHORTLOG_GROUP_FORMAT;
+ string_list_append(&log->format, arg);
+ } else {
return error(_("unknown group type: %s"), arg);
+ }
return 0;
}
@@ -340,6 +358,19 @@ void shortlog_init(struct shortlog *log)
log->in2 = DEFAULT_INDENT2;
log->trailers.strdup_strings = 1;
log->trailers.cmp = strcasecmp;
+ log->format.strdup_strings = 1;
+}
+
+void shortlog_finish_setup(struct shortlog *log)
+{
+ if (log->groups & SHORTLOG_GROUP_AUTHOR)
+ string_list_append(&log->format,
+ log->email ? "%aN <%aE>" : "%aN");
+ if (log->groups & SHORTLOG_GROUP_COMMITTER)
+ string_list_append(&log->format,
+ log->email ? "%cN <%cE>" : "%cN");
+
+ string_list_sort(&log->trailers);
}
int cmd_shortlog(int argc, const char **argv, const char *prefix)
@@ -407,10 +438,11 @@ parse_done:
log.user_format = rev.commit_format == CMIT_FMT_USERFORMAT;
log.abbrev = rev.abbrev;
log.file = rev.diffopt.file;
+ log.date_mode = rev.date_mode;
if (!log.groups)
log.groups = SHORTLOG_GROUP_AUTHOR;
- string_list_sort(&log.trailers);
+ shortlog_finish_setup(&log);
/* assume HEAD if from a tty */
if (!nongit && !rev.pending.nr && isatty(0))
@@ -479,4 +511,5 @@ void shortlog_output(struct shortlog *log)
log->list.strdup_strings = 1;
string_list_clear(&log->list, 1);
clear_mailmap(&log->mailmap);
+ string_list_clear(&log->format, 0);
}
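
The shortlog hunks above fold the author/committer groupings into the new format-string mechanism. A minimal sketch of the resulting behaviour, using only identifiers that appear in the hunks (the scenario itself is illustrative, not part of the patch):

/*
 * Illustrative sketch: with both "--group=author" and
 * "--group=committer" requested (and no --email),
 * shortlog_finish_setup() rewrites the groups as two pretty formats.
 */
log->groups |= SHORTLOG_GROUP_AUTHOR | SHORTLOG_GROUP_COMMITTER;
shortlog_finish_setup(log);        /* log->format now holds "%aN" and "%cN" */
shortlog_add_commit(log, commit);  /* each format is expanded once per commit;
                                      the "dups" strset keeps a commit from
                                      being counted twice when its author and
                                      committer render to the same string */
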
diff --git a/builtin/show-branch.c b/builtin/show-branch.c
index d3f5715e3e..c013abaf94 100644
--- a/builtin/show-branch.c
+++ b/builtin/show-branch.c
@@ -14,7 +14,8 @@ static const char* show_branch_usage[] = {
N_("git show-branch [-a | --all] [-r | --remotes] [--topo-order | --date-order]\n"
" [--current] [--color[=<when>] | --no-color] [--sparse]\n"
" [--more=<n> | --list | --independent | --merge-base]\n"
- " [--no-name | --sha1-name] [--topics] [(<rev> | <glob>)...]"),
+ " [--no-name | --sha1-name] [--topics]\n"
+ " [(<rev> | <glob>)...]"),
N_("git show-branch (-g | --reflog)[=<n>[,<base>]] [--list] [<ref>]"),
NULL
};
diff --git a/builtin/show-ref.c b/builtin/show-ref.c
index 4856906108..3af6a53ee9 100644
--- a/builtin/show-ref.c
+++ b/builtin/show-ref.c
@@ -9,7 +9,9 @@
#include "parse-options.h"
static const char * const show_ref_usage[] = {
- N_("git show-ref [-q | --quiet] [--verify] [--head] [-d | --dereference] [-s | --hash[=<n>]] [--abbrev[=<n>]] [--tags] [--heads] [--] [<pattern>...]"),
+ N_("git show-ref [-q | --quiet] [--verify] [--head] [-d | --dereference]\n"
+ " [-s | --hash[=<n>]] [--abbrev[=<n>]] [--tags]\n"
+ " [--heads] [--] [<pattern>...]"),
N_("git show-ref --exclude-existing[=<pattern>]"),
NULL
};
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 287716db68..58a22503f0 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -20,7 +20,7 @@
static const char *empty_base = "";
static char const * const builtin_sparse_checkout_usage[] = {
- N_("git sparse-checkout (init|list|set|add|reapply|disable) <options>"),
+ N_("git sparse-checkout (init | list | set | add | reapply | disable) [<options>]"),
NULL
};
diff --git a/builtin/stash.c b/builtin/stash.c
index 2274aae255..8a64d564a1 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -21,72 +21,95 @@
#define INCLUDE_ALL_FILES 2
+#define BUILTIN_STASH_LIST_USAGE \
+ N_("git stash list [<log-options>]")
+#define BUILTIN_STASH_SHOW_USAGE \
+ N_("git stash show [-u | --include-untracked | --only-untracked] [<diff-options>] [<stash>]")
+#define BUILTIN_STASH_DROP_USAGE \
+ N_("git stash drop [-q | --quiet] [<stash>]")
+#define BUILTIN_STASH_POP_USAGE \
+ N_("git stash pop [--index] [-q | --quiet] [<stash>]")
+#define BUILTIN_STASH_APPLY_USAGE \
+ N_("git stash apply [--index] [-q | --quiet] [<stash>]")
+#define BUILTIN_STASH_BRANCH_USAGE \
+ N_("git stash branch <branchname> [<stash>]")
+#define BUILTIN_STASH_STORE_USAGE \
+ N_("git stash store [(-m | --message) <message>] [-q | --quiet] <commit>")
+#define BUILTIN_STASH_PUSH_USAGE \
+ N_("git stash [push [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]\n" \
+ " [-u | --include-untracked] [-a | --all] [(-m | --message) <message>]\n" \
+ " [--pathspec-from-file=<file> [--pathspec-file-nul]]\n" \
+ " [--] [<pathspec>...]]")
+#define BUILTIN_STASH_SAVE_USAGE \
+ N_("git stash save [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]\n" \
+ " [-u | --include-untracked] [-a | --all] [<message>]")
+#define BUILTIN_STASH_CREATE_USAGE \
+ N_("git stash create [<message>]")
+#define BUILTIN_STASH_CLEAR_USAGE \
+ "git stash clear"
+
static const char * const git_stash_usage[] = {
- N_("git stash list [<options>]"),
- N_("git stash show [<options>] [<stash>]"),
- N_("git stash drop [-q|--quiet] [<stash>]"),
- N_("git stash ( pop | apply ) [--index] [-q|--quiet] [<stash>]"),
- N_("git stash branch <branchname> [<stash>]"),
- "git stash clear",
- N_("git stash [push [-p|--patch] [-S|--staged] [-k|--[no-]keep-index] [-q|--quiet]\n"
- " [-u|--include-untracked] [-a|--all] [-m|--message <message>]\n"
- " [--pathspec-from-file=<file> [--pathspec-file-nul]]\n"
- " [--] [<pathspec>...]]"),
- N_("git stash save [-p|--patch] [-S|--staged] [-k|--[no-]keep-index] [-q|--quiet]\n"
- " [-u|--include-untracked] [-a|--all] [<message>]"),
+ BUILTIN_STASH_LIST_USAGE,
+ BUILTIN_STASH_SHOW_USAGE,
+ BUILTIN_STASH_DROP_USAGE,
+ BUILTIN_STASH_POP_USAGE,
+ BUILTIN_STASH_APPLY_USAGE,
+ BUILTIN_STASH_BRANCH_USAGE,
+ BUILTIN_STASH_PUSH_USAGE,
+ BUILTIN_STASH_SAVE_USAGE,
+ BUILTIN_STASH_CLEAR_USAGE,
+ BUILTIN_STASH_CREATE_USAGE,
+ BUILTIN_STASH_STORE_USAGE,
NULL
};
static const char * const git_stash_list_usage[] = {
- N_("git stash list [<options>]"),
+ BUILTIN_STASH_LIST_USAGE,
NULL
};
static const char * const git_stash_show_usage[] = {
- N_("git stash show [<options>] [<stash>]"),
+ BUILTIN_STASH_SHOW_USAGE,
NULL
};
static const char * const git_stash_drop_usage[] = {
- N_("git stash drop [-q|--quiet] [<stash>]"),
+ BUILTIN_STASH_DROP_USAGE,
NULL
};
static const char * const git_stash_pop_usage[] = {
- N_("git stash pop [--index] [-q|--quiet] [<stash>]"),
+ BUILTIN_STASH_POP_USAGE,
NULL
};
static const char * const git_stash_apply_usage[] = {
- N_("git stash apply [--index] [-q|--quiet] [<stash>]"),
+ BUILTIN_STASH_APPLY_USAGE,
NULL
};
static const char * const git_stash_branch_usage[] = {
- N_("git stash branch <branchname> [<stash>]"),
+ BUILTIN_STASH_BRANCH_USAGE,
NULL
};
static const char * const git_stash_clear_usage[] = {
- "git stash clear",
+ BUILTIN_STASH_CLEAR_USAGE,
NULL
};
static const char * const git_stash_store_usage[] = {
- N_("git stash store [-m|--message <message>] [-q|--quiet] <commit>"),
+ BUILTIN_STASH_STORE_USAGE,
NULL
};
static const char * const git_stash_push_usage[] = {
- N_("git stash [push [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
- " [-u|--include-untracked] [-a|--all] [-m|--message <message>]\n"
- " [--] [<pathspec>...]]"),
+ BUILTIN_STASH_PUSH_USAGE,
NULL
};
static const char * const git_stash_save_usage[] = {
- N_("git stash save [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
- " [-u|--include-untracked] [-a|--all] [<message>]"),
+ BUILTIN_STASH_SAVE_USAGE,
NULL
};
@@ -1663,8 +1686,10 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
}
done:
+ strbuf_release(&patch);
free_stash_info(&info);
strbuf_release(&stash_msg_buf);
+ strbuf_release(&untracked_files);
return ret;
}
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index 0b4acb442b..28e66a3d70 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -181,7 +181,7 @@ static void module_list_release(struct module_list *ml)
free(ml->entries);
}
-static int module_list_compute(int argc, const char **argv,
+static int module_list_compute(const char **argv,
const char *prefix,
struct pathspec *pathspec,
struct module_list *list)
@@ -405,7 +405,7 @@ static int module_foreach(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, module_foreach_options,
git_submodule_helper_usage, 0);
- if (module_list_compute(0, NULL, prefix, &pathspec, &list) < 0)
+ if (module_list_compute(NULL, prefix, &pathspec, &list) < 0)
goto cleanup;
info.argc = argc;
@@ -545,7 +545,7 @@ static int module_init(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, module_init_options,
git_submodule_helper_usage, 0);
- if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ if (module_list_compute(argv, prefix, &pathspec, &list) < 0)
goto cleanup;
/*
@@ -616,6 +616,9 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
int diff_files_result;
struct strbuf buf = STRBUF_INIT;
const char *git_dir;
+ struct setup_revision_opt opt = {
+ .free_removed_argv_elements = 1,
+ };
if (!submodule_from_path(the_repository, null_oid(), path))
die(_("no submodule mapping found in .gitmodules for path '%s'"),
@@ -649,9 +652,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
repo_init_revisions(the_repository, &rev, NULL);
rev.abbrev = 0;
- diff_files_args.nr = setup_revisions(diff_files_args.nr,
- diff_files_args.v,
- &rev, NULL);
+ setup_revisions(diff_files_args.nr, diff_files_args.v, &rev, &opt);
diff_files_result = run_diff_files(&rev, 0);
if (!diff_result_code(&rev.diffopt, diff_files_result)) {
@@ -732,7 +733,7 @@ static int module_status(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, module_status_options,
git_submodule_helper_usage, 0);
- if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ if (module_list_compute(argv, prefix, &pathspec, &list) < 0)
goto cleanup;
info.prefix = prefix;
@@ -1326,7 +1327,7 @@ static int module_sync(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, module_sync_options,
git_submodule_helper_usage, 0);
- if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ if (module_list_compute(argv, prefix, &pathspec, &list) < 0)
goto cleanup;
info.prefix = prefix;
@@ -1378,8 +1379,7 @@ static void deinit_submodule(const char *path, const char *prefix,
".git file by using absorbgitdirs."),
displaypath);
- absorb_git_dir_into_superproject(path,
- ABSORB_GITDIR_RECURSE_SUBMODULES);
+ absorb_git_dir_into_superproject(path);
}
@@ -1479,7 +1479,7 @@ static int module_deinit(int argc, const char **argv, const char *prefix)
if (!argc && !all)
die(_("Use '--all' if you really want to deinitialize all submodules"));
- if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ if (module_list_compute(argv, prefix, &pathspec, &list) < 0)
goto cleanup;
info.prefix = prefix;
@@ -1503,6 +1503,8 @@ struct module_clone_data {
const char *name;
const char *url;
const char *depth;
+ const char *branch;
+ const char *branch_oid;
struct list_objects_filter_options *filter_options;
unsigned int quiet: 1;
unsigned int progress: 1;
@@ -1692,6 +1694,8 @@ static int clone_submodule(const struct module_clone_data *clone_data,
strvec_push(&cp.args, clone_data->single_branch ?
"--single-branch" :
"--no-single-branch");
+ if (the_repository->settings.submodule_propagate_branches)
+ strvec_push(&cp.args, "--detach");
strvec_push(&cp.args, "--");
strvec_push(&cp.args, clone_data->url);
@@ -1704,6 +1708,21 @@ static int clone_submodule(const struct module_clone_data *clone_data,
if(run_command(&cp))
die(_("clone of '%s' into submodule path '%s' failed"),
clone_data->url, clone_data_path);
+
+ if (clone_data->branch) {
+ struct child_process branch_cp = CHILD_PROCESS_INIT;
+
+ branch_cp.git_cmd = 1;
+ prepare_other_repo_env(&branch_cp.env, sm_gitdir);
+
+ strvec_pushl(&branch_cp.args, "branch",
+ clone_data->branch, clone_data->branch_oid,
+ NULL);
+
+ if (run_command(&branch_cp))
+ die(_("could not create branch '%s' in submodule path '%s'"),
+ clone_data->branch, clone_data_path);
+ }
} else {
char *path;
@@ -1778,6 +1797,12 @@ static int module_clone(int argc, const char **argv, const char *prefix)
N_("disallow cloning into non-empty directory")),
OPT_BOOL(0, "single-branch", &clone_data.single_branch,
N_("clone only one branch, HEAD or --branch")),
+ OPT_STRING(0, "branch", &clone_data.branch,
+ N_("string"),
+ N_("name of branch to be created")),
+ OPT_STRING(0, "branch-oid", &clone_data.branch_oid,
+ N_("object-id"),
+ N_("commit id for new branch")),
OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
@@ -1785,12 +1810,14 @@ static int module_clone(int argc, const char **argv, const char *prefix)
N_("git submodule--helper clone [--prefix=<path>] [--quiet] "
"[--reference <repository>] [--name <name>] [--depth <depth>] "
"[--single-branch] [--filter <filter-spec>] "
+ "[--branch <branch> --branch-oid <oid>]"
"--url <url> --path <path>"),
NULL
};
argc = parse_options(argc, argv, prefix, module_clone_options,
git_submodule_helper_usage, 0);
+ prepare_repo_settings(the_repository);
clone_data.dissociate = !!dissociate;
clone_data.quiet = !!quiet;
@@ -1802,6 +1829,12 @@ static int module_clone(int argc, const char **argv, const char *prefix)
usage_with_options(git_submodule_helper_usage,
module_clone_options);
+ if (!!clone_data.branch != !!clone_data.branch_oid)
+ BUG("--branch and --branch-oid must be set/unset together");
+ if ((clone_data.branch &&
+ !the_repository->settings.submodule_propagate_branches))
+ BUG("--branch is only expected with submodule.propagateBranches");
+
clone_submodule(&clone_data, &reference);
list_objects_filter_release(&filter_options);
string_list_clear(&reference, 1);
@@ -1884,8 +1917,8 @@ static void submodule_update_clone_release(struct submodule_update_clone *suc)
struct update_data {
const char *prefix;
char *displaypath;
+ const char *super_branch;
enum submodule_update_type update_default;
- struct object_id suboid;
struct string_list references;
struct submodule_update_strategy update_strategy;
struct list_objects_filter_options *filter_options;
@@ -2059,6 +2092,11 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
strvec_push(&child->args, suc->update_data->single_branch ?
"--single-branch" :
"--no-single-branch");
+ if (ud->super_branch) {
+ strvec_pushf(&child->args, "--branch=%s", ud->super_branch);
+ strvec_pushf(&child->args, "--branch-oid=%s",
+ oid_to_hex(&ce->oid));
+ }
cleanup:
free(displaypath);
@@ -2222,9 +2260,14 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet,
static int run_update_command(const struct update_data *ud, int subforce)
{
struct child_process cp = CHILD_PROCESS_INIT;
- char *oid = oid_to_hex(&ud->oid);
+ const char *update_target;
int ret;
+ if (ud->update_strategy.type == SM_UPDATE_CHECKOUT && ud->super_branch)
+ update_target = ud->super_branch;
+ else
+ update_target = oid_to_hex(&ud->oid);
+
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
cp.git_cmd = 1;
@@ -2252,7 +2295,7 @@ static int run_update_command(const struct update_data *ud, int subforce)
BUG("unexpected update strategy type: %d",
ud->update_strategy.type);
}
- strvec_push(&cp.args, oid);
+ strvec_push(&cp.args, update_target);
cp.dir = ud->sm_path;
prepare_submodule_repo_env(&cp.env);
@@ -2260,20 +2303,20 @@ static int run_update_command(const struct update_data *ud, int subforce)
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
die_message(_("Unable to checkout '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ update_target, ud->displaypath);
/* No "ret" assignment, use "git checkout"'s */
break;
case SM_UPDATE_REBASE:
ret = die_message(_("Unable to rebase '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ update_target, ud->displaypath);
break;
case SM_UPDATE_MERGE:
ret = die_message(_("Unable to merge '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ update_target, ud->displaypath);
break;
case SM_UPDATE_COMMAND:
ret = die_message(_("Execution of '%s %s' failed in submodule path '%s'"),
- ud->update_strategy.command, oid, ud->displaypath);
+ ud->update_strategy.command, update_target, ud->displaypath);
break;
default:
BUG("unexpected update strategy type: %d",
@@ -2289,19 +2332,19 @@ static int run_update_command(const struct update_data *ud, int subforce)
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
printf(_("Submodule path '%s': checked out '%s'\n"),
- ud->displaypath, oid);
+ ud->displaypath, update_target);
break;
case SM_UPDATE_REBASE:
printf(_("Submodule path '%s': rebased into '%s'\n"),
- ud->displaypath, oid);
+ ud->displaypath, update_target);
break;
case SM_UPDATE_MERGE:
printf(_("Submodule path '%s': merged in '%s'\n"),
- ud->displaypath, oid);
+ ud->displaypath, update_target);
break;
case SM_UPDATE_COMMAND:
printf(_("Submodule path '%s': '%s %s'\n"),
- ud->displaypath, ud->update_strategy.command, oid);
+ ud->displaypath, ud->update_strategy.command, update_target);
break;
default:
BUG("unexpected update strategy type: %d",
@@ -2313,7 +2356,7 @@ static int run_update_command(const struct update_data *ud, int subforce)
static int run_update_procedure(const struct update_data *ud)
{
- int subforce = is_null_oid(&ud->suboid) || ud->force;
+ int subforce = ud->just_cloned || ud->force;
if (!ud->nofetch) {
/*
@@ -2488,7 +2531,10 @@ static void update_data_to_args(const struct update_data *update_data,
static int update_submodule(struct update_data *update_data)
{
+ int submodule_up_to_date;
int ret;
+ struct object_id suboid;
+ const char *submodule_head = NULL;
ret = determine_submodule_update_strategy(the_repository,
update_data->just_cloned,
@@ -2498,9 +2544,9 @@ static int update_submodule(struct update_data *update_data)
if (ret)
return ret;
- if (update_data->just_cloned)
- oidcpy(&update_data->suboid, null_oid());
- else if (resolve_gitlink_ref(update_data->sm_path, "HEAD", &update_data->suboid))
+ if (!update_data->just_cloned &&
+ resolve_gitlink_ref(update_data->sm_path, "HEAD", &suboid,
+ &submodule_head))
return die_message(_("Unable to find current revision in submodule path '%s'"),
update_data->displaypath);
@@ -2527,14 +2573,26 @@ static int update_submodule(struct update_data *update_data)
update_data->sm_path);
}
- if (resolve_gitlink_ref(update_data->sm_path, remote_ref, &update_data->oid))
+ if (resolve_gitlink_ref(update_data->sm_path, remote_ref,
+ &update_data->oid, NULL))
return die_message(_("Unable to find %s revision in submodule path '%s'"),
remote_ref, update_data->sm_path);
free(remote_ref);
}
- if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force) {
+ if (update_data->just_cloned)
+ submodule_up_to_date = 0;
+ else if (update_data->super_branch)
+ /* Check that the submodule's HEAD points to super_branch. */
+ submodule_up_to_date =
+ skip_prefix(submodule_head, "refs/heads/",
+ &submodule_head) &&
+ !strcmp(update_data->super_branch, submodule_head);
+ else
+ submodule_up_to_date = oideq(&update_data->oid, &suboid);
+
+ if (!submodule_up_to_date || update_data->force) {
ret = run_update_procedure(update_data);
if (ret)
return ret;
@@ -2546,7 +2604,6 @@ static int update_submodule(struct update_data *update_data)
next.prefix = NULL;
oidcpy(&next.oid, null_oid());
- oidcpy(&next.suboid, null_oid());
cp.dir = update_data->sm_path;
cp.git_cmd = 1;
@@ -2567,12 +2624,26 @@ static int update_submodules(struct update_data *update_data)
{
int i, ret = 0;
struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
+ const struct run_process_parallel_opts opts = {
+ .tr2_category = "submodule",
+ .tr2_label = "parallel/update",
+
+ .processes = update_data->max_jobs,
+
+ .get_next_task = update_clone_get_next_task,
+ .start_failure = update_clone_start_failure,
+ .task_finished = update_clone_task_finished,
+ .data = &suc,
+ };
+
+ if (the_repository->settings.submodule_propagate_branches) {
+ struct branch *current_branch = branch_get(NULL);
+ if (current_branch)
+ update_data->super_branch = current_branch->name;
+ }
suc.update_data = update_data;
- run_processes_parallel_tr2(suc.update_data->max_jobs, update_clone_get_next_task,
- update_clone_start_failure,
- update_clone_task_finished, &suc, "submodule",
- "parallel/update");
+ run_processes_parallel(&opts);
/*
* We saved the output and put it out all at once now.
@@ -2635,9 +2706,6 @@ static int module_update(int argc, const char **argv, const char *prefix)
N_("traverse submodules recursively")),
OPT_BOOL('N', "no-fetch", &opt.nofetch,
N_("don't fetch new objects from the remote site")),
- OPT_STRING(0, "prefix", &opt.prefix,
- N_("path"),
- N_("path into the working tree")),
OPT_SET_INT(0, "checkout", &opt.update_default,
N_("use the 'checkout' update strategy (default)"),
SM_UPDATE_CHECKOUT),
@@ -2683,6 +2751,7 @@ static int module_update(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, module_update_options,
git_submodule_helper_usage, 0);
+ prepare_repo_settings(the_repository);
if (opt.require_init)
opt.init = 1;
@@ -2693,11 +2762,12 @@ static int module_update(int argc, const char **argv, const char *prefix)
}
opt.filter_options = &filter_options;
+ opt.prefix = prefix;
if (opt.update_default)
opt.update_strategy.type = opt.update_default;
- if (module_list_compute(argc, argv, prefix, &pathspec, &opt.list) < 0) {
+ if (module_list_compute(argv, prefix, &pathspec, &opt.list) < 0) {
ret = 1;
goto cleanup;
}
@@ -2709,7 +2779,7 @@ static int module_update(int argc, const char **argv, const char *prefix)
struct module_list list = MODULE_LIST_INIT;
struct init_cb info = INIT_CB_INIT;
- if (module_list_compute(argc, argv, opt.prefix,
+ if (module_list_compute(argv, opt.prefix,
&pathspec2, &list) < 0) {
module_list_release(&list);
ret = 1;
@@ -2822,13 +2892,7 @@ static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
int i;
struct pathspec pathspec = { 0 };
struct module_list list = MODULE_LIST_INIT;
- unsigned flags = ABSORB_GITDIR_RECURSE_SUBMODULES;
struct option embed_gitdir_options[] = {
- OPT_STRING(0, "prefix", &prefix,
- N_("path"),
- N_("path into the working tree")),
- OPT_BIT(0, "--recursive", &flags, N_("recurse into submodules"),
- ABSORB_GITDIR_RECURSE_SUBMODULES),
OPT_END()
};
const char *const git_submodule_helper_usage[] = {
@@ -2840,11 +2904,11 @@ static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, embed_gitdir_options,
git_submodule_helper_usage, 0);
- if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ if (module_list_compute(argv, prefix, &pathspec, &list) < 0)
goto cleanup;
for (i = 0; i < list.nr; i++)
- absorb_git_dir_into_superproject(list.entries[i]->name, flags);
+ absorb_git_dir_into_superproject(list.entries[i]->name);
ret = 0;
cleanup:
@@ -2853,51 +2917,6 @@ cleanup:
return ret;
}
-static int module_config(int argc, const char **argv, const char *prefix)
-{
- enum {
- CHECK_WRITEABLE = 1,
- DO_UNSET = 2
- } command = 0;
- struct option module_config_options[] = {
- OPT_CMDMODE(0, "check-writeable", &command,
- N_("check if it is safe to write to the .gitmodules file"),
- CHECK_WRITEABLE),
- OPT_CMDMODE(0, "unset", &command,
- N_("unset the config in the .gitmodules file"),
- DO_UNSET),
- OPT_END()
- };
- const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper config <name> [<value>]"),
- N_("git submodule--helper config --unset <name>"),
- "git submodule--helper config --check-writeable",
- NULL
- };
-
- argc = parse_options(argc, argv, prefix, module_config_options,
- git_submodule_helper_usage, PARSE_OPT_KEEP_ARGV0);
-
- if (argc == 1 && command == CHECK_WRITEABLE)
- return is_writing_gitmodules_ok() ? 0 : -1;
-
- /* Equivalent to ACTION_GET in builtin/config.c */
- if (argc == 2 && command != DO_UNSET)
- return print_config_from_gitmodules(the_repository, argv[1]);
-
- /* Equivalent to ACTION_SET in builtin/config.c */
- if (argc == 3 || (argc == 2 && command == DO_UNSET)) {
- const char *value = (argc == 3) ? argv[2] : NULL;
-
- if (!is_writing_gitmodules_ok())
- die(_("please make sure that the .gitmodules file is in the working tree"));
-
- return config_set_in_gitmodules_file_gently(argv[1], value);
- }
-
- usage_with_options(git_submodule_helper_usage, module_config_options);
-}
-
static int module_set_url(int argc, const char **argv, const char *prefix)
{
int quiet = 0;
@@ -3272,7 +3291,7 @@ static void die_on_repo_without_commits(const char *path)
strbuf_addstr(&sb, path);
if (is_nonbare_repository_dir(&sb)) {
struct object_id oid;
- if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ if (resolve_gitlink_ref(path, "HEAD", &oid, NULL) < 0)
die(_("'%s' does not have a commit checked out"), path);
}
strbuf_release(&sb);
@@ -3396,48 +3415,45 @@ cleanup:
return ret;
}
-#define SUPPORT_SUPER_PREFIX (1<<0)
-
-struct cmd_struct {
- const char *cmd;
- int (*fn)(int, const char **, const char *);
- unsigned option;
-};
-
-static struct cmd_struct commands[] = {
- {"clone", module_clone, SUPPORT_SUPER_PREFIX},
- {"add", module_add, 0},
- {"update", module_update, SUPPORT_SUPER_PREFIX},
- {"foreach", module_foreach, SUPPORT_SUPER_PREFIX},
- {"init", module_init, 0},
- {"status", module_status, SUPPORT_SUPER_PREFIX},
- {"sync", module_sync, SUPPORT_SUPER_PREFIX},
- {"deinit", module_deinit, 0},
- {"summary", module_summary, 0},
- {"push-check", push_check, 0},
- {"absorbgitdirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
- {"config", module_config, 0},
- {"set-url", module_set_url, 0},
- {"set-branch", module_set_branch, 0},
- {"create-branch", module_create_branch, 0},
-};
-
int cmd_submodule__helper(int argc, const char **argv, const char *prefix)
{
- int i;
- if (argc < 2 || !strcmp(argv[1], "-h"))
- usage("git submodule--helper <command>");
-
- for (i = 0; i < ARRAY_SIZE(commands); i++) {
- if (!strcmp(argv[1], commands[i].cmd)) {
- if (get_super_prefix() &&
- !(commands[i].option & SUPPORT_SUPER_PREFIX))
- die(_("%s doesn't support --super-prefix"),
- commands[i].cmd);
- return commands[i].fn(argc - 1, argv + 1, prefix);
- }
- }
+ const char *cmd = argv[0];
+ const char *subcmd;
+ parse_opt_subcommand_fn *fn = NULL;
+ const char *const usage[] = {
+ N_("git submodule--helper <command>"),
+ NULL
+ };
+ struct option options[] = {
+ OPT_SUBCOMMAND("clone", &fn, module_clone),
+ OPT_SUBCOMMAND("add", &fn, module_add),
+ OPT_SUBCOMMAND("update", &fn, module_update),
+ OPT_SUBCOMMAND("foreach", &fn, module_foreach),
+ OPT_SUBCOMMAND("init", &fn, module_init),
+ OPT_SUBCOMMAND("status", &fn, module_status),
+ OPT_SUBCOMMAND("sync", &fn, module_sync),
+ OPT_SUBCOMMAND("deinit", &fn, module_deinit),
+ OPT_SUBCOMMAND("summary", &fn, module_summary),
+ OPT_SUBCOMMAND("push-check", &fn, push_check),
+ OPT_SUBCOMMAND("absorbgitdirs", &fn, absorb_git_dirs),
+ OPT_SUBCOMMAND("set-url", &fn, module_set_url),
+ OPT_SUBCOMMAND("set-branch", &fn, module_set_branch),
+ OPT_SUBCOMMAND("create-branch", &fn, module_create_branch),
+ OPT_END()
+ };
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ subcmd = argv[0];
+
+ if (strcmp(subcmd, "clone") && strcmp(subcmd, "update") &&
+ strcmp(subcmd, "foreach") && strcmp(subcmd, "status") &&
+ strcmp(subcmd, "sync") && strcmp(subcmd, "absorbgitdirs") &&
+ get_super_prefix())
+ /*
+ * xstrfmt() rather than "%s %s" to keep the translated
+ * string identical to git.c's.
+ */
+ die(_("%s doesn't support --super-prefix"),
+ xstrfmt("'%s %s'", cmd, subcmd));
- die(_("'%s' is not a valid submodule--helper "
- "subcommand"), argv[1]);
+ return fn(argc, argv, prefix);
}
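
The submodule--helper hunks above thread a superproject branch through clone and update when submodule.propagateBranches is enabled. A condensed, illustrative view of that flow, pieced together from the hunks (the branch name "topic" is made up):

/*
 * Superproject is on branch "topic" and submodule.propagateBranches is set:
 *
 *   git submodule--helper update
 *     -> update_submodules() records super_branch = "topic"
 *     -> each clone child process gets --detach --branch=topic
 *        --branch-oid=<gitlink oid>
 *        -> clone_submodule() then runs "git branch topic <oid>" inside
 *           the freshly cloned submodule's git dir
 *     -> checkout-mode updates use "topic" as the checkout target and
 *        consider the submodule up to date when its HEAD symref already
 *        points at refs/heads/topic
 */
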
diff --git a/builtin/symbolic-ref.c b/builtin/symbolic-ref.c
index 1b0f10225f..e00768a8b7 100644
--- a/builtin/symbolic-ref.c
+++ b/builtin/symbolic-ref.c
@@ -5,15 +5,19 @@
#include "parse-options.h"
static const char * const git_symbolic_ref_usage[] = {
- N_("git symbolic-ref [<options>] <name> [<ref>]"),
- N_("git symbolic-ref -d [-q] <name>"),
+ N_("git symbolic-ref [-m <reason>] <name> <ref>"),
+ N_("git symbolic-ref [-q] [--short] [--no-recurse] <name>"),
+ N_("git symbolic-ref --delete [-q] <name>"),
NULL
};
-static int check_symref(const char *HEAD, int quiet, int shorten, int print)
+static int check_symref(const char *HEAD, int quiet, int shorten, int recurse, int print)
{
- int flag;
- const char *refname = resolve_ref_unsafe(HEAD, 0, NULL, &flag);
+ int resolve_flags, flag;
+ const char *refname;
+
+ resolve_flags = (recurse ? 0 : RESOLVE_REF_NO_RECURSE);
+ refname = resolve_ref_unsafe(HEAD, resolve_flags, NULL, &flag);
if (!refname)
die("No such ref: %s", HEAD);
@@ -35,13 +39,14 @@ static int check_symref(const char *HEAD, int quiet, int shorten, int print)
int cmd_symbolic_ref(int argc, const char **argv, const char *prefix)
{
- int quiet = 0, delete = 0, shorten = 0, ret = 0;
+ int quiet = 0, delete = 0, shorten = 0, recurse = 1, ret = 0;
const char *msg = NULL;
struct option options[] = {
OPT__QUIET(&quiet,
N_("suppress error message for non-symbolic (detached) refs")),
OPT_BOOL('d', "delete", &delete, N_("delete symbolic ref")),
OPT_BOOL(0, "short", &shorten, N_("shorten ref output")),
+ OPT_BOOL(0, "recurse", &recurse, N_("recursively dereference (default)")),
OPT_STRING('m', NULL, &msg, N_("reason"), N_("reason of the update")),
OPT_END(),
};
@@ -55,7 +60,7 @@ int cmd_symbolic_ref(int argc, const char **argv, const char *prefix)
if (delete) {
if (argc != 1)
usage_with_options(git_symbolic_ref_usage, options);
- ret = check_symref(argv[0], 1, 0, 0);
+ ret = check_symref(argv[0], 1, 0, 0, 0);
if (ret)
die("Cannot delete %s, not a symbolic ref", argv[0]);
if (!strcmp(argv[0], "HEAD"))
@@ -65,7 +70,7 @@ int cmd_symbolic_ref(int argc, const char **argv, const char *prefix)
switch (argc) {
case 1:
- ret = check_symref(argv[0], quiet, shorten, 1);
+ ret = check_symref(argv[0], quiet, shorten, recurse, 1);
break;
case 2:
if (!strcmp(argv[0], "HEAD") &&
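
The new --[no-]recurse option above is a thin wrapper over the ref-resolution flags; a one-line sketch of the call it boils down to (variable names here are illustrative):

/* --no-recurse: report the symref's target without dereferencing it recursively */
refname = resolve_ref_unsafe(name, RESOLVE_REF_NO_RECURSE, NULL, &flag);
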
diff --git a/builtin/tag.c b/builtin/tag.c
index 75dece0e4f..d428c45dc8 100644
--- a/builtin/tag.c
+++ b/builtin/tag.c
@@ -23,11 +23,13 @@
#include "date.h"
static const char * const git_tag_usage[] = {
- N_("git tag [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>]\n"
- " <tagname> [<head>]"),
+ N_("git tag [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>] [-e]\n"
+ " <tagname> [<commit> | <object>]"),
N_("git tag -d <tagname>..."),
- N_("git tag -l [-n[<num>]] [--contains <commit>] [--no-contains <commit>] [--points-at <object>]\n"
- " [--format=<format>] [--merged <commit>] [--no-merged <commit>] [<pattern>...]"),
+ N_("git tag [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>]\n"
+ " [--points-at <object>] [--column[=<options>] | --no-column]\n"
+ " [--create-reflog] [--sort=<key>] [--format=<format>]\n"
+ " [--merged <commit>] [--no-merged <commit>] [<pattern>...]"),
N_("git tag -v [--format=<format>] <tagname>..."),
NULL
};
diff --git a/builtin/unpack-file.c b/builtin/unpack-file.c
index 58652229f2..88de32b7d7 100644
--- a/builtin/unpack-file.c
+++ b/builtin/unpack-file.c
@@ -19,6 +19,7 @@ static char *create_temp_file(struct object_id *oid)
if (write_in_full(fd, buf, size) < 0)
die_errno("unable to write temp-file");
close(fd);
+ free(buf);
return path;
}
@@ -27,7 +28,7 @@ int cmd_unpack_file(int argc, const char **argv, const char *prefix)
struct object_id oid;
if (argc != 2 || !strcmp(argv[1], "-h"))
- usage("git unpack-file <sha1>");
+ usage("git unpack-file <blob>");
if (get_oid(argv[1], &oid))
die("Not a valid object name %s", argv[1]);
diff --git a/builtin/update-index.c b/builtin/update-index.c
index b62249905f..e6305656c9 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -339,7 +339,7 @@ static int process_directory(const char *path, int len, struct stat *st)
if (S_ISGITLINK(ce->ce_mode)) {
/* Do nothing to the index if there is no HEAD! */
- if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ if (resolve_gitlink_ref(path, "HEAD", &oid, NULL) < 0)
return 0;
return add_one_path(ce, path, len, st);
@@ -365,7 +365,7 @@ static int process_directory(const char *path, int len, struct stat *st)
}
/* No match - should we add it as a gitlink? */
- if (!resolve_gitlink_ref(path, "HEAD", &oid))
+ if (!resolve_gitlink_ref(path, "HEAD", &oid, NULL))
return add_one_path(NULL, path, len, st);
/* Error out. */
@@ -732,7 +732,7 @@ static int do_unresolve(int ac, const char **av,
return err;
}
-static int do_reupdate(int ac, const char **av,
+static int do_reupdate(const char **paths,
const char *prefix)
{
/* Read HEAD and run update-index on paths that are
@@ -744,7 +744,7 @@ static int do_reupdate(int ac, const char **av,
parse_pathspec(&pathspec, 0,
PATHSPEC_PREFER_CWD,
- prefix, av + 1);
+ prefix, paths);
if (read_ref("HEAD", &head_oid))
/* If there is no HEAD, that means it is an initial
@@ -970,7 +970,7 @@ static enum parse_opt_result reupdate_callback(
/* consume remaining arguments. */
setup_work_tree();
- *has_errors = do_reupdate(ctx->argc, ctx->argv, prefix);
+ *has_errors = do_reupdate(ctx->argv + 1, prefix);
if (*has_errors)
active_cache_changed = 0;
diff --git a/builtin/update-server-info.c b/builtin/update-server-info.c
index 880fffec58..d2239c9ef4 100644
--- a/builtin/update-server-info.c
+++ b/builtin/update-server-info.c
@@ -4,7 +4,7 @@
#include "parse-options.h"
static const char * const update_server_info_usage[] = {
- "git update-server-info [--force]",
+ "git update-server-info [-f | --force]",
NULL
};
diff --git a/builtin/upload-archive.c b/builtin/upload-archive.c
index 98d028dae6..945ee2b412 100644
--- a/builtin/upload-archive.c
+++ b/builtin/upload-archive.c
@@ -10,7 +10,7 @@
#include "strvec.h"
static const char upload_archive_usage[] =
- "git upload-archive <repo>";
+ "git upload-archive <repository>";
static const char deadchild[] =
"git upload-archive: archiver died with error";
diff --git a/builtin/upload-pack.c b/builtin/upload-pack.c
index 125af53885..25b69da2bf 100644
--- a/builtin/upload-pack.c
+++ b/builtin/upload-pack.c
@@ -8,7 +8,8 @@
#include "serve.h"
static const char * const upload_pack_usage[] = {
- N_("git upload-pack [<options>] <dir>"),
+ N_("git-upload-pack [--[no-]strict] [--timeout=<n>] [--stateless-rpc]\n"
+ " [--advertise-refs] <directory>"),
NULL
};
diff --git a/builtin/verify-commit.c b/builtin/verify-commit.c
index 40c69a0bed..3ebad32b0f 100644
--- a/builtin/verify-commit.c
+++ b/builtin/verify-commit.c
@@ -16,7 +16,7 @@
#include "gpg-interface.h"
static const char * const verify_commit_usage[] = {
- N_("git verify-commit [-v | --verbose] <commit>..."),
+ N_("git verify-commit [-v | --verbose] [--raw] <commit>..."),
NULL
};
diff --git a/builtin/verify-pack.c b/builtin/verify-pack.c
index 05c5213594..27d6f75fd8 100644
--- a/builtin/verify-pack.c
+++ b/builtin/verify-pack.c
@@ -56,7 +56,7 @@ static int verify_one_pack(const char *path, unsigned int flags, const char *has
}
static const char * const verify_pack_usage[] = {
- N_("git verify-pack [-v | --verbose] [-s | --stat-only] <pack>..."),
+ N_("git verify-pack [-v | --verbose] [-s | --stat-only] [--] <pack>.idx..."),
NULL
};
diff --git a/builtin/verify-tag.c b/builtin/verify-tag.c
index f45136a06b..217566952d 100644
--- a/builtin/verify-tag.c
+++ b/builtin/verify-tag.c
@@ -15,7 +15,7 @@
#include "ref-filter.h"
static const char * const verify_tag_usage[] = {
- N_("git verify-tag [-v | --verbose] [--format=<format>] <tag>..."),
+ N_("git verify-tag [-v | --verbose] [--format=<format>] [--raw] <tag>..."),
NULL
};
diff --git a/builtin/worktree.c b/builtin/worktree.c
index c6710b2552..963d322ee7 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -15,15 +15,73 @@
#include "worktree.h"
#include "quote.h"
-static const char * const worktree_usage[] = {
- N_("git worktree add [<options>] <path> [<commit-ish>]"),
- N_("git worktree list [<options>]"),
- N_("git worktree lock [<options>] <path>"),
- N_("git worktree move <worktree> <new-path>"),
- N_("git worktree prune [<options>]"),
- N_("git worktree remove [<options>] <worktree>"),
- N_("git worktree repair [<path>...]"),
- N_("git worktree unlock <path>"),
+#define BUILTIN_WORKTREE_ADD_USAGE \
+ N_("git worktree add [-f] [--detach] [--checkout] [--lock [--reason <string>]]\n" \
+ " [[-b | -B | --orphan] <new-branch>] <path> [<commit-ish>]")
+#define BUILTIN_WORKTREE_LIST_USAGE \
+ N_("git worktree list [-v | --porcelain [-z]]")
+#define BUILTIN_WORKTREE_LOCK_USAGE \
+ N_("git worktree lock [--reason <string>] <worktree>")
+#define BUILTIN_WORKTREE_MOVE_USAGE \
+ N_("git worktree move <worktree> <new-path>")
+#define BUILTIN_WORKTREE_PRUNE_USAGE \
+ N_("git worktree prune [-n] [-v] [--expire <expire>]")
+#define BUILTIN_WORKTREE_REMOVE_USAGE \
+ N_("git worktree remove [-f] <worktree>")
+#define BUILTIN_WORKTREE_REPAIR_USAGE \
+ N_("git worktree repair [<path>...]")
+#define BUILTIN_WORKTREE_UNLOCK_USAGE \
+ N_("git worktree unlock <worktree>")
+
+static const char * const git_worktree_usage[] = {
+ BUILTIN_WORKTREE_ADD_USAGE,
+ BUILTIN_WORKTREE_LIST_USAGE,
+ BUILTIN_WORKTREE_LOCK_USAGE,
+ BUILTIN_WORKTREE_MOVE_USAGE,
+ BUILTIN_WORKTREE_PRUNE_USAGE,
+ BUILTIN_WORKTREE_REMOVE_USAGE,
+ BUILTIN_WORKTREE_REPAIR_USAGE,
+ BUILTIN_WORKTREE_UNLOCK_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_add_usage[] = {
+ BUILTIN_WORKTREE_ADD_USAGE,
+ NULL,
+};
+
+static const char * const git_worktree_list_usage[] = {
+ BUILTIN_WORKTREE_LIST_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_lock_usage[] = {
+ BUILTIN_WORKTREE_LOCK_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_move_usage[] = {
+ BUILTIN_WORKTREE_MOVE_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_prune_usage[] = {
+ BUILTIN_WORKTREE_PRUNE_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_remove_usage[] = {
+ BUILTIN_WORKTREE_REMOVE_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_repair_usage[] = {
+ BUILTIN_WORKTREE_REPAIR_USAGE,
+ NULL
+};
+
+static const char * const git_worktree_unlock_usage[] = {
+ BUILTIN_WORKTREE_UNLOCK_USAGE,
NULL
};
@@ -32,6 +90,7 @@ struct add_opts {
int detach;
int quiet;
int checkout;
+ const char *orphan_branch;
const char *keep_locked;
};
@@ -153,9 +212,10 @@ static int prune(int ac, const char **av, const char *prefix)
};
expire = TIME_MAX;
- ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ ac = parse_options(ac, av, prefix, options, git_worktree_prune_usage,
+ 0);
if (ac)
- usage_with_options(worktree_usage, options);
+ usage_with_options(git_worktree_prune_usage, options);
prune_worktrees();
return 0;
}
@@ -305,6 +365,24 @@ static int checkout_worktree(const struct add_opts *opts,
return run_command(&cp);
}
+static int make_worktree_orphan(const struct add_opts *opts,
+ struct strvec *child_env)
+{
+ int ret;
+ struct strbuf symref = STRBUF_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ cp.git_cmd = 1;
+
+ validate_new_branchname(opts->orphan_branch, &symref, 0);
+ strvec_pushl(&cp.args, "symbolic-ref", "HEAD", symref.buf, NULL);
+ if (opts->quiet)
+ strvec_push(&cp.args, "--quiet");
+ strvec_pushv(&cp.env, child_env->v);
+ ret = run_command(&cp);
+ strbuf_release(&symref);
+ return ret;
+}
+
static int add_worktree(const char *path, const char *refname,
const struct add_opts *opts)
{
@@ -334,7 +412,7 @@ static int add_worktree(const char *path, const char *refname,
die_if_checked_out(symref.buf, 0);
}
commit = lookup_commit_reference_by_name(refname);
- if (!commit)
+ if (!commit && !opts->orphan_branch)
die(_("invalid reference: %s"), refname);
name = worktree_basename(path, &len);
@@ -423,10 +501,10 @@ static int add_worktree(const char *path, const char *refname,
strvec_pushf(&child_env, "%s=%s", GIT_WORK_TREE_ENVIRONMENT, path);
cp.git_cmd = 1;
- if (!is_branch)
+ if (!is_branch && commit) {
strvec_pushl(&cp.args, "update-ref", "HEAD",
oid_to_hex(&commit->object.oid), NULL);
- else {
+ } else {
strvec_pushl(&cp.args, "symbolic-ref", "HEAD",
symref.buf, NULL);
if (opts->quiet)
@@ -438,6 +516,10 @@ static int add_worktree(const char *path, const char *refname,
if (ret)
goto done;
+ if (opts->orphan_branch &&
+ (ret = make_worktree_orphan(opts, &child_env)))
+ goto done;
+
if (opts->checkout &&
(ret = checkout_worktree(opts, &child_env)))
goto done;
@@ -457,7 +539,7 @@ done:
* Hook failure does not warrant worktree deletion, so run hook after
* is_junk is cleared, but do return appropriate code when hook fails.
*/
- if (!ret && opts->checkout) {
+ if (!ret && opts->checkout && !opts->orphan_branch) {
struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
strvec_pushl(&opt.env, "GIT_DIR", "GIT_WORK_TREE", NULL);
@@ -557,6 +639,8 @@ static int add(int ac, const char **av, const char *prefix)
N_("create a new branch")),
OPT_STRING('B', NULL, &new_branch_force, N_("branch"),
N_("create or reset a branch")),
+ OPT_STRING(0, "orphan", &opts.orphan_branch, N_("branch"),
+ N_("new unparented branch")),
OPT_BOOL('d', "detach", &opts.detach, N_("detach HEAD at named commit")),
OPT_BOOL(0, "checkout", &opts.checkout, N_("populate the new working tree")),
OPT_BOOL(0, "lock", &keep_locked, N_("keep the new working tree locked")),
@@ -570,12 +654,21 @@ static int add(int ac, const char **av, const char *prefix)
N_("try to match the new branch name with a remote-tracking branch")),
OPT_END()
};
+ int ret;
memset(&opts, 0, sizeof(opts));
opts.checkout = 1;
- ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ ac = parse_options(ac, av, prefix, options, git_worktree_add_usage, 0);
if (!!opts.detach + !!new_branch + !!new_branch_force > 1)
die(_("options '%s', '%s', and '%s' cannot be used together"), "-b", "-B", "--detach");
+ if (!!opts.detach + !!new_branch + !!new_branch_force + !!opts.orphan_branch > 1)
+ die(_("options '%s', '%s', '%s', and '%s' cannot be used together"),
+ "-b", "-B", "--orphan", "--detach");
+ if (opts.orphan_branch && opt_track)
+ die(_("'%s' and '%s' cannot be used together"), "--orphan", "--track");
+ if (opts.orphan_branch && !opts.checkout)
+ die(_("'%s' and '%s' cannot be used together"), "--orphan",
+ "--no-checkout");
if (lock_reason && !keep_locked)
die(_("the option '%s' requires '%s'"), "--reason", "--lock");
if (lock_reason)
@@ -584,7 +677,7 @@ static int add(int ac, const char **av, const char *prefix)
opts.keep_locked = _("added with --lock");
if (ac < 1 || ac > 2)
- usage_with_options(worktree_usage, options);
+ usage_with_options(git_worktree_add_usage, options);
path = prefix_filename(prefix, av[0]);
branch = ac < 2 ? "HEAD" : av[1];
@@ -592,6 +685,13 @@ static int add(int ac, const char **av, const char *prefix)
if (!strcmp(branch, "-"))
branch = "@{-1}";
+ /*
+ * When creating a new branch, new_branch now contains the branch to
+ * create.
+ *
+ * Past this point, new_branch_force can be treated solely as a
+ * boolean flag to indicate whether `-B` was selected.
+ */
if (new_branch_force) {
struct strbuf symref = STRBUF_INIT;
@@ -604,7 +704,17 @@ static int add(int ac, const char **av, const char *prefix)
strbuf_release(&symref);
}
- if (ac < 2 && !new_branch && !opts.detach) {
+ /*
+ * As the orphan cannot be created until the contents of branch
+ * are staged, opts.orphan_branch should be treated as both a boolean
+ * indicating that `--orphan` was selected and as the name of the new
+ * orphan branch from this point on.
+ */
+ if (opts.orphan_branch) {
+ new_branch = opts.orphan_branch;
+ }
+
+ if (ac < 2 && !new_branch && !opts.detach && !opts.orphan_branch) {
const char *s = dwim_branch(path, &new_branch);
if (s)
branch = s;
@@ -627,7 +737,7 @@ static int add(int ac, const char **av, const char *prefix)
if (!opts.quiet)
print_preparing_worktree_line(opts.detach, branch, new_branch, !!new_branch_force);
- if (new_branch) {
+ if (new_branch && !opts.orphan_branch) {
struct child_process cp = CHILD_PROCESS_INIT;
cp.git_cmd = 1;
strvec_push(&cp.args, "branch");
@@ -646,9 +756,9 @@ static int add(int ac, const char **av, const char *prefix)
die(_("--[no-]track can only be used if a new branch is created"));
}
- UNLEAK(path);
- UNLEAK(opts);
- return add_worktree(path, branch, &opts);
+ ret = add_worktree(path, branch, &opts);
+ free(path);
+ return ret;
}
static void show_worktree_porcelain(struct worktree *wt, int line_terminator)
@@ -772,9 +882,9 @@ static int list(int ac, const char **av, const char *prefix)
};
expire = TIME_MAX;
- ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ ac = parse_options(ac, av, prefix, options, git_worktree_list_usage, 0);
if (ac)
- usage_with_options(worktree_usage, options);
+ usage_with_options(git_worktree_list_usage, options);
else if (verbose && porcelain)
die(_("options '%s' and '%s' cannot be used together"), "--verbose", "--porcelain");
else if (!line_terminator && !porcelain)
@@ -811,9 +921,9 @@ static int lock_worktree(int ac, const char **av, const char *prefix)
};
struct worktree **worktrees, *wt;
- ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ ac = parse_options(ac, av, prefix, options, git_worktree_lock_usage, 0);
if (ac != 1)
- usage_with_options(worktree_usage, options);
+ usage_with_options(git_worktree_lock_usage, options);
worktrees = get_worktrees();
wt = find_worktree(worktrees, prefix, av[0]);
@@ -844,9 +954,9 @@ static int unlock_worktree(int ac, const char **av, const char *prefix)
struct worktree **worktrees, *wt;
int ret;
- ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ ac = parse_options(ac, av, prefix, options, git_worktree_unlock_usage, 0);
if (ac != 1)
- usage_with_options(worktree_usage, options);
+ usage_with_options(git_worktree_unlock_usage, options);
worktrees = get_worktrees();
wt = find_worktree(worktrees, prefix, av[0]);
@@ -914,9 +1024,10 @@ static int move_worktree(int ac, const char **av, const char *prefix)
const char *reason = NULL;
char *path;
- ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ ac = parse_options(ac, av, prefix, options, git_worktree_move_usage,
+ 0);
if (ac != 2)
- usage_with_options(worktree_usage, options);
+ usage_with_options(git_worktree_move_usage, options);
path = prefix_filename(prefix, av[1]);
strbuf_addstr(&dst, path);
@@ -1042,9 +1153,9 @@ static int remove_worktree(int ac, const char **av, const char *prefix)
const char *reason = NULL;
int ret = 0;
- ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ ac = parse_options(ac, av, prefix, options, git_worktree_remove_usage, 0);
if (ac != 1)
- usage_with_options(worktree_usage, options);
+ usage_with_options(git_worktree_remove_usage, options);
worktrees = get_worktrees();
wt = find_worktree(worktrees, prefix, av[0]);
@@ -1102,7 +1213,7 @@ static int repair(int ac, const char **av, const char *prefix)
};
int rc = 0;
- ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ ac = parse_options(ac, av, prefix, options, git_worktree_repair_usage, 0);
p = ac > 0 ? av : self;
for (; *p; p++)
repair_worktree_at_path(*p, report_repair, &rc);
@@ -1130,6 +1241,6 @@ int cmd_worktree(int ac, const char **av, const char *prefix)
if (!prefix)
prefix = "";
- ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ ac = parse_options(ac, av, prefix, options, git_worktree_usage, 0);
return fn(ac, av, prefix);
}
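
The worktree hunks above introduce --orphan; a condensed, illustrative view of the new path (branch and path names are made up):

/*
 *   git worktree add --orphan new-topic ../wt
 *     -> no existing commit is required, since the "invalid reference"
 *        check is skipped when --orphan is given
 *     -> make_worktree_orphan() points the new worktree's HEAD at
 *        refs/heads/new-topic with "git symbolic-ref HEAD", after
 *        validate_new_branchname() has vetted the name
 *     -> --orphan rejects -b/-B/--detach, --track and --no-checkout,
 *        and the post-checkout hook is not run for the new worktree
 */
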
diff --git a/bundle-uri.c b/bundle-uri.c
index 4a8cc74ed0..79a914f961 100644
--- a/bundle-uri.c
+++ b/bundle-uri.c
@@ -4,23 +4,221 @@
#include "object-store.h"
#include "refs.h"
#include "run-command.h"
+#include "hashmap.h"
+#include "pkt-line.h"
+#include "config.h"
-static int find_temp_filename(struct strbuf *name)
+static int compare_bundles(const void *hashmap_cmp_fn_data,
+ const struct hashmap_entry *he1,
+ const struct hashmap_entry *he2,
+ const void *id)
+{
+ const struct remote_bundle_info *e1 =
+ container_of(he1, const struct remote_bundle_info, ent);
+ const struct remote_bundle_info *e2 =
+ container_of(he2, const struct remote_bundle_info, ent);
+
+ return strcmp(e1->id, id ? (const char *)id : e2->id);
+}
+
+void init_bundle_list(struct bundle_list *list)
+{
+ memset(list, 0, sizeof(*list));
+
+ /* Implied defaults. */
+ list->mode = BUNDLE_MODE_ALL;
+ list->version = 1;
+
+ hashmap_init(&list->bundles, compare_bundles, NULL, 0);
+}
+
+static int clear_remote_bundle_info(struct remote_bundle_info *bundle,
+ void *data)
+{
+ FREE_AND_NULL(bundle->id);
+ FREE_AND_NULL(bundle->uri);
+ FREE_AND_NULL(bundle->file);
+ bundle->unbundled = 0;
+ return 0;
+}
+
+void clear_bundle_list(struct bundle_list *list)
+{
+ if (!list)
+ return;
+
+ for_all_bundles_in_list(list, clear_remote_bundle_info, NULL);
+ hashmap_clear_and_free(&list->bundles, struct remote_bundle_info, ent);
+}
+
+int for_all_bundles_in_list(struct bundle_list *list,
+ bundle_iterator iter,
+ void *data)
+{
+ struct remote_bundle_info *info;
+ struct hashmap_iter i;
+
+ hashmap_for_each_entry(&list->bundles, &i, info, ent) {
+ int result = iter(info, data);
+
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
+
+static int summarize_bundle(struct remote_bundle_info *info, void *data)
+{
+ FILE *fp = data;
+ fprintf(fp, "[bundle \"%s\"]\n", info->id);
+ fprintf(fp, "\turi = %s\n", info->uri);
+ return 0;
+}
+
+void print_bundle_list(FILE *fp, struct bundle_list *list)
+{
+ const char *mode;
+
+ switch (list->mode) {
+ case BUNDLE_MODE_ALL:
+ mode = "all";
+ break;
+
+ case BUNDLE_MODE_ANY:
+ mode = "any";
+ break;
+
+ case BUNDLE_MODE_NONE:
+ default:
+ mode = "<unknown>";
+ }
+
+ fprintf(fp, "[bundle]\n");
+ fprintf(fp, "\tversion = %d\n", list->version);
+ fprintf(fp, "\tmode = %s\n", mode);
+
+ for_all_bundles_in_list(list, summarize_bundle, fp);
+}
+
+/**
+ * Given a key-value pair, update the state of the given bundle list.
+ * Returns 0 if the key-value pair is understood. Returns -1 if the key
+ * is not understood or the value is malformed.
+ */
+static int bundle_list_update(const char *key, const char *value,
+ struct bundle_list *list)
+{
+ struct strbuf id = STRBUF_INIT;
+ struct remote_bundle_info lookup = REMOTE_BUNDLE_INFO_INIT;
+ struct remote_bundle_info *bundle;
+ const char *subsection, *subkey;
+ size_t subsection_len;
+
+ if (parse_config_key(key, "bundle", &subsection, &subsection_len, &subkey))
+ return -1;
+
+ if (!subsection_len) {
+ if (!strcmp(subkey, "version")) {
+ int version;
+ if (!git_parse_int(value, &version))
+ return -1;
+ if (version != 1)
+ return -1;
+
+ list->version = version;
+ return 0;
+ }
+
+ if (!strcmp(subkey, "mode")) {
+ if (!strcmp(value, "all"))
+ list->mode = BUNDLE_MODE_ALL;
+ else if (!strcmp(value, "any"))
+ list->mode = BUNDLE_MODE_ANY;
+ else
+ return -1;
+ return 0;
+ }
+
+ /* Ignore other unknown global keys. */
+ return 0;
+ }
+
+ strbuf_add(&id, subsection, subsection_len);
+
+ /*
+ * Check for an existing bundle with this <id>, or create one
+ * if necessary.
+ */
+ lookup.id = id.buf;
+ hashmap_entry_init(&lookup.ent, strhash(lookup.id));
+ if (!(bundle = hashmap_get_entry(&list->bundles, &lookup, ent, NULL))) {
+ CALLOC_ARRAY(bundle, 1);
+ bundle->id = strbuf_detach(&id, NULL);
+ hashmap_entry_init(&bundle->ent, strhash(bundle->id));
+ hashmap_add(&list->bundles, &bundle->ent);
+ }
+ strbuf_release(&id);
+
+ if (!strcmp(subkey, "uri")) {
+ if (bundle->uri)
+ return -1;
+ bundle->uri = xstrdup(value);
+ return 0;
+ }
+
+ /*
+ * At this point, we ignore any information that we don't
+ * understand, assuming it to be hints for a heuristic the client
+ * does not currently understand.
+ */
+ return 0;
+}
+
+static int config_to_bundle_list(const char *key, const char *value, void *data)
+{
+ struct bundle_list *list = data;
+ return bundle_list_update(key, value, list);
+}
+
+int bundle_uri_parse_config_format(const char *uri,
+ const char *filename,
+ struct bundle_list *list)
+{
+ int result;
+ struct config_options opts = {
+ .error_action = CONFIG_ERROR_ERROR,
+ };
+
+ result = git_config_from_file_with_options(config_to_bundle_list,
+ filename, list,
+ &opts);
+
+ if (!result && list->mode == BUNDLE_MODE_NONE) {
+ warning(_("bundle list at '%s' has no mode"), uri);
+ result = 1;
+ }
+
+ return result;
+}
+
+static char *find_temp_filename(void)
{
int fd;
+ struct strbuf name = STRBUF_INIT;
/*
* Find a temporary filename that is available. This is briefly
* racy, but unlikely to collide.
*/
- fd = odb_mkstemp(name, "bundles/tmp_uri_XXXXXX");
+ fd = odb_mkstemp(&name, "bundles/tmp_uri_XXXXXX");
if (fd < 0) {
warning(_("failed to create temporary file"));
- return -1;
+ return NULL;
}
close(fd);
- unlink(name->buf);
- return 0;
+ unlink(name.buf);
+ return strbuf_detach(&name, NULL);
}
static int download_https_uri_to_file(const char *file, const char *uri)
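
For reference, a minimal sketch of driving the bundle-list API introduced in the hunk above; "uri" and "filename" are assumed inputs, and the list contents in the comment are made up:

struct bundle_list list;

init_bundle_list(&list);
/*
 * "filename" is assumed to hold a downloaded list in config format, e.g.:
 *
 *   [bundle]
 *       version = 1
 *       mode = all
 *
 *   [bundle "base"]
 *       uri = https://example.com/base.bundle
 */
if (!bundle_uri_parse_config_format(uri, filename, &list))
	print_bundle_list(stdout, &list);
clear_bundle_list(&list);
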
@@ -32,6 +230,7 @@ static int download_https_uri_to_file(const char *file, const char *uri)
int found_get = 0;
strvec_pushl(&cp.args, "git-remote-https", uri, NULL);
+ cp.err = -1;
cp.in = -1;
cp.out = -1;
@@ -105,7 +304,13 @@ static int unbundle_from_file(struct repository *r, const char *file)
if ((bundle_fd = read_bundle_header(file, &header)) < 0)
return 1;
- if ((result = unbundle(r, &header, bundle_fd, NULL)))
+ /*
+ * Skip the reachability walk here, since we will be adding
+ * a reachable ref pointing to the new tips, which will reach
+ * the prerequisite commits.
+ */
+ if ((result = unbundle(r, &header, bundle_fd, NULL,
+ VERIFY_BUNDLE_QUIET)))
return 1;
/*
@@ -138,31 +343,248 @@ static int unbundle_from_file(struct repository *r, const char *file)
return result;
}
-int fetch_bundle_uri(struct repository *r, const char *uri)
+struct bundle_list_context {
+ struct repository *r;
+ struct bundle_list *list;
+ enum bundle_list_mode mode;
+ int count;
+ int depth;
+};
+
+/*
+ * This early definition is necessary because we use indirect recursion:
+ *
+ * While iterating through a bundle list that was downloaded as part
+ * of fetch_bundle_uri_internal(), iterator methods eventually call it
+ * again, but with depth + 1.
+ */
+static int fetch_bundle_uri_internal(struct repository *r,
+ struct remote_bundle_info *bundle,
+ int depth,
+ struct bundle_list *list);
+
+static int download_bundle_to_file(struct remote_bundle_info *bundle, void *data)
{
- int result = 0;
- struct strbuf filename = STRBUF_INIT;
+ int res;
+ struct bundle_list_context *ctx = data;
+
+ if (ctx->mode == BUNDLE_MODE_ANY && ctx->count)
+ return 0;
+
+ res = fetch_bundle_uri_internal(ctx->r, bundle, ctx->depth + 1, ctx->list);
+
+ /*
+ * Only increment count if the download succeeded. If our mode is
+ * BUNDLE_MODE_ANY, then we will want to try other URIs in the
+ * list in case they work instead.
+ */
+ if (!res)
+ ctx->count++;
+
+ /*
+ * To be as opportunistic as possible, we continue iterating and
+ * download as many bundles as we can, so we can apply the ones
+ * that work, even in BUNDLE_MODE_ALL mode.
+ */
+ return 0;
+}
+
+static int download_bundle_list(struct repository *r,
+ struct bundle_list *local_list,
+ struct bundle_list *global_list,
+ int depth)
+{
+ struct bundle_list_context ctx = {
+ .r = r,
+ .list = global_list,
+ .depth = depth + 1,
+ .mode = local_list->mode,
+ };
+
+ return for_all_bundles_in_list(local_list, download_bundle_to_file, &ctx);
+}
+
+static int fetch_bundle_list_in_config_format(struct repository *r,
+ struct bundle_list *global_list,
+ struct remote_bundle_info *bundle,
+ int depth)
+{
+ int result;
+ struct bundle_list list_from_bundle;
- if ((result = find_temp_filename(&filename)))
+ init_bundle_list(&list_from_bundle);
+
+ if ((result = bundle_uri_parse_config_format(bundle->uri,
+ bundle->file,
+ &list_from_bundle)))
goto cleanup;
- if ((result = copy_uri_to_file(filename.buf, uri))) {
- warning(_("failed to download bundle from URI '%s'"), uri);
+ if (list_from_bundle.mode == BUNDLE_MODE_NONE) {
+ warning(_("unrecognized bundle mode from URI '%s'"),
+ bundle->uri);
+ result = -1;
goto cleanup;
}
- if ((result = !is_bundle(filename.buf, 0))) {
- warning(_("file at URI '%s' is not a bundle"), uri);
+ if ((result = download_bundle_list(r, &list_from_bundle,
+ global_list, depth)))
goto cleanup;
+
+cleanup:
+ clear_bundle_list(&list_from_bundle);
+ return result;
+}
+
+/**
+ * This limits the recursion on fetch_bundle_uri_internal() when following
+ * bundle lists.
+ */
+static int max_bundle_uri_depth = 4;
+
+/**
+ * Recursively download all bundles advertised at the given URI
+ * to files. If the file is a bundle, then add it to the given
+ * 'list'. Otherwise, expect a bundle list and recurse on the
+ * URIs in that list according to the list mode (ANY or ALL).
+ */
+static int fetch_bundle_uri_internal(struct repository *r,
+ struct remote_bundle_info *bundle,
+ int depth,
+ struct bundle_list *list)
+{
+ int result = 0;
+ struct remote_bundle_info *bcopy;
+
+ if (depth >= max_bundle_uri_depth) {
+ warning(_("exceeded bundle URI recursion limit (%d)"),
+ max_bundle_uri_depth);
+ return -1;
}
- if ((result = unbundle_from_file(r, filename.buf))) {
- warning(_("failed to unbundle bundle from URI '%s'"), uri);
+ if (!bundle->file &&
+ !(bundle->file = find_temp_filename())) {
+ result = -1;
goto cleanup;
}
+ if ((result = copy_uri_to_file(bundle->file, bundle->uri))) {
+ warning(_("failed to download bundle from URI '%s'"), bundle->uri);
+ goto cleanup;
+ }
+
+ if ((result = !is_bundle(bundle->file, 1))) {
+ result = fetch_bundle_list_in_config_format(
+ r, list, bundle, depth);
+ if (result)
+ warning(_("file at URI '%s' is not a bundle or bundle list"),
+ bundle->uri);
+ goto cleanup;
+ }
+
+ /* Copy the bundle and insert it into the global list. */
+ CALLOC_ARRAY(bcopy, 1);
+ bcopy->id = xstrdup(bundle->id);
+ bcopy->file = xstrdup(bundle->file);
+ hashmap_entry_init(&bcopy->ent, strhash(bcopy->id));
+ hashmap_add(&list->bundles, &bcopy->ent);
+
cleanup:
- unlink(filename.buf);
- strbuf_release(&filename);
+ if (result && bundle->file)
+ unlink(bundle->file);
+ return result;
+}
+
+/**
+ * This loop iterator breaks the loop with a nonzero return code on the
+ * first successful unbundling of a bundle.
+ */
+static int attempt_unbundle(struct remote_bundle_info *info, void *data)
+{
+ struct repository *r = data;
+
+ if (!info->file || info->unbundled)
+ return 0;
+
+ if (!unbundle_from_file(r, info->file)) {
+ info->unbundled = 1;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int unbundle_all_bundles(struct repository *r,
+ struct bundle_list *list)
+{
+ /*
+ * Iterate through all bundles looking for ones that can
+ * successfully unbundle. If any succeed, then perhaps another
+ * will succeed in the next attempt.
+ *
+ * Keep in mind that a non-zero result for the loop here means
+ * the loop terminated early on a successful unbundling, which
+ * signals that we can try again.
+ */
+ while (for_all_bundles_in_list(list, attempt_unbundle, r)) ;
+
+ return 0;
+}
+
+static int unlink_bundle(struct remote_bundle_info *info, void *data)
+{
+ if (info->file)
+ unlink_or_warn(info->file);
+ return 0;
+}
+
+int fetch_bundle_uri(struct repository *r, const char *uri)
+{
+ int result;
+ struct bundle_list list;
+ struct remote_bundle_info bundle = {
+ .uri = xstrdup(uri),
+ .id = xstrdup(""),
+ };
+
+ init_bundle_list(&list);
+
+ /* If a bundle is added to this global list, then it is required. */
+ list.mode = BUNDLE_MODE_ALL;
+
+ if ((result = fetch_bundle_uri_internal(r, &bundle, 0, &list)))
+ goto cleanup;
+
+ result = unbundle_all_bundles(r, &list);
+
+cleanup:
+ for_all_bundles_in_list(&list, unlink_bundle, NULL);
+ clear_bundle_list(&list);
+ clear_remote_bundle_info(&bundle, NULL);
+ return result;
+}
+
+/**
+ * General API for {transport,connect}.c etc.
+ */
+int bundle_uri_parse_line(struct bundle_list *list, const char *line)
+{
+ int result;
+ const char *equals;
+ struct strbuf key = STRBUF_INIT;
+
+ if (!strlen(line))
+ return error(_("bundle-uri: got an empty line"));
+
+ equals = strchr(line, '=');
+
+ if (!equals)
+ return error(_("bundle-uri: line is not of the form 'key=value'"));
+ if (line == equals || !*(equals + 1))
+ return error(_("bundle-uri: line has empty key or value"));
+
+ strbuf_add(&key, line, equals - line);
+ result = bundle_list_update(key.buf, equals + 1, list);
+ strbuf_release(&key);
+
return result;
}
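
A rough sketch of how a caller might drive bundle_uri_parse_line() and then
walk the accumulated list (not part of this patch; it assumes compilation
inside the Git tree, and show_bundle()/parse_advertised_lines() are invented
for illustration):

    #include "cache.h"
    #include "bundle-uri.h"

    /* print each accumulated bundle; returning 0 keeps the iteration going */
    static int show_bundle(struct remote_bundle_info *info, void *data UNUSED)
    {
            printf("id=%s uri=%s\n",
                   info->id ? info->id : "(none)",
                   info->uri ? info->uri : "(none)");
            return 0;
    }

    static void parse_advertised_lines(const char **lines, size_t nr)
    {
            struct bundle_list list;
            size_t i;

            init_bundle_list(&list);
            for (i = 0; i < nr; i++)
                    if (bundle_uri_parse_line(&list, lines[i]))
                            break; /* error() has already been reported */
            for_all_bundles_in_list(&list, show_bundle, NULL);
            clear_bundle_list(&list);
    }
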
diff --git a/bundle-uri.h b/bundle-uri.h
index 8a152f1ef1..4dbc269823 100644
--- a/bundle-uri.h
+++ b/bundle-uri.h
@@ -1,7 +1,88 @@
#ifndef BUNDLE_URI_H
#define BUNDLE_URI_H
+#include "hashmap.h"
+#include "strbuf.h"
+
struct repository;
+struct string_list;
+
+/**
+ * The remote_bundle_info struct contains information for a single bundle
+ * URI. This may be initialized simply by a given URI or might have
+ * additional metadata associated with it if the bundle was advertised by
+ * a bundle list.
+ */
+struct remote_bundle_info {
+ struct hashmap_entry ent;
+
+ /**
+ * The 'id' is a name given to the bundle for reference
+ * by other bundle infos.
+ */
+ char *id;
+
+ /**
+ * The 'uri' is the location of the remote bundle so
+ * it can be downloaded on-demand. This will be NULL
+ * if there was no table of contents.
+ */
+ char *uri;
+
+ /**
+ * If the bundle has been downloaded, then 'file' is a
+ * filename storing its contents. Otherwise, 'file' is
+ * NULL.
+ */
+ char *file;
+
+ /**
+ * If the bundle has been unbundled successfully, then
+ * this boolean is true.
+ */
+ unsigned unbundled:1;
+};
+
+#define REMOTE_BUNDLE_INFO_INIT { 0 }
+
+enum bundle_list_mode {
+ BUNDLE_MODE_NONE = 0,
+ BUNDLE_MODE_ALL,
+ BUNDLE_MODE_ANY
+};
+
+/**
+ * A bundle_list contains an unordered set of remote_bundle_info structs,
+ * as well as information about the bundle listing, such as version and
+ * mode.
+ */
+struct bundle_list {
+ int version;
+ enum bundle_list_mode mode;
+ struct hashmap bundles;
+};
+
+void init_bundle_list(struct bundle_list *list);
+void clear_bundle_list(struct bundle_list *list);
+
+typedef int (*bundle_iterator)(struct remote_bundle_info *bundle,
+ void *data);
+
+int for_all_bundles_in_list(struct bundle_list *list,
+ bundle_iterator iter,
+ void *data);
+
+struct FILE;
+void print_bundle_list(FILE *fp, struct bundle_list *list);
+
+/**
+ * A bundle URI may point to a bundle list where the key=value
+ * pairs are provided in config file format. This method is
+ * exposed publicly for testing purposes.
+ */
+int bundle_uri_parse_config_format(const char *uri,
+ const char *filename,
+ struct bundle_list *list);
/**
* Fetch data from the given 'uri' and unbundle the bundle data found
@@ -11,4 +92,16 @@ struct repository;
*/
int fetch_bundle_uri(struct repository *r, const char *uri);
+/**
+ * General API for {transport,connect}.c etc.
+ */
+
+/**
+ * Parse a "key=value" packet line from the bundle-uri verb.
+ *
+ * Returns 0 on success and non-zero on error.
+ */
+int bundle_uri_parse_line(struct bundle_list *list,
+ const char *line);
+
#endif
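
For the config-file flavor of a bundle list, a caller hands
bundle_uri_parse_config_format() the URI it fetched from and the local file it
saved. A minimal sketch (the URI and path are placeholders, and
load_bundle_list() is invented for illustration):

    #include "cache.h"
    #include "bundle-uri.h"

    static int load_bundle_list(struct bundle_list *list)
    {
            init_bundle_list(list);

            /* parse a previously downloaded list in config-file format */
            if (bundle_uri_parse_config_format("https://example.com/bundle-list",
                                               "/tmp/bundle-list", list)) {
                    clear_bundle_list(list);
                    return error(_("could not parse downloaded bundle list"));
            }

            /* dump the parsed version, mode and per-bundle URIs */
            print_bundle_list(stderr, list);
            return 0;
    }
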
diff --git a/bundle.c b/bundle.c
index 0208e6d90d..4ef7256aa1 100644
--- a/bundle.c
+++ b/bundle.c
@@ -189,7 +189,7 @@ static int list_refs(struct string_list *r, int argc, const char **argv)
int verify_bundle(struct repository *r,
struct bundle_header *header,
- int verbose)
+ enum verify_bundle_flags flags)
{
/*
* Do fast check, then if any prereqs are missing then go line by line
@@ -202,10 +202,8 @@ int verify_bundle(struct repository *r,
int i, ret = 0, req_nr;
const char *message = _("Repository lacks these prerequisite commits:");
- if (!r || !r->objects || !r->objects->odb) {
- ret = error(_("need a repository to verify a bundle"));
- goto cleanup;
- }
+ if (!r || !r->objects || !r->objects->odb)
+ return error(_("need a repository to verify a bundle"));
repo_init_revisions(r, &revs, NULL);
for (i = 0; i < p->nr; i++) {
@@ -218,7 +216,10 @@ int verify_bundle(struct repository *r,
add_pending_object(&revs, o, name);
continue;
}
- if (++ret == 1)
+ ret++;
+ if (flags & VERIFY_BUNDLE_QUIET)
+ continue;
+ if (ret == 1)
error("%s", message);
error("%s %s", oid_to_hex(oid), name);
}
@@ -245,21 +246,15 @@ int verify_bundle(struct repository *r,
assert(o); /* otherwise we'd have returned early */
if (o->flags & SHOWN)
continue;
- if (++ret == 1)
+ ret++;
+ if (flags & VERIFY_BUNDLE_QUIET)
+ continue;
+ if (ret == 1)
error("%s", message);
error("%s %s", oid_to_hex(oid), name);
}
- /* Clean up objects used, as they will be reused. */
- for (i = 0; i < p->nr; i++) {
- struct string_list_item *e = p->items + i;
- struct object_id *oid = e->util;
- commit = lookup_commit_reference_gently(r, oid, 1);
- if (commit)
- clear_commit_marks(commit, ALL_REV_FLAGS);
- }
-
- if (verbose) {
+ if (flags & VERIFY_BUNDLE_VERBOSE) {
struct string_list *r;
r = &header->references;
@@ -287,6 +282,14 @@ int verify_bundle(struct repository *r,
list_objects_filter_spec(&header->filter));
}
cleanup:
+ /* Clean up objects used, as they will be reused. */
+ for (i = 0; i < p->nr; i++) {
+ struct string_list_item *e = p->items + i;
+ struct object_id *oid = e->util;
+ commit = lookup_commit_reference_gently(r, oid, 1);
+ if (commit)
+ clear_commit_marks(commit, ALL_REV_FLAGS | PREREQ_MARK);
+ }
release_revisions(&revs);
return ret;
}
@@ -620,7 +623,8 @@ err:
}
int unbundle(struct repository *r, struct bundle_header *header,
- int bundle_fd, struct strvec *extra_index_pack_args)
+ int bundle_fd, struct strvec *extra_index_pack_args,
+ enum verify_bundle_flags flags)
{
struct child_process ip = CHILD_PROCESS_INIT;
strvec_pushl(&ip.args, "index-pack", "--fix-thin", "--stdin", NULL);
@@ -634,7 +638,7 @@ int unbundle(struct repository *r, struct bundle_header *header,
strvec_clear(extra_index_pack_args);
}
- if (verify_bundle(r, header, 0))
+ if (verify_bundle(r, header, flags))
return -1;
ip.in = bundle_fd;
ip.no_stdout = 1;
diff --git a/bundle.h b/bundle.h
index 68ff39a0a7..9f2bd733a6 100644
--- a/bundle.h
+++ b/bundle.h
@@ -30,7 +30,14 @@ int read_bundle_header_fd(int fd, struct bundle_header *header,
int create_bundle(struct repository *r, const char *path,
int argc, const char **argv, struct strvec *pack_options,
int version);
-int verify_bundle(struct repository *r, struct bundle_header *header, int verbose);
+
+enum verify_bundle_flags {
+ VERIFY_BUNDLE_VERBOSE = (1 << 0),
+ VERIFY_BUNDLE_QUIET = (1 << 1),
+};
+
+int verify_bundle(struct repository *r, struct bundle_header *header,
+ enum verify_bundle_flags flags);
/**
* Unbundle after reading the header with read_bundle_header().
@@ -41,9 +48,13 @@ int verify_bundle(struct repository *r, struct bundle_header *header, int verbos
* Provide "extra_index_pack_args" to pass any extra arguments
* (e.g. "-v" for verbose/progress), NULL otherwise. The provided
* "extra_index_pack_args" (if any) will be strvec_clear()'d for you.
+ *
+ * Before unbundling, this method will call verify_bundle() with the
+ * given 'flags'.
*/
int unbundle(struct repository *r, struct bundle_header *header,
- int bundle_fd, struct strvec *extra_index_pack_args);
+ int bundle_fd, struct strvec *extra_index_pack_args,
+ enum verify_bundle_flags flags);
int list_bundle_refs(struct bundle_header *header,
int argc, const char **argv);
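
With the extra flags parameter, a caller chooses how chatty verify_bundle() is
before unbundling. A sketch of a quiet unbundle in the spirit of the bundle-uri
code above (quiet_unbundle() is illustrative; it assumes BUNDLE_HEADER_INIT and
bundle_header_release() from this header):

    #include "cache.h"
    #include "bundle.h"

    static int quiet_unbundle(struct repository *r, const char *path)
    {
            struct bundle_header header = BUNDLE_HEADER_INIT;
            int bundle_fd, ret;

            if ((bundle_fd = read_bundle_header(path, &header)) < 0)
                    return -1;

            /* no extra index-pack arguments; suppress prerequisite errors */
            ret = unbundle(r, &header, bundle_fd, NULL, VERIFY_BUNDLE_QUIET);

            bundle_header_release(&header);
            return ret;
    }
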
diff --git a/cache.h b/cache.h
index 26ed03bd6d..627d584a9e 100644
--- a/cache.h
+++ b/cache.h
@@ -505,6 +505,7 @@ static inline enum object_type object_type(unsigned int mode)
#define GIT_WORK_TREE_ENVIRONMENT "GIT_WORK_TREE"
#define GIT_PREFIX_ENVIRONMENT "GIT_PREFIX"
#define GIT_SUPER_PREFIX_ENVIRONMENT "GIT_INTERNAL_SUPER_PREFIX"
+#define GIT_SUBMODULE_PROPAGATE_BRANCHES_ENVIRONMENT "GIT_INTERNAL_SUBMODULE_PROPAGATE_BRANCHES"
#define DEFAULT_GIT_DIR_ENVIRONMENT ".git"
#define DB_ENVIRONMENT "GIT_OBJECT_DIRECTORY"
#define INDEX_ENVIRONMENT "GIT_INDEX_FILE"
@@ -1155,6 +1156,8 @@ struct repository_format {
int hash_algo;
int sparse_index;
char *work_tree;
+ int ref_format_count;
+ enum ref_format_flags ref_format;
struct string_list unknown_extensions;
struct string_list v1_only_extensions;
};
diff --git a/chunk-format.c b/chunk-format.c
index 0275b74a89..e836a121c5 100644
--- a/chunk-format.c
+++ b/chunk-format.c
@@ -13,6 +13,7 @@ struct chunk_info {
chunk_write_fn write_fn;
const void *start;
+ off_t offset;
};
struct chunkfile {
@@ -56,38 +57,59 @@ void add_chunk(struct chunkfile *cf,
cf->chunks_nr++;
}
-int write_chunkfile(struct chunkfile *cf, void *data)
+int write_chunkfile(struct chunkfile *cf,
+ enum chunkfile_flags flags,
+ void *data)
{
int i, result = 0;
- uint64_t cur_offset = hashfile_total(cf->f);
trace2_region_enter("chunkfile", "write", the_repository);
- /* Add the table of contents to the current offset */
- cur_offset += (cf->chunks_nr + 1) * CHUNK_TOC_ENTRY_SIZE;
+ if (!(flags & CHUNKFILE_TRAILING_TOC)) {
+ uint64_t cur_offset = hashfile_total(cf->f);
- for (i = 0; i < cf->chunks_nr; i++) {
- hashwrite_be32(cf->f, cf->chunks[i].id);
- hashwrite_be64(cf->f, cur_offset);
+ /* Add the table of contents to the current offset */
+ cur_offset += (cf->chunks_nr + 1) * CHUNK_TOC_ENTRY_SIZE;
- cur_offset += cf->chunks[i].size;
- }
+ for (i = 0; i < cf->chunks_nr; i++) {
+ hashwrite_be32(cf->f, cf->chunks[i].id);
+ hashwrite_be64(cf->f, cur_offset);
+
+ cur_offset += cf->chunks[i].size;
+ }
- /* Trailing entry marks the end of the chunks */
- hashwrite_be32(cf->f, 0);
- hashwrite_be64(cf->f, cur_offset);
+ /* Trailing entry marks the end of the chunks */
+ hashwrite_be32(cf->f, 0);
+ hashwrite_be64(cf->f, cur_offset);
+ }
for (i = 0; i < cf->chunks_nr; i++) {
- off_t start_offset = hashfile_total(cf->f);
+ cf->chunks[i].offset = hashfile_total(cf->f);
result = cf->chunks[i].write_fn(cf->f, data);
if (result)
goto cleanup;
- if (hashfile_total(cf->f) - start_offset != cf->chunks[i].size)
- BUG("expected to write %"PRId64" bytes to chunk %"PRIx32", but wrote %"PRId64" instead",
- cf->chunks[i].size, cf->chunks[i].id,
- hashfile_total(cf->f) - start_offset);
+ if (!(flags & CHUNKFILE_TRAILING_TOC)) {
+ if (hashfile_total(cf->f) - cf->chunks[i].offset != cf->chunks[i].size)
+ BUG("expected to write %"PRId64" bytes to chunk %"PRIx32", but wrote %"PRId64" instead",
+ cf->chunks[i].size, cf->chunks[i].id,
+ hashfile_total(cf->f) - cf->chunks[i].offset);
+ }
+
+ cf->chunks[i].size = hashfile_total(cf->f) - cf->chunks[i].offset;
+ }
+
+ if (flags & CHUNKFILE_TRAILING_TOC) {
+ size_t last_chunk_tail = hashfile_total(cf->f);
+ /* First entry marks the end of the chunks */
+ hashwrite_be32(cf->f, 0);
+ hashwrite_be64(cf->f, last_chunk_tail);
+
+ for (i = cf->chunks_nr - 1; i >= 0; i--) {
+ hashwrite_be32(cf->f, cf->chunks[i].id);
+ hashwrite_be64(cf->f, cf->chunks[i].offset);
+ }
}
cleanup:
@@ -151,6 +173,59 @@ int read_table_of_contents(struct chunkfile *cf,
return 0;
}
+int read_trailing_table_of_contents(struct chunkfile *cf,
+ const unsigned char *mfile,
+ size_t mfile_size)
+{
+ int i;
+ uint32_t chunk_id;
+ const unsigned char *table_of_contents = mfile + mfile_size - the_hash_algo->rawsz;
+
+ while (1) {
+ uint64_t chunk_offset;
+
+ table_of_contents -= CHUNK_TOC_ENTRY_SIZE;
+
+ chunk_id = get_be32(table_of_contents);
+ chunk_offset = get_be64(table_of_contents + 4);
+
+ /* Calculate the previous chunk size, if it exists. */
+ if (cf->chunks_nr) {
+ off_t previous_offset = cf->chunks[cf->chunks_nr - 1].offset;
+
+ if (chunk_offset < previous_offset ||
+ chunk_offset > table_of_contents - mfile) {
+ error(_("improper chunk offset(s) %"PRIx64" and %"PRIx64""),
+ previous_offset, chunk_offset);
+ return -1;
+ }
+
+ cf->chunks[cf->chunks_nr - 1].size = chunk_offset - previous_offset;
+ }
+
+ /* Stop at the null chunk. We only need it for the last size. */
+ if (!chunk_id)
+ break;
+
+ for (i = 0; i < cf->chunks_nr; i++) {
+ if (cf->chunks[i].id == chunk_id) {
+ error(_("duplicate chunk ID %"PRIx32" found"),
+ chunk_id);
+ return -1;
+ }
+ }
+
+ ALLOC_GROW(cf->chunks, cf->chunks_nr + 1, cf->chunks_alloc);
+
+ cf->chunks[cf->chunks_nr].id = chunk_id;
+ cf->chunks[cf->chunks_nr].start = mfile + chunk_offset;
+ cf->chunks[cf->chunks_nr].offset = chunk_offset;
+ cf->chunks_nr++;
+ }
+
+ return 0;
+}
+
static int pair_chunk_fn(const unsigned char *chunk_start,
size_t chunk_size,
void *data)
diff --git a/chunk-format.h b/chunk-format.h
index 7885aa0848..acb8dfbce8 100644
--- a/chunk-format.h
+++ b/chunk-format.h
@@ -31,7 +31,14 @@ void add_chunk(struct chunkfile *cf,
uint32_t id,
size_t size,
chunk_write_fn fn);
-int write_chunkfile(struct chunkfile *cf, void *data);
+
+enum chunkfile_flags {
+ CHUNKFILE_TRAILING_TOC = (1 << 0),
+};
+
+int write_chunkfile(struct chunkfile *cf,
+ enum chunkfile_flags flags,
+ void *data);
int read_table_of_contents(struct chunkfile *cf,
const unsigned char *mfile,
@@ -39,6 +46,15 @@ int read_table_of_contents(struct chunkfile *cf,
uint64_t toc_offset,
int toc_length);
+/**
+ * Read the given chunkfile, but read the table of contents from the
+ * end of the given mfile. The file is expected to be a hashfile with
+ * the_hash_algo->rawsz bytes at the end storing the hash.
+ */
+int read_trailing_table_of_contents(struct chunkfile *cf,
+ const unsigned char *mfile,
+ size_t mfile_size);
+
#define CHUNK_NOT_FOUND (-2)
/*
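
A sketch of writing a file with a trailing table of contents, where chunk sizes
need not be known up front (the chunk ID, payload and write_my_chunk() callback
are invented for illustration; this assumes csum-file.h's hashfile API):

    #include "cache.h"
    #include "csum-file.h"
    #include "chunk-format.h"

    #define MY_CHUNK_ID 0x74657374 /* "test", illustrative only */

    static int write_my_chunk(struct hashfile *f, void *data)
    {
            const char *payload = data;
            hashwrite(f, payload, strlen(payload));
            return 0;
    }

    static void write_with_trailing_toc(struct hashfile *f, const char *payload)
    {
            struct chunkfile *cf = init_chunkfile(f);

            /* the declared size is advisory; the trailing TOC records offsets */
            add_chunk(cf, MY_CHUNK_ID, strlen(payload), write_my_chunk);
            write_chunkfile(cf, CHUNKFILE_TRAILING_TOC, (void *)payload);
            free_chunkfile(cf);
    }
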
diff --git a/ci/lib.sh b/ci/lib.sh
index 1b0cc2b57d..24d20a5d64 100755
--- a/ci/lib.sh
+++ b/ci/lib.sh
@@ -259,6 +259,8 @@ macos-latest)
MAKEFLAGS="$MAKEFLAGS PYTHON_PATH=$(which python3)"
else
MAKEFLAGS="$MAKEFLAGS PYTHON_PATH=$(which python2)"
+ MAKEFLAGS="$MAKEFLAGS NO_APPLE_COMMON_CRYPTO=NoThanks"
+ MAKEFLAGS="$MAKEFLAGS NO_OPENSSL=NoThanks"
fi
;;
esac
@@ -278,6 +280,12 @@ linux-leaks)
export GIT_TEST_PASSING_SANITIZE_LEAK=true
export GIT_TEST_SANITIZE_LEAK_LOG=true
;;
+linux-asan)
+ export SANITIZE=address
+ ;;
+linux-ubsan)
+ export SANITIZE=undefined
+ ;;
esac
MAKEFLAGS="$MAKEFLAGS CC=${CC:-cc}"
diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh
index 8ebff42596..32eb280da0 100755
--- a/ci/run-build-and-tests.sh
+++ b/ci/run-build-and-tests.sh
@@ -30,6 +30,7 @@ linux-TEST-vars)
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=master
export GIT_TEST_WRITE_REV_INDEX=1
export GIT_TEST_CHECKOUT_WORKERS=2
+ export GIT_TEST_PACKED_REFS_VERSION=2
;;
linux-clang)
export GIT_TEST_DEFAULT_HASH=sha1
@@ -45,10 +46,19 @@ pedantic)
;;
esac
-group Build make
+mc=
+if test "$jobname" = "linux-cmake-ctest"
+then
+ cb=contrib/buildsystems
+ group CMake cmake -S "$cb" -B "$cb/out"
+ mc="-C $cb/out"
+fi
+
+group Build make $mc
+
if test -n "$run_tests"
then
- group "Run tests" make test ||
+ group "Run tests" make $mc test ||
handle_failed_tests
fi
check_unignored_build_artifacts
diff --git a/combine-diff.c b/combine-diff.c
index b0ece95480..88efcaeefa 100644
--- a/combine-diff.c
+++ b/combine-diff.c
@@ -1060,7 +1060,8 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent,
elem->mode = canon_mode(st.st_mode);
} else if (S_ISDIR(st.st_mode)) {
struct object_id oid;
- if (resolve_gitlink_ref(elem->path, "HEAD", &oid) < 0)
+ if (resolve_gitlink_ref(elem->path, "HEAD", &oid,
+ NULL) < 0)
result = grab_blob(opt->repo, &elem->oid,
elem->mode, &result_size,
NULL, NULL);
diff --git a/commit-graph.c b/commit-graph.c
index a7d8755932..c927b81250 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -1932,7 +1932,7 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
get_num_chunks(cf) * ctx->commits.nr);
}
- write_chunkfile(cf, ctx);
+ write_chunkfile(cf, 0, ctx);
stop_progress(&ctx->progress);
strbuf_release(&progress_title);
diff --git a/commit.c b/commit.c
index 89b8efc611..572301b80a 100644
--- a/commit.c
+++ b/commit.c
@@ -59,6 +59,14 @@ struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref
return c;
}
+struct commit *lookup_commit_object(struct repository *r,
+ const struct object_id *oid)
+{
+ struct object *obj = parse_object(r, oid);
+ return obj ? object_as_type(obj, OBJ_COMMIT, 0) : NULL;
+
+}
+
struct commit *lookup_commit(struct repository *r, const struct object_id *oid)
{
struct object *obj = lookup_object(r, oid);
diff --git a/commit.h b/commit.h
index 21e4d25ce7..fa39202fa6 100644
--- a/commit.h
+++ b/commit.h
@@ -64,6 +64,19 @@ enum decoration_type {
void add_name_decoration(enum decoration_type type, const char *name, struct object *obj);
const struct name_decoration *get_name_decoration(const struct object *obj);
+/*
+ * Look up commit named by "oid" respecting replacement objects.
+ * Returns NULL if "oid" is not a commit or does not exist.
+ */
+struct commit *lookup_commit_object(struct repository *r, const struct object_id *oid);
+
+/*
+ * Look up commit named by "oid" without replacement objects or
+ * checking for object existence. Returns the requested commit if it
+ * is found in the object cache, NULL if "oid" is in the object cache
+ * but is not a commit and a newly allocated unparsed commit object if
+ * "oid" is not in the object cache.
+ */
struct commit *lookup_commit(struct repository *r, const struct object_id *oid);
struct commit *lookup_commit_reference(struct repository *r,
const struct object_id *oid);
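
The practical difference is that lookup_commit_object() hands back a parsed
commit or NULL, whereas lookup_commit() may return an unparsed placeholder. A
small sketch (report_commit() is illustrative):

    #include "cache.h"
    #include "commit.h"

    static int report_commit(struct repository *r, const struct object_id *oid)
    {
            struct commit *c = lookup_commit_object(r, oid);

            if (!c)
                    return error(_("'%s' is not a commit"), oid_to_hex(oid));

            /* the commit is parsed, so its metadata is safe to read */
            printf("committer date: %"PRItime"\n", c->date);
            return 0;
    }
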
diff --git a/common-main.c b/common-main.c
index c531372f3f..0a22861f1c 100644
--- a/common-main.c
+++ b/common-main.c
@@ -40,6 +40,7 @@ int main(int argc, const char **argv)
git_resolve_executable_dir(argv[0]);
+ setlocale(LC_CTYPE, "");
git_setup_gettext();
initialize_the_repository();
diff --git a/compat/fsmonitor/fsm-ipc-darwin.c b/compat/fsmonitor/fsm-ipc-darwin.c
new file mode 100644
index 0000000000..d67b0ee50d
--- /dev/null
+++ b/compat/fsmonitor/fsm-ipc-darwin.c
@@ -0,0 +1,52 @@
+#include "cache.h"
+#include "config.h"
+#include "strbuf.h"
+#include "fsmonitor.h"
+#include "fsmonitor-ipc.h"
+#include "fsmonitor-path-utils.h"
+
+static GIT_PATH_FUNC(fsmonitor_ipc__get_default_path, "fsmonitor--daemon.ipc")
+
+const char *fsmonitor_ipc__get_path(struct repository *r)
+{
+ static const char *ipc_path = NULL;
+ git_SHA_CTX sha1ctx;
+ char *sock_dir = NULL;
+ struct strbuf ipc_file = STRBUF_INIT;
+ unsigned char hash[GIT_MAX_RAWSZ];
+
+ if (!r)
+ BUG("No repository passed into fsmonitor_ipc__get_path");
+
+ if (ipc_path)
+ return ipc_path;
+
+
+ /* By default the socket file is created in the .git directory */
+ if (fsmonitor__is_fs_remote(r->gitdir) < 1) {
+ ipc_path = fsmonitor_ipc__get_default_path();
+ return ipc_path;
+ }
+
+ git_SHA1_Init(&sha1ctx);
+ git_SHA1_Update(&sha1ctx, r->worktree, strlen(r->worktree));
+ git_SHA1_Final(hash, &sha1ctx);
+
+ repo_config_get_string(r, "fsmonitor.socketdir", &sock_dir);
+
+ /* Create the socket file in either socketDir or $HOME */
+ if (sock_dir && *sock_dir) {
+ strbuf_addf(&ipc_file, "%s/.git-fsmonitor-%s",
+ sock_dir, hash_to_hex(hash));
+ } else {
+ strbuf_addf(&ipc_file, "~/.git-fsmonitor-%s", hash_to_hex(hash));
+ }
+ free(sock_dir);
+
+ ipc_path = interpolate_path(ipc_file.buf, 1);
+ if (!ipc_path)
+ die(_("Invalid path: %s"), ipc_file.buf);
+
+ strbuf_release(&ipc_file);
+ return ipc_path;
+}
diff --git a/compat/fsmonitor/fsm-ipc-win32.c b/compat/fsmonitor/fsm-ipc-win32.c
new file mode 100644
index 0000000000..e08c505c14
--- /dev/null
+++ b/compat/fsmonitor/fsm-ipc-win32.c
@@ -0,0 +1,9 @@
+#include "config.h"
+#include "fsmonitor-ipc.h"
+
+const char *fsmonitor_ipc__get_path(struct repository *r) {
+ static char *ret;
+ if (!ret)
+ ret = git_pathdup("fsmonitor--daemon.ipc");
+ return ret;
+}
diff --git a/compat/fsmonitor/fsm-listen-darwin.c b/compat/fsmonitor/fsm-listen-darwin.c
index 8e208e8289..cc9af1e3cb 100644
--- a/compat/fsmonitor/fsm-listen-darwin.c
+++ b/compat/fsmonitor/fsm-listen-darwin.c
@@ -26,6 +26,7 @@
#include "fsmonitor.h"
#include "fsm-listen.h"
#include "fsmonitor--daemon.h"
+#include "fsmonitor-path-utils.h"
struct fsm_listen_data
{
@@ -198,8 +199,9 @@ static void fsevent_callback(ConstFSEventStreamRef streamRef,
struct string_list cookie_list = STRING_LIST_INIT_DUP;
const char *path_k;
const char *slash;
- int k;
+ char *resolved = NULL;
struct strbuf tmp = STRBUF_INIT;
+ int k;
/*
* Build a list of all filesystem changes into a private/local
@@ -209,7 +211,12 @@ static void fsevent_callback(ConstFSEventStreamRef streamRef,
/*
* On Mac, we receive an array of absolute paths.
*/
- path_k = paths[k];
+ free(resolved);
+ resolved = fsmonitor__resolve_alias(paths[k], &state->alias);
+ if (resolved)
+ path_k = resolved;
+ else
+ path_k = paths[k];
/*
* If you want to debug FSEvents, log them to GIT_TRACE_FSMONITOR.
@@ -238,6 +245,7 @@ static void fsevent_callback(ConstFSEventStreamRef streamRef,
fsmonitor_force_resync(state);
fsmonitor_batch__free_list(batch);
string_list_clear(&cookie_list, 0);
+ batch = NULL;
/*
* We assume that any events that we received
@@ -328,7 +336,7 @@ static void fsevent_callback(ConstFSEventStreamRef streamRef,
* know how much to invalidate/refresh.
*/
- if (event_flags[k] & kFSEventStreamEventFlagItemIsFile) {
+ if (event_flags[k] & (kFSEventStreamEventFlagItemIsFile | kFSEventStreamEventFlagItemIsSymlink)) {
const char *rel = path_k +
state->path_worktree_watch.len + 1;
@@ -360,12 +368,14 @@ static void fsevent_callback(ConstFSEventStreamRef streamRef,
}
}
+ free(resolved);
fsmonitor_publish(state, batch, &cookie_list);
string_list_clear(&cookie_list, 0);
strbuf_release(&tmp);
return;
force_shutdown:
+ free(resolved);
fsmonitor_batch__free_list(batch);
string_list_clear(&cookie_list, 0);
diff --git a/compat/fsmonitor/fsm-path-utils-darwin.c b/compat/fsmonitor/fsm-path-utils-darwin.c
new file mode 100644
index 0000000000..ce5a8febe0
--- /dev/null
+++ b/compat/fsmonitor/fsm-path-utils-darwin.c
@@ -0,0 +1,135 @@
+#include "fsmonitor.h"
+#include "fsmonitor-path-utils.h"
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/param.h>
+#include <sys/mount.h>
+
+int fsmonitor__get_fs_info(const char *path, struct fs_info *fs_info)
+{
+ struct statfs fs;
+ if (statfs(path, &fs) == -1) {
+ int saved_errno = errno;
+ trace_printf_key(&trace_fsmonitor, "statfs('%s') failed: %s",
+ path, strerror(saved_errno));
+ errno = saved_errno;
+ return -1;
+ }
+
+ trace_printf_key(&trace_fsmonitor,
+ "statfs('%s') [type 0x%08x][flags 0x%08x] '%s'",
+ path, fs.f_type, fs.f_flags, fs.f_fstypename);
+
+ if (!(fs.f_flags & MNT_LOCAL))
+ fs_info->is_remote = 1;
+ else
+ fs_info->is_remote = 0;
+
+ fs_info->typename = xstrdup(fs.f_fstypename);
+
+ trace_printf_key(&trace_fsmonitor,
+ "'%s' is_remote: %d",
+ path, fs_info->is_remote);
+ return 0;
+}
+
+int fsmonitor__is_fs_remote(const char *path)
+{
+ struct fs_info fs;
+ if (fsmonitor__get_fs_info(path, &fs))
+ return -1;
+
+ free(fs.typename);
+
+ return fs.is_remote;
+}
+
+/*
+ * Scan the root directory for synthetic firmlinks that when resolved
+ * are a prefix of the path, stopping at the first one found.
+ *
+ * Some information about firmlinks and synthetic firmlinks:
+ * https://eclecticlight.co/2020/01/23/catalina-boot-volumes/
+ *
+ * macOS no longer allows symlinks in the root directory; any link found
+ * there is therefore a synthetic firmlink.
+ *
+ * If this function gets called often, we will want to cache all the firmlink
+ * information, but for now there is only one caller of this function.
+ *
+ * If there is more than one alias for the path, that is another
+ * matter altogether.
+ */
+int fsmonitor__get_alias(const char *path, struct alias_info *info)
+{
+ DIR *dir;
+ int retval = -1;
+ const char *const root = "/";
+ struct stat st;
+ struct dirent *de;
+ struct strbuf alias;
+ struct strbuf points_to = STRBUF_INIT;
+
+ dir = opendir(root);
+ if (!dir)
+ return error_errno(_("opendir('%s') failed"), root);
+
+ strbuf_init(&alias, 256);
+
+ while ((de = readdir(dir)) != NULL) {
+ strbuf_reset(&alias);
+ strbuf_addf(&alias, "%s%s", root, de->d_name);
+
+ if (lstat(alias.buf, &st) < 0) {
+ error_errno(_("lstat('%s') failed"), alias.buf);
+ goto done;
+ }
+
+ if (!S_ISLNK(st.st_mode))
+ continue;
+
+ if (strbuf_readlink(&points_to, alias.buf, st.st_size) < 0) {
+ error_errno(_("strbuf_readlink('%s') failed"), alias.buf);
+ goto done;
+ }
+
+ if (!strncmp(points_to.buf, path, points_to.len) &&
+ (path[points_to.len] == '/')) {
+ strbuf_addbuf(&info->alias, &alias);
+ strbuf_addbuf(&info->points_to, &points_to);
+ trace_printf_key(&trace_fsmonitor,
+ "Found alias for '%s' : '%s' -> '%s'",
+ path, info->alias.buf, info->points_to.buf);
+ retval = 0;
+ goto done;
+ }
+ }
+ retval = 0; /* no alias */
+
+done:
+ strbuf_release(&alias);
+ strbuf_release(&points_to);
+ if (closedir(dir) < 0)
+ return error_errno(_("closedir('%s') failed"), root);
+ return retval;
+}
+
+char *fsmonitor__resolve_alias(const char *path,
+ const struct alias_info *info)
+{
+ if (!info->alias.len)
+ return NULL;
+
+ if ((!strncmp(info->alias.buf, path, info->alias.len))
+ && path[info->alias.len] == '/') {
+ struct strbuf tmp = STRBUF_INIT;
+ const char *remainder = path + info->alias.len;
+
+ strbuf_addbuf(&tmp, &info->points_to);
+ strbuf_add(&tmp, remainder, strlen(remainder));
+ return strbuf_detach(&tmp, NULL);
+ }
+
+ return NULL;
+}
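
Put together, fsmonitor__get_alias() records a root-level firmlink whose target
prefixes the watched path, and fsmonitor__resolve_alias() rewrites paths under
that firmlink onto the target. A sketch with hand-filled, purely illustrative
values (demo_resolve_alias() is not part of this patch):

    #include "cache.h"
    #include "fsmonitor-path-utils.h"

    static void demo_resolve_alias(void)
    {
            struct alias_info info = { .alias = STRBUF_INIT,
                                       .points_to = STRBUF_INIT };
            char *resolved;

            /* as fsmonitor__get_alias() might fill it in on Catalina+ */
            strbuf_addstr(&info.alias, "/Users");
            strbuf_addstr(&info.points_to, "/System/Volumes/Data/Users");

            /* the "/Users" prefix is rewritten to its firmlink target */
            resolved = fsmonitor__resolve_alias("/Users/me/repo/file", &info);
            if (resolved)
                    printf("%s\n", resolved); /* .../Data/Users/me/repo/file */

            free(resolved);
            strbuf_release(&info.alias);
            strbuf_release(&info.points_to);
    }
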
diff --git a/compat/fsmonitor/fsm-path-utils-win32.c b/compat/fsmonitor/fsm-path-utils-win32.c
new file mode 100644
index 0000000000..0d95bbb416
--- /dev/null
+++ b/compat/fsmonitor/fsm-path-utils-win32.c
@@ -0,0 +1,145 @@
+#include "cache.h"
+#include "fsmonitor.h"
+#include "fsmonitor-path-utils.h"
+
+/*
+ * Check remote working directory protocol.
+ *
+ * Return -1 if client machine cannot get remote protocol information.
+ */
+static int check_remote_protocol(wchar_t *wpath)
+{
+ HANDLE h;
+ FILE_REMOTE_PROTOCOL_INFO proto_info;
+
+ h = CreateFileW(wpath, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS, NULL);
+
+ if (h == INVALID_HANDLE_VALUE) {
+ error(_("[GLE %ld] unable to open for read '%ls'"),
+ GetLastError(), wpath);
+ return -1;
+ }
+
+ if (!GetFileInformationByHandleEx(h, FileRemoteProtocolInfo,
+ &proto_info, sizeof(proto_info))) {
+ error(_("[GLE %ld] unable to get protocol information for '%ls'"),
+ GetLastError(), wpath);
+ CloseHandle(h);
+ return -1;
+ }
+
+ CloseHandle(h);
+
+ trace_printf_key(&trace_fsmonitor,
+ "check_remote_protocol('%ls') remote protocol %#8.8lx",
+ wpath, proto_info.Protocol);
+
+ return 0;
+}
+
+/*
+ * Notes for testing:
+ *
+ * (a) Windows allows a network share to be mapped to a drive letter.
+ * (This is the normal method to access it.)
+ *
+ * $ NET USE Z: \\server\share
+ * $ git -C Z:/repo status
+ *
+ * (b) Windows allows a network share to be referenced WITHOUT mapping
+ * it to a drive letter.
+ *
+ * $ NET USE \\server\share\dir
+ * $ git -C //server/share/repo status
+ *
+ * (c) Windows allows "SUBST" to create a fake drive mapping to an
+ * arbitrary path (which may be remote)
+ *
+ * $ SUBST Q: Z:\repo
+ * $ git -C Q:/ status
+ *
+ * (d) Windows allows a directory symlink to be created on a local
+ * file system that points to a remote repo.
+ *
+ * $ mklink /d ./link //server/share/repo
+ * $ git -C ./link status
+ */
+int fsmonitor__get_fs_info(const char *path, struct fs_info *fs_info)
+{
+ wchar_t wpath[MAX_PATH];
+ wchar_t wfullpath[MAX_PATH];
+ size_t wlen;
+ UINT driveType;
+
+ /*
+ * Do everything in wide chars because the drive letter might be
+ * a multi-byte sequence. See win32_has_dos_drive_prefix().
+ */
+ if (xutftowcs_path(wpath, path) < 0) {
+ return -1;
+ }
+
+ /*
+ * GetDriveTypeW() requires a final slash. We assume that the
+ * worktree pathname points to an actual directory.
+ */
+ wlen = wcslen(wpath);
+ if (wpath[wlen - 1] != L'\\' && wpath[wlen - 1] != L'/') {
+ wpath[wlen++] = L'\\';
+ wpath[wlen] = 0;
+ }
+
+ /*
+ * Normalize the path. If nothing else, this converts forward
+	 * slashes to backslashes. This is essential for GetDriveTypeW() to
+	 * correctly handle some UNC "\\server\share\..." paths.
+ */
+ if (!GetFullPathNameW(wpath, MAX_PATH, wfullpath, NULL)) {
+ return -1;
+ }
+
+ driveType = GetDriveTypeW(wfullpath);
+ trace_printf_key(&trace_fsmonitor,
+ "DriveType '%s' L'%ls' (%u)",
+ path, wfullpath, driveType);
+
+ if (driveType == DRIVE_REMOTE) {
+ fs_info->is_remote = 1;
+ if (check_remote_protocol(wfullpath) < 0)
+ return -1;
+ } else {
+ fs_info->is_remote = 0;
+ }
+
+ trace_printf_key(&trace_fsmonitor,
+ "'%s' is_remote: %d",
+ path, fs_info->is_remote);
+
+ return 0;
+}
+
+int fsmonitor__is_fs_remote(const char *path)
+{
+ struct fs_info fs;
+ if (fsmonitor__get_fs_info(path, &fs))
+ return -1;
+ return fs.is_remote;
+}
+
+/*
+ * No-op for now.
+ */
+int fsmonitor__get_alias(const char *path, struct alias_info *info)
+{
+ return 0;
+}
+
+/*
+ * No-op for now.
+ */
+char *fsmonitor__resolve_alias(const char *path,
+ const struct alias_info *info)
+{
+ return NULL;
+}
diff --git a/compat/fsmonitor/fsm-settings-darwin.c b/compat/fsmonitor/fsm-settings-darwin.c
index efc732c0f3..6abbc7af3a 100644
--- a/compat/fsmonitor/fsm-settings-darwin.c
+++ b/compat/fsmonitor/fsm-settings-darwin.c
@@ -1,32 +1,10 @@
-#include "cache.h"
#include "config.h"
-#include "repository.h"
-#include "fsmonitor-settings.h"
#include "fsmonitor.h"
-#include <sys/param.h>
-#include <sys/mount.h>
+#include "fsmonitor-ipc.h"
+#include "fsmonitor-settings.h"
+#include "fsmonitor-path-utils.h"
-/*
- * [1] Remote working directories are problematic for FSMonitor.
- *
- * The underlying file system on the server machine and/or the remote
- * mount type (NFS, SAMBA, etc.) dictates whether notification events
- * are available at all to remote client machines.
- *
- * Kernel differences between the server and client machines also
- * dictate the how (buffering, frequency, de-dup) the events are
- * delivered to client machine processes.
- *
- * A client machine (such as a laptop) may choose to suspend/resume
- * and it is unclear (without lots of testing) whether the watcher can
- * resync after a resume. We might be able to treat this as a normal
- * "events were dropped by the kernel" event and do our normal "flush
- * and resync" --or-- we might need to close the existing (zombie?)
- * notification fd and create a new one.
- *
- * In theory, the above issues need to be addressed whether we are
- * using the Hook or IPC API.
- *
+ /*
* For the builtin FSMonitor, we create the Unix domain socket for the
* IPC in the .git directory. If the working directory is remote,
* then the socket will be created on the remote file system. This
@@ -38,52 +16,47 @@
* be taken to ensure that $HOME is actually local and not a managed
* file share.)
*
- * So (for now at least), mark remote working directories as
- * incompatible.
- *
- *
- * [2] FAT32 and NTFS working directories are problematic too.
+ * FAT32 and NTFS working directories are problematic too.
*
* The builtin FSMonitor uses a Unix domain socket in the .git
* directory for IPC. These Windows drive formats do not support
* Unix domain sockets, so mark them as incompatible for the daemon.
*
*/
-static enum fsmonitor_reason check_volume(struct repository *r)
+static enum fsmonitor_reason check_uds_volume(struct repository *r)
{
- struct statfs fs;
+ struct fs_info fs;
+ const char *ipc_path = fsmonitor_ipc__get_path(r);
+ struct strbuf path = STRBUF_INIT;
+ strbuf_add(&path, ipc_path, strlen(ipc_path));
- if (statfs(r->worktree, &fs) == -1) {
- int saved_errno = errno;
- trace_printf_key(&trace_fsmonitor, "statfs('%s') failed: %s",
- r->worktree, strerror(saved_errno));
- errno = saved_errno;
+ if (fsmonitor__get_fs_info(dirname(path.buf), &fs) == -1) {
+ strbuf_release(&path);
return FSMONITOR_REASON_ERROR;
}
- trace_printf_key(&trace_fsmonitor,
- "statfs('%s') [type 0x%08x][flags 0x%08x] '%s'",
- r->worktree, fs.f_type, fs.f_flags, fs.f_fstypename);
+ strbuf_release(&path);
- if (!(fs.f_flags & MNT_LOCAL))
- return FSMONITOR_REASON_REMOTE;
-
- if (!strcmp(fs.f_fstypename, "msdos")) /* aka FAT32 */
- return FSMONITOR_REASON_NOSOCKETS;
-
- if (!strcmp(fs.f_fstypename, "ntfs"))
+ if (fs.is_remote ||
+ !strcmp(fs.typename, "msdos") ||
+ !strcmp(fs.typename, "ntfs")) {
+ free(fs.typename);
return FSMONITOR_REASON_NOSOCKETS;
+ }
+ free(fs.typename);
return FSMONITOR_REASON_OK;
}
-enum fsmonitor_reason fsm_os__incompatible(struct repository *r)
+enum fsmonitor_reason fsm_os__incompatible(struct repository *r, int ipc)
{
enum fsmonitor_reason reason;
- reason = check_volume(r);
- if (reason != FSMONITOR_REASON_OK)
- return reason;
+ if (ipc) {
+ reason = check_uds_volume(r);
+ if (reason != FSMONITOR_REASON_OK)
+ return reason;
+ }
return FSMONITOR_REASON_OK;
}
diff --git a/compat/fsmonitor/fsm-settings-win32.c b/compat/fsmonitor/fsm-settings-win32.c
index e5ec5b0a9f..a8af31b71d 100644
--- a/compat/fsmonitor/fsm-settings-win32.c
+++ b/compat/fsmonitor/fsm-settings-win32.c
@@ -1,8 +1,9 @@
#include "cache.h"
#include "config.h"
#include "repository.h"
-#include "fsmonitor-settings.h"
#include "fsmonitor.h"
+#include "fsmonitor-settings.h"
+#include "fsmonitor-path-utils.h"
/*
* VFS for Git is incompatible with FSMonitor.
@@ -24,172 +25,7 @@ static enum fsmonitor_reason check_vfs4git(struct repository *r)
return FSMONITOR_REASON_OK;
}
-/*
- * Check if monitoring remote working directories is allowed.
- *
- * By default, monitoring remote working directories is
- * disabled. Users may override this behavior in enviroments where
- * they have proper support.
- */
-static int check_config_allowremote(struct repository *r)
-{
- int allow;
-
- if (!repo_config_get_bool(r, "fsmonitor.allowremote", &allow))
- return allow;
-
- return -1; /* fsmonitor.allowremote not set */
-}
-
-/*
- * Check remote working directory protocol.
- *
- * Error if client machine cannot get remote protocol information.
- */
-static int check_remote_protocol(wchar_t *wpath)
-{
- HANDLE h;
- FILE_REMOTE_PROTOCOL_INFO proto_info;
-
- h = CreateFileW(wpath, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING,
- FILE_FLAG_BACKUP_SEMANTICS, NULL);
-
- if (h == INVALID_HANDLE_VALUE) {
- error(_("[GLE %ld] unable to open for read '%ls'"),
- GetLastError(), wpath);
- return -1;
- }
-
- if (!GetFileInformationByHandleEx(h, FileRemoteProtocolInfo,
- &proto_info, sizeof(proto_info))) {
- error(_("[GLE %ld] unable to get protocol information for '%ls'"),
- GetLastError(), wpath);
- CloseHandle(h);
- return -1;
- }
-
- CloseHandle(h);
-
- trace_printf_key(&trace_fsmonitor,
- "check_remote_protocol('%ls') remote protocol %#8.8lx",
- wpath, proto_info.Protocol);
-
- return 0;
-}
-
-/*
- * Remote working directories are problematic for FSMonitor.
- *
- * The underlying file system on the server machine and/or the remote
- * mount type dictates whether notification events are available at
- * all to remote client machines.
- *
- * Kernel differences between the server and client machines also
- * dictate the how (buffering, frequency, de-dup) the events are
- * delivered to client machine processes.
- *
- * A client machine (such as a laptop) may choose to suspend/resume
- * and it is unclear (without lots of testing) whether the watcher can
- * resync after a resume. We might be able to treat this as a normal
- * "events were dropped by the kernel" event and do our normal "flush
- * and resync" --or-- we might need to close the existing (zombie?)
- * notification fd and create a new one.
- *
- * In theory, the above issues need to be addressed whether we are
- * using the Hook or IPC API.
- *
- * So (for now at least), mark remote working directories as
- * incompatible.
- *
- * Notes for testing:
- *
- * (a) Windows allows a network share to be mapped to a drive letter.
- * (This is the normal method to access it.)
- *
- * $ NET USE Z: \\server\share
- * $ git -C Z:/repo status
- *
- * (b) Windows allows a network share to be referenced WITHOUT mapping
- * it to drive letter.
- *
- * $ NET USE \\server\share\dir
- * $ git -C //server/share/repo status
- *
- * (c) Windows allows "SUBST" to create a fake drive mapping to an
- * arbitrary path (which may be remote)
- *
- * $ SUBST Q: Z:\repo
- * $ git -C Q:/ status
- *
- * (d) Windows allows a directory symlink to be created on a local
- * file system that points to a remote repo.
- *
- * $ mklink /d ./link //server/share/repo
- * $ git -C ./link status
- */
-static enum fsmonitor_reason check_remote(struct repository *r)
-{
- int ret;
- wchar_t wpath[MAX_PATH];
- wchar_t wfullpath[MAX_PATH];
- size_t wlen;
- UINT driveType;
-
- /*
- * Do everything in wide chars because the drive letter might be
- * a multi-byte sequence. See win32_has_dos_drive_prefix().
- */
- if (xutftowcs_path(wpath, r->worktree) < 0)
- return FSMONITOR_REASON_ERROR;
-
- /*
- * GetDriveTypeW() requires a final slash. We assume that the
- * worktree pathname points to an actual directory.
- */
- wlen = wcslen(wpath);
- if (wpath[wlen - 1] != L'\\' && wpath[wlen - 1] != L'/') {
- wpath[wlen++] = L'\\';
- wpath[wlen] = 0;
- }
-
- /*
- * Normalize the path. If nothing else, this converts forward
- * slashes to backslashes. This is essential to get GetDriveTypeW()
- * correctly handle some UNC "\\server\share\..." paths.
- */
- if (!GetFullPathNameW(wpath, MAX_PATH, wfullpath, NULL))
- return FSMONITOR_REASON_ERROR;
-
- driveType = GetDriveTypeW(wfullpath);
- trace_printf_key(&trace_fsmonitor,
- "DriveType '%s' L'%ls' (%u)",
- r->worktree, wfullpath, driveType);
-
- if (driveType == DRIVE_REMOTE) {
- trace_printf_key(&trace_fsmonitor,
- "check_remote('%s') true",
- r->worktree);
-
- ret = check_remote_protocol(wfullpath);
- if (ret < 0)
- return FSMONITOR_REASON_ERROR;
-
- switch (check_config_allowremote(r)) {
- case 0: /* config overrides and disables */
- return FSMONITOR_REASON_REMOTE;
- case 1: /* config overrides and enables */
- return FSMONITOR_REASON_OK;
- default:
- break; /* config has no opinion */
- }
-
- return FSMONITOR_REASON_REMOTE;
- }
-
- return FSMONITOR_REASON_OK;
-}
-
-enum fsmonitor_reason fsm_os__incompatible(struct repository *r)
+enum fsmonitor_reason fsm_os__incompatible(struct repository *r, int ipc)
{
enum fsmonitor_reason reason;
@@ -197,9 +33,5 @@ enum fsmonitor_reason fsm_os__incompatible(struct repository *r)
if (reason != FSMONITOR_REASON_OK)
return reason;
- reason = check_remote(r);
- if (reason != FSMONITOR_REASON_OK)
- return reason;
-
return FSMONITOR_REASON_OK;
}
diff --git a/compat/mingw.c b/compat/mingw.c
index 901375d584..d614f156df 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -196,16 +196,19 @@ static int read_yes_no_answer(void)
static int ask_yes_no_if_possible(const char *format, ...)
{
char question[4096];
- const char *retry_hook[] = { NULL, NULL, NULL };
+ const char *retry_hook;
va_list args;
va_start(args, format);
vsnprintf(question, sizeof(question), format, args);
va_end(args);
- if ((retry_hook[0] = mingw_getenv("GIT_ASK_YESNO"))) {
- retry_hook[1] = question;
- return !run_command_v_opt(retry_hook, 0);
+ retry_hook = mingw_getenv("GIT_ASK_YESNO");
+ if (retry_hook) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&cmd.args, retry_hook, question, NULL);
+ return !run_command(&cmd);
}
if (!isatty(_fileno(stdin)) || !isatty(_fileno(stderr)))
diff --git a/compat/nonblock.c b/compat/nonblock.c
index 9694ebdb1d..5b51195c32 100644
--- a/compat/nonblock.c
+++ b/compat/nonblock.c
@@ -41,7 +41,7 @@ int enable_pipe_nonblock(int fd)
#else
-int enable_pipe_nonblock(int fd)
+int enable_pipe_nonblock(int fd UNUSED)
{
errno = ENOSYS;
return -1;
diff --git a/config.c b/config.c
index aad3e00341..27f38283ad 100644
--- a/config.c
+++ b/config.c
@@ -1229,7 +1229,7 @@ static int git_parse_unsigned(const char *value, uintmax_t *ret, uintmax_t max)
return 0;
}
-static int git_parse_int(const char *value, int *ret)
+int git_parse_int(const char *value, int *ret)
{
intmax_t tmp;
if (!git_parse_signed(value, &tmp, maximum_signed_value_of_type(int)))
@@ -2406,11 +2406,6 @@ int git_configset_add_file(struct config_set *cs, const char *filename)
return git_config_from_file(config_set_callback, filename, cs);
}
-int git_configset_add_parameters(struct config_set *cs)
-{
- return git_config_from_parameters(config_set_callback, cs);
-}
-
int git_configset_get_value(struct config_set *cs, const char *key, const char **value)
{
const struct string_list *values = NULL;
@@ -2655,24 +2650,15 @@ int repo_config_get_pathname(struct repository *repo,
/* Read values into protected_config. */
static void read_protected_config(void)
{
- char *xdg_config = NULL, *user_config = NULL, *system_config = NULL;
-
+ struct config_options opts = {
+ .respect_includes = 1,
+ .ignore_repo = 1,
+ .ignore_worktree = 1,
+ .system_gently = 1,
+ };
git_configset_init(&protected_config);
-
- system_config = git_system_config();
- git_global_config(&user_config, &xdg_config);
-
- if (system_config)
- git_configset_add_file(&protected_config, system_config);
- if (xdg_config)
- git_configset_add_file(&protected_config, xdg_config);
- if (user_config)
- git_configset_add_file(&protected_config, user_config);
- git_configset_add_parameters(&protected_config);
-
- free(system_config);
- free(xdg_config);
- free(user_config);
+ config_with_options(config_set_callback, &protected_config,
+ NULL, &opts);
}
void git_protected_config(config_fn_t fn, void *data)
diff --git a/config.h b/config.h
index ca994d7714..ef9eade641 100644
--- a/config.h
+++ b/config.h
@@ -206,6 +206,7 @@ int config_with_options(config_fn_t fn, void *,
int git_parse_ssize_t(const char *, ssize_t *);
int git_parse_ulong(const char *, unsigned long *);
+int git_parse_int(const char *value, int *ret);
/**
* Same as `git_config_bool`, except that it returns -1 on error rather
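
git_parse_int() follows the other git_parse_*() helpers: it returns nonzero on
success and 0 on malformed input rather than dying, and accepts the usual
'k'/'m'/'g' suffixes. A tiny sketch (int_or_default() is illustrative):

    #include "cache.h"
    #include "config.h"

    /* parse values such as "42" or "4k"; fall back on malformed input */
    static int int_or_default(const char *value, int fallback)
    {
            int out;

            if (!git_parse_int(value, &out))
                    return fallback;
            return out;
    }
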
diff --git a/config.mak.dev b/config.mak.dev
index 4fa19d361b..981304727c 100644
--- a/config.mak.dev
+++ b/config.mak.dev
@@ -69,6 +69,31 @@ DEVELOPER_CFLAGS += -Wno-missing-braces
endif
endif
+# Old versions of clang complain about initializing a
+# struct-within-a-struct using just "{0}" rather than "{{0}}". This
+# error is considered a false positive and not worth fixing, because
+# newer clang versions do not complain, so just disable it.
+#
+# The "bug" was fixed in upstream clang 9.
+#
+# Complicating this is that versions of clang released by Apple have
+# their own version numbers (associated with the corresponding version
+# of XCode) unrelated to the official clang version numbers.
+#
+# The bug was fixed in Apple clang 12.
+#
+ifneq ($(filter clang1,$(COMPILER_FEATURES)),) # if we are using clang
+ifeq ($(uname_S),Darwin) # if we are on darwin
+ifeq ($(filter clang12,$(COMPILER_FEATURES)),) # if version < 12
+DEVELOPER_CFLAGS += -Wno-missing-braces
+endif
+else # not darwin
+ifeq ($(filter clang9,$(COMPILER_FEATURES)),) # if version < 9
+DEVELOPER_CFLAGS += -Wno-missing-braces
+endif
+endif
+endif
+
# https://bugzilla.redhat.com/show_bug.cgi?id=2075786
ifneq ($(filter gcc12,$(COMPILER_FEATURES)),)
DEVELOPER_CFLAGS += -Wno-error=stringop-overread
diff --git a/connected.c b/connected.c
index 74a20cb32e..b90fd61790 100644
--- a/connected.c
+++ b/connected.c
@@ -85,6 +85,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
promisor_pack_found:
;
} while ((oid = fn(cb_data)) != NULL);
+ free(new_pack);
return 0;
}
@@ -100,6 +101,9 @@ no_promisor_pack_found:
strvec_push(&rev_list.args, "--exclude-promisor-objects");
if (!opt->is_deepening_fetch) {
strvec_push(&rev_list.args, "--not");
+ if (opt->exclude_hidden_refs_section)
+ strvec_pushf(&rev_list.args, "--exclude-hidden=%s",
+ opt->exclude_hidden_refs_section);
strvec_push(&rev_list.args, "--all");
}
strvec_push(&rev_list.args, "--quiet");
@@ -118,8 +122,10 @@ no_promisor_pack_found:
else
rev_list.no_stderr = opt->quiet;
- if (start_command(&rev_list))
+ if (start_command(&rev_list)) {
+ free(new_pack);
return error(_("Could not run 'git rev-list'"));
+ }
sigchain_push(SIGPIPE, SIG_IGN);
@@ -151,5 +157,6 @@ no_promisor_pack_found:
err = error_errno(_("failed to close rev-list's stdin"));
sigchain_pop(SIGPIPE);
+ free(new_pack);
return finish_command(&rev_list) || err;
}
diff --git a/connected.h b/connected.h
index 6e59c92aa3..16b2c84f2e 100644
--- a/connected.h
+++ b/connected.h
@@ -46,6 +46,13 @@ struct check_connected_options {
* during a fetch.
*/
unsigned is_deepening_fetch : 1;
+
+ /*
+ * If not NULL, use `--exclude-hidden=$section` to exclude all refs
+ * hidden via the `$section.hideRefs` config from the set of
+ * already-reachable refs.
+ */
+ const char *exclude_hidden_refs_section;
};
#define CHECK_CONNECTED_INIT { 0 }
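
A receiving process can now keep refs hidden by a hideRefs configuration out of
the '--not --all' set used for the connectivity walk. A sketch of setting the
new field (next_new_tip() and the "receive" section name are illustrative):

    #include "cache.h"
    #include "connected.h"

    /* oid_iterate_fn supplying the new tips; definition omitted here */
    const struct object_id *next_new_tip(void *cb_data);

    static int check_new_tips_connected(void *cb_data)
    {
            struct check_connected_options opt = CHECK_CONNECTED_INIT;

            /* refs hidden via "receive.hideRefs" are not assumed reachable */
            opt.exclude_hidden_refs_section = "receive";

            return check_connected(next_new_tip, cb_data, &opt);
    }
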
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index ea2a531be8..ea1bd9602e 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -4,25 +4,67 @@
#[[
-Instructions how to use this in Visual Studio:
+== Overview ==
+
+The top-level Makefile is Git's primary build environment, and a lot
+of things are missing (and probably always will be) from this CMake
+alternative.
+
+The primary use-case for maintaining this CMake build recipe is to
+have nicer IDE integration on Windows.
+
+== Creating a build recipe ==
+
+The "cmake" command creates a build file from this recipe:
+
+ cmake -S contrib/buildsystems -B contrib/buildsystems/out -DCMAKE_BUILD_TYPE=Release
+
+Running this will create files in the contrib/buildsystems/out
+directory (our top-level .gitignore file knows to ignore contents of
+this directory).
+
+See "cmake options" below for a discussion of
+"-DCMAKE_BUILD_TYPE=Release" and other options to "cmake".
+
+== Building with Visual Studio ==
+
+To use this in Visual Studio:
Open the worktree as a folder. Visual Studio 2019 and later will detect
the CMake configuration automatically and set everything up for you,
-ready to build. You can then run the tests in `t/` via a regular Git Bash.
+ready to build. See "== Running the tests ==" below for running the tests.
Note: Visual Studio also has the option of opening `CMakeLists.txt`
directly; Using this option, Visual Studio will not find the source code,
though, therefore the `File>Open>Folder...` option is preferred.
-Instructions to run CMake manually:
+By default CMake will install vcpkg locally to your source tree on
+configuration; to avoid this, add `-DNO_VCPKG=TRUE` to the command line
+when configuring.
- mkdir -p contrib/buildsystems/out
- cd contrib/buildsystems/out
- cmake ../ -DCMAKE_BUILD_TYPE=Release
+== Building on Windows without Visual Studio ==
-This will build the git binaries in contrib/buildsystems/out
-directory (our top-level .gitignore file knows to ignore contents of
-this directory).
+Open contrib/buildsystems/git.sln and build Git. Or use the "msbuild"
+command-line tool (see our own ".github/workflows/main.yml" for a real
+example):
+
+ msbuild git.sln
+
+== Building on *nix ==
+
+On all other platforms running "cmake" will generate a Makefile; to
+build with it run:
+
+ make -C contrib/buildsystems/out
+
+It's also possible to use other generators, e.g. Ninja has arguably
+slightly better output. Add "-G Ninja" to the cmake command above,
+then:
+
+ ninja -C contrib/buildsystems/out
+
+== cmake options ==
+
+=== -DCMAKE_BUILD_TYPE=<type> ===
Possible build configurations(-DCMAKE_BUILD_TYPE) with corresponding
compiler flags
@@ -35,17 +77,36 @@ empty(default) :
NOTE: -DCMAKE_BUILD_TYPE is optional. For multi-config generators like Visual Studio
this option is ignored
-This process generates a Makefile(Linux/*BSD/MacOS) , Visual Studio solution(Windows) by default.
-Run `make` to build Git on Linux/*BSD/MacOS.
-Open git.sln on Windows and build Git.
+== Running the tests ==
-NOTE: By default CMake uses Makefile as the build tool on Linux and Visual Studio in Windows,
-to use another tool say `ninja` add this to the command line when configuring.
-`-G Ninja`
+Once we've built in "contrib/buildsystems/out", the tests can be run at
+the top level (note: not in the generated "contrib/buildsystems/out/t/"
+directory). If no top-level build is found (as created with the
+Makefile), t/test-lib.sh will discover the git built in
+"contrib/buildsystems/out", e.g.:
-NOTE: By default CMake will install vcpkg locally to your source tree on configuration,
-to avoid this, add `-DNO_VCPKG=TRUE` to the command line when configuring.
+ (cd t && ./t0001-init.sh)
+ setup: had no ../git, but found & used cmake built git in ../contrib/buildsystems/out/git
+ [...]
+The tests can also be run with ctest. After building with "cmake" and
+"make" (or "msbuild"), run from the top level, e.g.:
+
+ # "--test-dir" is new in cmake v3.20, so "(cd
+ # contrib/buildsystems/out && ctest ...)" on older versions.
+ ctest --test-dir contrib/buildsystems/out --jobs="$(nproc)" --output-on-failure
+
+Options can be passed by setting GIT_TEST_OPTS before invoking
+cmake. E.g. on a Linux system with systemd the tests can be sped up by
+using a ramdisk for the scratch files:
+
+ GIT_TEST_OPTS="--root=/dev/shm/$(id -u)/ctest" cmake -S contrib/buildsystems -B contrib/buildsystems/out
+ [...]
+ -- Using user-selected test options: --root=/dev/shm/<uid>/ctest
+
+Then running the tests with "ctest" (here with --jobs="$(nproc)"):
+
+ ctest --jobs=$(nproc) --test-dir contrib/buildsystems/out
]]
cmake_minimum_required(VERSION 3.14)
@@ -77,10 +138,17 @@ if(USE_VCPKG)
set(CMAKE_TOOLCHAIN_FILE ${VCPKG_DIR}/scripts/buildsystems/vcpkg.cmake CACHE STRING "Vcpkg toolchain file")
endif()
-find_program(SH_EXE sh PATHS "C:/Program Files/Git/bin" "$ENV{LOCALAPPDATA}/Programs/Git/bin")
-if(NOT SH_EXE)
- message(FATAL_ERROR "sh: shell interpreter was not found in your path, please install one."
- "On Windows, you can get it as part of 'Git for Windows' install at https://gitforwindows.org/")
+if(WIN32)
+ find_program(SH_EXE sh PATHS "C:/Program Files/Git/bin" "$ENV{LOCALAPPDATA}/Programs/Git/bin")
+ if(NOT SH_EXE)
+ message(FATAL_ERROR "sh: shell interpreter was not found in your path, please install one."
+ "You can get it as part of 'Git for Windows' install at https://gitforwindows.org/")
+ endif()
+else()
+ find_program(SH_EXE sh)
+ if(NOT SH_EXE)
+ message(FATAL_ERROR "cannot find 'sh' in '$PATH'")
+ endif()
endif()
#Create GIT-VERSION-FILE using GIT-VERSION-GEN
@@ -308,6 +376,8 @@ if(SUPPORTS_SIMPLE_IPC)
add_compile_definitions(HAVE_FSMONITOR_DAEMON_BACKEND)
list(APPEND compat_SOURCES compat/fsmonitor/fsm-listen-win32.c)
list(APPEND compat_SOURCES compat/fsmonitor/fsm-health-win32.c)
+ list(APPEND compat_SOURCES compat/fsmonitor/fsm-ipc-win32.c)
+ list(APPEND compat_SOURCES compat/fsmonitor/fsm-path-utils-win32.c)
add_compile_definitions(HAVE_FSMONITOR_OS_SETTINGS)
list(APPEND compat_SOURCES compat/fsmonitor/fsm-settings-win32.c)
@@ -315,6 +385,8 @@ if(SUPPORTS_SIMPLE_IPC)
add_compile_definitions(HAVE_FSMONITOR_DAEMON_BACKEND)
list(APPEND compat_SOURCES compat/fsmonitor/fsm-listen-darwin.c)
list(APPEND compat_SOURCES compat/fsmonitor/fsm-health-darwin.c)
+ list(APPEND compat_SOURCES compat/fsmonitor/fsm-ipc-darwin.c)
+ list(APPEND compat_SOURCES compat/fsmonitor/fsm-path-utils-darwin.c)
add_compile_definitions(HAVE_FSMONITOR_OS_SETTINGS)
list(APPEND compat_SOURCES compat/fsmonitor/fsm-settings-darwin.c)
@@ -806,6 +878,19 @@ add_custom_command(OUTPUT ${git_links} ${git_http_links}
DEPENDS git git-remote-http)
add_custom_target(git-links ALL DEPENDS ${git_links} ${git_http_links})
+function(write_script path content)
+ file(WRITE ${path} ${content})
+
+ if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" VERSION_GREATER_EQUAL "3.19")
+ file(CHMOD ${path} FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE)
+ else()
+ execute_process(COMMAND chmod +x ${path}
+ RESULT_VARIABLE CHILD_ERROR)
+ if(CHILD_ERROR)
+ message(FATAL_ERROR "failed to chmod +x '${path}': '${CHILD_ERROR}'")
+ endif()
+ endif()
+endfunction()
#creating required scripts
set(SHELL_PATH /bin/sh)
@@ -831,7 +916,7 @@ foreach(script ${git_shell_scripts})
string(REPLACE "# @@BROKEN_PATH_FIX@@" "" content "${content}")
string(REPLACE "@@PERL@@" "${PERL_PATH}" content "${content}")
string(REPLACE "@@PAGER_ENV@@" "LESS=FRX LV=-c" content "${content}")
- file(WRITE ${CMAKE_BINARY_DIR}/${script} ${content})
+ write_script(${CMAKE_BINARY_DIR}/${script} "${content}")
endforeach()
#perl scripts
@@ -846,13 +931,14 @@ foreach(script ${git_perl_scripts})
file(STRINGS ${CMAKE_SOURCE_DIR}/${script}.perl content NEWLINE_CONSUME)
string(REPLACE "#!/usr/bin/perl" "#!/usr/bin/perl\n${perl_header}\n" content "${content}")
string(REPLACE "@@GIT_VERSION@@" "${PROJECT_VERSION}" content "${content}")
- file(WRITE ${CMAKE_BINARY_DIR}/${script} ${content})
+ write_script(${CMAKE_BINARY_DIR}/${script} "${content}")
endforeach()
#python script
file(STRINGS ${CMAKE_SOURCE_DIR}/git-p4.py content NEWLINE_CONSUME)
string(REPLACE "#!/usr/bin/env python" "#!/usr/bin/python" content "${content}")
-file(WRITE ${CMAKE_BINARY_DIR}/git-p4 ${content})
+write_script(${CMAKE_BINARY_DIR}/git-p4 "${content}")
+file(COPY ${CMAKE_SOURCE_DIR}/git-p4.py DESTINATION ${CMAKE_BINARY_DIR}/)
#perl modules
file(GLOB_RECURSE perl_modules "${CMAKE_SOURCE_DIR}/perl/*.pm")
@@ -893,7 +979,7 @@ if(MSGFMT_EXE)
foreach(po ${po_files})
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/po/build/locale/${po}/LC_MESSAGES)
add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/po/build/locale/${po}/LC_MESSAGES/git.mo
- COMMAND ${MSGFMT_EXE} --check --statistics -o ${CMAKE_BINARY_DIR}/po/build/locale/${po}/LC_MESSAGES/git.mo ${CMAKE_SOURCE_DIR}/po/${po}.po)
+ COMMAND ${MSGFMT_EXE} --check -o ${CMAKE_BINARY_DIR}/po/build/locale/${po}/LC_MESSAGES/git.mo ${CMAKE_SOURCE_DIR}/po/${po}.po)
list(APPEND po_gen ${CMAKE_BINARY_DIR}/po/build/locale/${po}/LC_MESSAGES/git.mo)
endforeach()
add_custom_target(po-gen ALL DEPENDS ${po_gen})
@@ -990,20 +1076,20 @@ foreach(script ${wrapper_scripts})
file(STRINGS ${CMAKE_SOURCE_DIR}/wrap-for-bin.sh content NEWLINE_CONSUME)
string(REPLACE "@@BUILD_DIR@@" "${CMAKE_BINARY_DIR}" content "${content}")
string(REPLACE "@@PROG@@" "${script}${EXE_EXTENSION}" content "${content}")
- file(WRITE ${CMAKE_BINARY_DIR}/bin-wrappers/${script} ${content})
+ write_script(${CMAKE_BINARY_DIR}/bin-wrappers/${script} "${content}")
endforeach()
foreach(script ${wrapper_test_scripts})
file(STRINGS ${CMAKE_SOURCE_DIR}/wrap-for-bin.sh content NEWLINE_CONSUME)
string(REPLACE "@@BUILD_DIR@@" "${CMAKE_BINARY_DIR}" content "${content}")
string(REPLACE "@@PROG@@" "t/helper/${script}${EXE_EXTENSION}" content "${content}")
- file(WRITE ${CMAKE_BINARY_DIR}/bin-wrappers/${script} ${content})
+ write_script(${CMAKE_BINARY_DIR}/bin-wrappers/${script} "${content}")
endforeach()
file(STRINGS ${CMAKE_SOURCE_DIR}/wrap-for-bin.sh content NEWLINE_CONSUME)
string(REPLACE "@@BUILD_DIR@@" "${CMAKE_BINARY_DIR}" content "${content}")
string(REPLACE "@@PROG@@" "git-cvsserver" content "${content}")
-file(WRITE ${CMAKE_BINARY_DIR}/bin-wrappers/git-cvsserver ${content})
+write_script(${CMAKE_BINARY_DIR}/bin-wrappers/git-cvsserver "${content}")
#options for configuring test options
option(PERL_TESTS "Perform tests that use perl" ON)
@@ -1021,7 +1107,6 @@ set(NO_PERL )
set(NO_PTHREADS )
set(NO_PYTHON )
set(PAGER_ENV "LESS=FRX LV=-c")
-set(DC_SHA1 YesPlease)
set(RUNTIME_PREFIX true)
set(NO_GETTEXT )
@@ -1053,11 +1138,13 @@ file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "PYTHON_PATH='${PYTHON_PATH}'\
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "TAR='${TAR}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_CURL='${NO_CURL}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_EXPAT='${NO_EXPAT}'\n")
+if(PCRE2_FOUND)
+ file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "USE_LIBPCRE2='YesPlease'\n")
+endif()
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_PERL='${NO_PERL}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_PTHREADS='${NO_PTHREADS}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_UNIX_SOCKETS='${NO_UNIX_SOCKETS}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "PAGER_ENV='${PAGER_ENV}'\n")
-file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "DC_SHA1='${DC_SHA1}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "X='${EXE_EXTENSION}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_GETTEXT='${NO_GETTEXT}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "RUNTIME_PREFIX='${RUNTIME_PREFIX}'\n")
@@ -1067,32 +1154,36 @@ if(USE_VCPKG)
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "PATH=\"$PATH:$TEST_DIRECTORY/../compat/vcbuild/vcpkg/installed/x64-windows/bin\"\n")
endif()
-#Make the tests work when building out of the source tree
-get_filename_component(CACHE_PATH ${CMAKE_CURRENT_LIST_DIR}/../../CMakeCache.txt ABSOLUTE)
-if(NOT ${CMAKE_BINARY_DIR}/CMakeCache.txt STREQUAL ${CACHE_PATH})
- file(RELATIVE_PATH BUILD_DIR_RELATIVE ${CMAKE_SOURCE_DIR} ${CMAKE_BINARY_DIR}/CMakeCache.txt)
- string(REPLACE "/CMakeCache.txt" "" BUILD_DIR_RELATIVE ${BUILD_DIR_RELATIVE})
- #Setting the build directory in test-lib.sh before running tests
- file(WRITE ${CMAKE_BINARY_DIR}/CTestCustom.cmake
- "file(STRINGS ${CMAKE_SOURCE_DIR}/t/test-lib.sh GIT_BUILD_DIR_REPL REGEX \"GIT_BUILD_DIR=(.*)\")\n"
- "file(STRINGS ${CMAKE_SOURCE_DIR}/t/test-lib.sh content NEWLINE_CONSUME)\n"
- "string(REPLACE \"\${GIT_BUILD_DIR_REPL}\" \"GIT_BUILD_DIR=\\\"$TEST_DIRECTORY/../${BUILD_DIR_RELATIVE}\\\"\" content \"\${content}\")\n"
- "file(WRITE ${CMAKE_SOURCE_DIR}/t/test-lib.sh \${content})")
- #misc copies
- file(COPY ${CMAKE_SOURCE_DIR}/t/chainlint.pl DESTINATION ${CMAKE_BINARY_DIR}/t/)
- file(COPY ${CMAKE_SOURCE_DIR}/po/is.po DESTINATION ${CMAKE_BINARY_DIR}/po/)
- file(COPY ${CMAKE_SOURCE_DIR}/mergetools/tkdiff DESTINATION ${CMAKE_BINARY_DIR}/mergetools/)
- file(COPY ${CMAKE_SOURCE_DIR}/contrib/completion/git-prompt.sh DESTINATION ${CMAKE_BINARY_DIR}/contrib/completion/)
- file(COPY ${CMAKE_SOURCE_DIR}/contrib/completion/git-completion.bash DESTINATION ${CMAKE_BINARY_DIR}/contrib/completion/)
-endif()
-
file(GLOB test_scipts "${CMAKE_SOURCE_DIR}/t/t[0-9]*.sh")
+if(DEFINED ENV{GIT_TEST_OPTS})
+ set(GIT_TEST_OPTS "$ENV{GIT_TEST_OPTS}"
+ CACHE STRING "test options, see t/README")
+ message(STATUS "Using user-selected test options: ${GIT_TEST_OPTS}")
+elseif(WIN32)
+ set(GIT_TEST_OPTS "--no-bin-wrappers --no-chain-lint -vx"
+ CACHE STRING "test options, see t/README")
+	message(STATUS "Using Windows-specific default test options: ${GIT_TEST_OPTS}")
+else()
+ set(GIT_TEST_OPTS ""
+ CACHE STRING "test options, see t/README")
+ message(STATUS "No custom test options selected, set e.g. GIT_TEST_OPTS=\"-vixd\"")
+endif()
+separate_arguments(GIT_TEST_OPTS)
+
#test
foreach(tsh ${test_scipts})
add_test(NAME ${tsh}
- COMMAND ${SH_EXE} ${tsh}
+ COMMAND ${SH_EXE} ${tsh} ${GIT_TEST_OPTS}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/t)
+ set_property(TEST ${tsh} APPEND PROPERTY ENVIRONMENT
+ GIT_TEST_BUILD_DIR=${CMAKE_BINARY_DIR})
endforeach()
+if(WIN32)
+ # This test script takes an extremely long time and is known to time out even
+ # on fast machines because it requires in excess of one hour to run
+ set_tests_properties("${CMAKE_SOURCE_DIR}/t/t7112-reset-submodule.sh" PROPERTIES TIMEOUT 4000)
+endif()
+
endif()#BUILD_TESTING
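
The GIT_TEST_OPTS hunk above reads the options from the environment at configure
time, caches them, and appends them to every registered test, while
GIT_TEST_BUILD_DIR points each test at the build tree. A minimal sketch of driving
this, assuming the CMake project lives in contrib/buildsystems/ as in git.git (the
file header for these hunks is outside this excerpt):

    # configure once with custom test options; they are cached, so plain
    # "ctest" reruns reuse them
    GIT_TEST_OPTS="--no-bin-wrappers -vx" cmake -S contrib/buildsystems -B build
    cmake --build build
    # each t/t[0-9]*.sh is registered as a test and now runs as
    # "${SH_EXE} <script> ${GIT_TEST_OPTS}" with GIT_TEST_BUILD_DIR set
    (cd build && ctest -j8)
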
diff --git a/contrib/coccinelle/.gitignore b/contrib/coccinelle/.gitignore
index d3f29646dc..1d45c0a40c 100644
--- a/contrib/coccinelle/.gitignore
+++ b/contrib/coccinelle/.gitignore
@@ -1 +1 @@
-*.patch*
+*.patch
diff --git a/contrib/coccinelle/README b/contrib/coccinelle/README
index f0e80bd7f0..d1daa1f626 100644
--- a/contrib/coccinelle/README
+++ b/contrib/coccinelle/README
@@ -41,3 +41,52 @@ There are two types of semantic patches:
This allows to expose plans of pending large scale refactorings without
impacting the bad pattern checks.
+
+Git-specific tips & things to know about how we run "spatch":
+
+ * The "make coccicheck" will piggy-back on
+ "COMPUTE_HEADER_DEPENDENCIES". If you've built a given object file
+ the "coccicheck" target will consider its depednency to decide if
+ it needs to re-run on the corresponding source file.
+
+ This means that a "make coccicheck" will re-compile object files
+ before running. This might be unexpected, but speeds up the run in
+ the common case, as e.g. a change to "column.h" won't require all
+ coccinelle rules to be re-run against "grep.c" (or another file
+ that happens not to use "column.h").
+
+ To disable this behavior use the "SPATCH_USE_O_DEPENDENCIES=NoThanks"
+ flag.
+
+ * To speed up our rules the "make coccicheck" target will by default
+ concatenate all of the *.cocci files here into an "ALL.cocci", and
+ apply it to each source file.
+
+ This makes the run faster, as we don't need to run each rule
+ against each source file. See the Makefile for further discussion,
+ this behavior can be disabled with "SPATCH_CONCAT_COCCI=".
+
+   But since they're concatenated, any <id> in the <rulename> (e.g. "@
+   my_name", vs. anonymous "@@") needs to be unique across all our
+ *.cocci files. You should only need to name rules if other rules
+ depend on them (currently only one rule is named).
+
+ * To speed up incremental runs even more, use the "spatchcache" tool
+   in this directory as your "SPATCH". It aims to be a "ccache" for
+   coccinelle, and piggy-backs on "COMPUTE_HEADER_DEPENDENCIES".
+
+   It caches in Redis by default; see its source for a how-to.
+
+ In one setup with a primed cache "make coccicheck" followed by a
+ "make clean && make" takes around 10s to run, but 2m30s with the
+ default of "SPATCH_CONCAT_COCCI=Y".
+
+ With "SPATCH_CONCAT_COCCI=" the total runtime is around ~6m, sped
+ up to ~1m with "spatchcache".
+
+   Most of the 10s (or ~1m) is spent on re-running "spatch" on
+ files we couldn't cache, as we didn't compile them (in contrib/*
+ and compat/* mostly).
+
+ The absolute times will differ for you, but the relative speedup
+ from caching should be on that order.
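
A hedged recap of the knobs this section names, as make invocations (the absolute
timings will differ per machine):

    # default: reuse object dependency info and concatenate *.cocci into
    # an "ALL.cocci" before applying it
    make
    make coccicheck

    # opt out of either speed-up
    make coccicheck SPATCH_USE_O_DEPENDENCIES=NoThanks
    make coccicheck SPATCH_CONCAT_COCCI=

    # incremental runs through the cache wrapper added in this series
    make coccicheck SPATCH=contrib/coccinelle/spatchcache SPATCH_FLAGS=--very-quiet
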
diff --git a/contrib/coccinelle/hashmap.cocci b/contrib/coccinelle/hashmap.cocci
index d69e120ccf..c5dbb4557b 100644
--- a/contrib/coccinelle/hashmap.cocci
+++ b/contrib/coccinelle/hashmap.cocci
@@ -1,4 +1,4 @@
-@ hashmap_entry_init_usage @
+@@
expression E;
struct hashmap_entry HME;
@@
diff --git a/contrib/coccinelle/preincr.cocci b/contrib/coccinelle/preincr.cocci
index 7fe1e8d2d9..ae42cb0730 100644
--- a/contrib/coccinelle/preincr.cocci
+++ b/contrib/coccinelle/preincr.cocci
@@ -1,4 +1,4 @@
-@ preincrement @
+@@
identifier i;
@@
- ++i > 1
diff --git a/contrib/coccinelle/spatchcache b/contrib/coccinelle/spatchcache
new file mode 100755
index 0000000000..29e9352d8a
--- /dev/null
+++ b/contrib/coccinelle/spatchcache
@@ -0,0 +1,304 @@
+#!/bin/sh
+#
+# spatchcache: a poor-man's "ccache"-alike for "spatch" in git.git
+#
+# This caching command relies on the peculiarities of the Makefile
+# driving "spatch" in git.git, in particular if we invoke:
+#
+# make
+# # See "spatchCache.cacheWhenStderr" for why "--very-quiet" is
+# # used
+# make coccicheck SPATCH_FLAGS=--very-quiet
+#
+# We can with COMPUTE_HEADER_DEPENDENCIES (auto-detected as true with
+# "gcc" and "clang") write e.g. a .depend/grep.o.d for grep.c, when we
+# compile grep.o.
+#
+# The .depend/grep.o.d will have the full header dependency tree of
+# grep.c, and we can thus cache the output of "spatch" by:
+#
+# 1. Hashing all of those files
+# 2. Hashing our source file, and the *.cocci rule we're
+# applying
+#            3. Running spatch; if it suggests no changes (by far the common
+#               case) we invoke "spatchCache.getCmd" and
+#               "spatchCache.setCmd" with a SHA-256 hash to ask "does this
+#               ID have no changes" or "say that this ID had no changes".
+# 4. If no "spatchCache.{set,get}Cmd" is specified we'll use
+# "redis-cli" and maintain a SET called "spatch-cache". Set
+# appropriate redis memory policies to keep it from growing
+# out of control.
+#
+# This, along with the general incremental "make" support for
+# "contrib/coccinelle", makes it viable to (re-)run coccicheck
+# e.g. when merging integration branches.
+#
+# Note that the "--very-quiet" flag is currently critical. The cache
+# will refuse to cache anything that has output on STDERR (which might
+# be errors from spatch), but see spatchCache.cacheWhenStderr below.
+#
+# The STDERR (and exit code) could in principle be cached (as with
+# ccache), but then the simple structure in the Redis cache would need
+# to change, so just supply "--very-quiet" for now.
+#
+# To use this, simply set SPATCH to
+# contrib/coccinelle/spatchcache. Then optionally set:
+#
+# [spatchCache]
+# # Optional: path to a custom spatch
+# spatch = ~/g/coccicheck/spatch.opt
+#
+# As well as this trace config (debug implies trace):
+#
+# cacheWhenStderr = true
+# trace = false
+# debug = false
+#
+# The ".depend/grep.o.d" can also be customized, as a string that will
+# be eval'd, it has access to a "$dirname" and "$basename":
+#
+# [spatchCache]
+# dependFormat = "$dirname/.depend/${basename%.c}.o.d"
+#
+# Setting "trace" to "true" allows for seeing when we have a cache HIT
+# or MISS. To debug whether the cache is working, do that and run e.g.:
+#
+# redis-cli FLUSHALL
+# <make && make coccicheck, as above>
+# grep -hore HIT -e MISS -e SET -e NOCACHE -e CANTCACHE .build/contrib/coccinelle | sort | uniq -c
+# 600 CANTCACHE
+# 7365 MISS
+# 7365 SET
+#
+# A subsequent "make cocciclean && make coccicheck" should then have
+# all "HIT"'s and "CANTCACHE"'s.
+#
+# The "spatchCache.cacheWhenStderr" option is critical when using
+# spatchCache.{trace,debug} to debug whether something is set in the
+# cache: since we'll write to the spatch logs in .build/*, we'd otherwise
+# always emit a NOCACHE.
+#
+# Reading the config can make the command much slower; to work around
+# this, the config can be set in the environment, with the environment
+# variable name corresponding to the config key. "default" can be used
+# to use whatever's the script default, e.g. setting
+# spatchCache.cacheWhenStderr=true and deferring to the defaults for
+# the rest is:
+#
+# export GIT_CONTRIB_SPATCHCACHE_DEBUG=default
+# export GIT_CONTRIB_SPATCHCACHE_TRACE=default
+# export GIT_CONTRIB_SPATCHCACHE_CACHEWHENSTDERR=true
+# export GIT_CONTRIB_SPATCHCACHE_SPATCH=default
+# export GIT_CONTRIB_SPATCHCACHE_DEPENDFORMAT=default
+# export GIT_CONTRIB_SPATCHCACHE_SETCMD=default
+# export GIT_CONTRIB_SPATCHCACHE_GETCMD=default
+
+set -e
+
+env_or_config () {
+ env="$1"
+ shift
+ if test "$env" = "default"
+ then
+ # Avoid expensive "git config" invocation
+ return
+ elif test -n "$env"
+ then
+ echo "$env"
+ else
+ git config $@ || :
+ fi
+}
+
+## Our own configuration & options
+debug=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_DEBUG" --bool "spatchCache.debug")
+if test "$debug" != "true"
+then
+ debug=
+fi
+if test -n "$debug"
+then
+ set -x
+fi
+
+trace=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_TRACE" --bool "spatchCache.trace")
+if test "$trace" != "true"
+then
+ trace=
+fi
+if test -n "$debug"
+then
+ # debug implies trace
+ trace=true
+fi
+
+cacheWhenStderr=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_CACHEWHENSTDERR" --bool "spatchCache.cacheWhenStderr")
+if test "$cacheWhenStderr" != "true"
+then
+ cacheWhenStderr=
+fi
+
+trace_it () {
+ if test -z "$trace"
+ then
+ return
+ fi
+ echo "$@" >&2
+}
+
+spatch=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_SPATCH" --path "spatchCache.spatch")
+if test -n "$spatch"
+then
+ if test -n "$debug"
+ then
+ trace_it "custom spatchCache.spatch='$spatch'"
+ fi
+else
+ spatch=spatch
+fi
+
+dependFormat='$dirname/.depend/${basename%.c}.o.d'
+dependFormatCfg=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_DEPENDFORMAT" "spatchCache.dependFormat")
+if test -n "$dependFormatCfg"
+then
+ dependFormat="$dependFormatCfg"
+fi
+
+set=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_SETCMD" "spatchCache.setCmd")
+get=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_GETCMD" "spatchCache.getCmd")
+
+## Parse spatch()-like command-line for caching info
+arg_sp=
+arg_file=
+args="$@"
+spatch_opts() {
+ while test $# != 0
+ do
+ arg_file="$1"
+ case "$1" in
+ --sp-file)
+ arg_sp="$2"
+ ;;
+ esac
+ shift
+ done
+}
+spatch_opts "$@"
+if ! test -f "$arg_file"
+then
+ arg_file=
+fi
+
+hash_for_cache() {
+ # Parameters that should affect the cache
+ echo "args=$args"
+ echo "config spatchCache.spatch=$spatch"
+ echo "config spatchCache.debug=$debug"
+ echo "config spatchCache.trace=$trace"
+ echo "config spatchCache.cacheWhenStderr=$cacheWhenStderr"
+ echo
+
+ # Our target file and its dependencies
+ git hash-object "$1" "$2" $(grep -E -o '^[^:]+:$' "$3" | tr -d ':')
+}
+
+# Sanity checks
+if ! test -f "$arg_sp" && ! test -f "$arg_file"
+then
+ echo $0: no idea how to cache "$@" >&2
+ exit 128
+fi
+
+# Main logic
+dirname=$(dirname "$arg_file")
+basename=$(basename "$arg_file")
+eval "dep=$dependFormat"
+
+if ! test -f "$dep"
+then
+ trace_it "$0: CANTCACHE have no '$dep' for '$arg_file'!"
+ exec "$spatch" "$@"
+fi
+
+if test -n "$debug"
+then
+ trace_it "$0: The full cache input for '$arg_sp' '$arg_file' '$dep'"
+ hash_for_cache "$arg_sp" "$arg_file" "$dep" >&2
+fi
+sum=$(hash_for_cache "$arg_sp" "$arg_file" "$dep" | git hash-object --stdin)
+
+trace_it "$0: processing '$arg_file' with '$arg_sp' rule, and got hash '$sum' for it + '$dep'"
+
+getret=
+if test -z "$get"
+then
+ if test $(redis-cli SISMEMBER spatch-cache "$sum") = 1
+ then
+ getret=0
+ else
+ getret=1
+ fi
+else
+	$get "$sum"
+ getret=$?
+fi
+
+if test "$getret" = 0
+then
+ trace_it "$0: HIT for '$arg_file' with '$arg_sp'"
+ exit 0
+else
+ trace_it "$0: MISS: for '$arg_file' with '$arg_sp'"
+fi
+
+out="$(mktemp)"
+err="$(mktemp)"
+
+set +e
+"$spatch" "$@" >"$out" 2>>"$err"
+ret=$?
+cat "$out"
+cat "$err" >&2
+set -e
+
+nocache=
+if test $ret != 0
+then
+ nocache="exited non-zero: $ret"
+elif test -s "$out"
+then
+ nocache="had patch output"
+elif test -z "$cacheWhenStderr" && test -s "$err"
+then
+ nocache="had stderr (use --very-quiet or spatchCache.cacheWhenStderr=true?)"
+fi
+
+if test -n "$nocache"
+then
+ trace_it "$0: NOCACHE ($nocache): for '$arg_file' with '$arg_sp'"
+ exit "$ret"
+fi
+
+trace_it "$0: SET: for '$arg_file' with '$arg_sp'"
+
+setret=
+if test -z "$set"
+then
+ if test $(redis-cli SADD spatch-cache "$sum") = 1
+ then
+ setret=0
+ else
+ setret=1
+ fi
+else
+ "$set" "$sum"
+ setret=$?
+fi
+
+if test "$setret" != 0
+then
+ echo "FAILED to set '$sum' in cache!" >&2
+ exit 128
+fi
+
+exit "$ret"
diff --git a/contrib/coccinelle/strbuf.cocci b/contrib/coccinelle/strbuf.cocci
index 0970d98ad7..5f06105df6 100644
--- a/contrib/coccinelle/strbuf.cocci
+++ b/contrib/coccinelle/strbuf.cocci
@@ -1,4 +1,4 @@
-@ strbuf_addf_with_format_only @
+@@
expression E;
constant fmt !~ "%";
@@
diff --git a/contrib/coccinelle/swap.cocci b/contrib/coccinelle/swap.cocci
index a0934d1fda..522177afb6 100644
--- a/contrib/coccinelle/swap.cocci
+++ b/contrib/coccinelle/swap.cocci
@@ -1,4 +1,4 @@
-@ swap_with_declaration @
+@@
type T;
identifier tmp;
T a, b;
diff --git a/contrib/coccinelle/the_repository.pending.cocci b/contrib/coccinelle/the_repository.pending.cocci
index 072ea0d922..747d382ff5 100644
--- a/contrib/coccinelle/the_repository.pending.cocci
+++ b/contrib/coccinelle/the_repository.pending.cocci
@@ -20,7 +20,6 @@ expression E;
@@
expression E;
-expression F;
@@
- has_object_file_with_flags(
+ repo_has_object_file_with_flags(the_repository,
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index ba5c395d2d..161327057d 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -58,6 +58,11 @@
#
# When set to "1" suggest all options, including options which are
# typically hidden (e.g. '--allow-empty' for 'git commit').
+#
+# GIT_COMPLETION_IGNORE_CASE
+#
+# When set to "1", suggest refs that match case insensitively (e.g.,
+# completing "FOO" on "git checkout f<TAB>").
case "$COMP_WORDBREAKS" in
*:*) : great ;;
@@ -644,8 +649,15 @@ __git_complete_index_file ()
__git_heads ()
{
local pfx="${1-}" cur_="${2-}" sfx="${3-}"
+ local ignore_case=""
+
+ if test "${GIT_COMPLETION_IGNORE_CASE-}" = "1"
+ then
+ ignore_case="--ignore-case"
+ fi
__git for-each-ref --format="${pfx//\%/%%}%(refname:strip=2)$sfx" \
+ $ignore_case \
"refs/heads/$cur_*" "refs/heads/$cur_*/**"
}
@@ -657,8 +669,15 @@ __git_heads ()
__git_remote_heads ()
{
local pfx="${1-}" cur_="${2-}" sfx="${3-}"
+ local ignore_case=""
+
+ if test "${GIT_COMPLETION_IGNORE_CASE-}" = "1"
+ then
+ ignore_case="--ignore-case"
+ fi
__git for-each-ref --format="${pfx//\%/%%}%(refname:strip=2)$sfx" \
+ $ignore_case \
"refs/remotes/$cur_*" "refs/remotes/$cur_*/**"
}
@@ -667,8 +686,15 @@ __git_remote_heads ()
__git_tags ()
{
local pfx="${1-}" cur_="${2-}" sfx="${3-}"
+ local ignore_case=""
+
+ if test "${GIT_COMPLETION_IGNORE_CASE-}" = "1"
+ then
+ ignore_case="--ignore-case"
+ fi
__git for-each-ref --format="${pfx//\%/%%}%(refname:strip=2)$sfx" \
+ $ignore_case \
"refs/tags/$cur_*" "refs/tags/$cur_*/**"
}
@@ -682,12 +708,19 @@ __git_dwim_remote_heads ()
{
local pfx="${1-}" cur_="${2-}" sfx="${3-}"
local fer_pfx="${pfx//\%/%%}" # "escape" for-each-ref format specifiers
+ local ignore_case=""
+
+ if test "${GIT_COMPLETION_IGNORE_CASE-}" = "1"
+ then
+ ignore_case="--ignore-case"
+ fi
# employ the heuristic used by git checkout and git switch
# Try to find a remote branch that cur_es the completion word
# but only output if the branch name is unique
__git for-each-ref --format="$fer_pfx%(refname:strip=3)$sfx" \
--sort="refname:strip=3" \
+ $ignore_case \
"refs/remotes/*/$cur_*" "refs/remotes/*/$cur_*/**" | \
uniq -u
}
@@ -712,7 +745,9 @@ __git_refs ()
local format refs
local pfx="${3-}" cur_="${4-$cur}" sfx="${5-}"
local match="${4-}"
+ local umatch="${4-}"
local fer_pfx="${pfx//\%/%%}" # "escape" for-each-ref format specifiers
+ local ignore_case=""
__git_find_repo_path
dir="$__git_repo_path"
@@ -735,12 +770,20 @@ __git_refs ()
fi
fi
+ if test "${GIT_COMPLETION_IGNORE_CASE-}" = "1"
+ then
+ ignore_case="--ignore-case"
+		# use tr instead of ${match^^} to preserve bash 3.2 compatibility
+ umatch=$(echo "$match" | tr a-z A-Z 2> /dev/null || echo "$match")
+ fi
+
if [ "$list_refs_from" = path ]; then
if [[ "$cur_" == ^* ]]; then
pfx="$pfx^"
fer_pfx="$fer_pfx^"
cur_=${cur_#^}
match=${match#^}
+ umatch=${umatch#^}
fi
case "$cur_" in
refs|refs/*)
@@ -751,7 +794,7 @@ __git_refs ()
*)
for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD CHERRY_PICK_HEAD; do
case "$i" in
- $match*)
+ $match*|$umatch*)
if [ -e "$dir/$i" ]; then
echo "$pfx$i$sfx"
fi
@@ -765,6 +808,7 @@ __git_refs ()
;;
esac
__git_dir="$dir" __git for-each-ref --format="$fer_pfx%($format)$sfx" \
+ $ignore_case \
"${refs[@]}"
if [ -n "$track" ]; then
__git_dwim_remote_heads "$pfx" "$match" "$sfx"
@@ -784,15 +828,16 @@ __git_refs ()
*)
if [ "$list_refs_from" = remote ]; then
case "HEAD" in
- $match*) echo "${pfx}HEAD$sfx" ;;
+ $match*|$umatch*) echo "${pfx}HEAD$sfx" ;;
esac
__git for-each-ref --format="$fer_pfx%(refname:strip=3)$sfx" \
+ $ignore_case \
"refs/remotes/$remote/$match*" \
"refs/remotes/$remote/$match*/**"
else
local query_symref
case "HEAD" in
- $match*) query_symref="HEAD" ;;
+ $match*|$umatch*) query_symref="HEAD" ;;
esac
__git ls-remote "$remote" $query_symref \
"refs/tags/$match*" "refs/heads/$match*" \
diff --git a/contrib/credential/netrc/git-credential-netrc.perl b/contrib/credential/netrc/git-credential-netrc.perl
index bc57cc6588..9fb998ae09 100755
--- a/contrib/credential/netrc/git-credential-netrc.perl
+++ b/contrib/credential/netrc/git-credential-netrc.perl
@@ -356,7 +356,10 @@ sub read_credential_data_from_stdin {
next unless m/^([^=]+)=(.+)/;
my ($token, $value) = ($1, $2);
- die "Unknown search token $token" unless exists $q{$token};
+
+ # skip any unknown tokens
+ next unless exists $q{$token};
+
$q{$token} = $value;
log_debug("We were given search token $token and value $value");
}
diff --git a/contrib/credential/osxkeychain/git-credential-osxkeychain.c b/contrib/credential/osxkeychain/git-credential-osxkeychain.c
index bf77748d60..e29cc28779 100644
--- a/contrib/credential/osxkeychain/git-credential-osxkeychain.c
+++ b/contrib/credential/osxkeychain/git-credential-osxkeychain.c
@@ -159,6 +159,11 @@ static void read_credential(void)
username = xstrdup(v);
else if (!strcmp(buf, "password"))
password = xstrdup(v);
+ /*
+ * Ignore other lines; we don't know what they mean, but
+ * this future-proofs us when later versions of git do
+ * learn new lines, and the helpers are updated to match.
+ */
}
}
diff --git a/contrib/credential/wincred/git-credential-wincred.c b/contrib/credential/wincred/git-credential-wincred.c
index 5091048f9c..ead6e267c7 100644
--- a/contrib/credential/wincred/git-credential-wincred.c
+++ b/contrib/credential/wincred/git-credential-wincred.c
@@ -278,8 +278,11 @@ static void read_credential(void)
wusername = utf8_to_utf16_dup(v);
} else if (!strcmp(buf, "password"))
password = utf8_to_utf16_dup(v);
- else
- die("unrecognized input");
+ /*
+ * Ignore other lines; we don't know what they mean, but
+ * this future-proofs us when later versions of git do
+ * learn new lines, and the helpers are updated to match.
+ */
}
}
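
The wincred helper now skips credential attributes it does not recognize instead of
dying (osxkeychain already did, and gains the same comment), which keeps older
helper builds usable when newer gits send extra key=value lines. A sketch of the
line-oriented stdin a helper receives ("newattr" is only an illustrative unknown key):

    # the credential protocol is "key=value" lines ended by a blank line;
    # with this change an unknown key no longer makes git-credential-wincred die
    printf 'protocol=https\nhost=example.com\nusername=me\nnewattr=1\n\n' |
        git credential-wincred get
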
diff --git a/contrib/subtree/git-subtree.sh b/contrib/subtree/git-subtree.sh
index 7562a395c2..10c9c87839 100755
--- a/contrib/subtree/git-subtree.sh
+++ b/contrib/subtree/git-subtree.sh
@@ -98,10 +98,18 @@ progress () {
assert () {
if ! "$@"
then
- die "assertion failed: $*"
+ die "fatal: assertion failed: $*"
fi
}
+# Usage: die_incompatible_opt OPTION COMMAND
+die_incompatible_opt () {
+ assert test "$#" = 2
+ opt="$1"
+ arg_command="$2"
+ die "fatal: the '$opt' flag does not make sense with 'git subtree $arg_command'."
+}
+
main () {
if test $# -eq 0
then
@@ -147,7 +155,7 @@ main () {
allow_addmerge=$arg_split_rejoin
;;
*)
- die "Unknown command '$arg_command'"
+ die "fatal: unknown command '$arg_command'"
;;
esac
# Reset the arguments array for "real" flag parsing.
@@ -176,16 +184,16 @@ main () {
arg_debug=1
;;
--annotate)
- test -n "$allow_split" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_split" || die_incompatible_opt "$opt" "$arg_command"
arg_split_annotate="$1"
shift
;;
--no-annotate)
- test -n "$allow_split" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_split" || die_incompatible_opt "$opt" "$arg_command"
arg_split_annotate=
;;
-b)
- test -n "$allow_split" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_split" || die_incompatible_opt "$opt" "$arg_command"
arg_split_branch="$1"
shift
;;
@@ -194,7 +202,7 @@ main () {
shift
;;
-m)
- test -n "$allow_addmerge" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_addmerge" || die_incompatible_opt "$opt" "$arg_command"
arg_addmerge_message="$1"
shift
;;
@@ -202,41 +210,41 @@ main () {
arg_prefix=
;;
--onto)
- test -n "$allow_split" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_split" || die_incompatible_opt "$opt" "$arg_command"
arg_split_onto="$1"
shift
;;
--no-onto)
- test -n "$allow_split" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_split" || die_incompatible_opt "$opt" "$arg_command"
arg_split_onto=
;;
--rejoin)
- test -n "$allow_split" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_split" || die_incompatible_opt "$opt" "$arg_command"
;;
--no-rejoin)
- test -n "$allow_split" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_split" || die_incompatible_opt "$opt" "$arg_command"
;;
--ignore-joins)
- test -n "$allow_split" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_split" || die_incompatible_opt "$opt" "$arg_command"
arg_split_ignore_joins=1
;;
--no-ignore-joins)
- test -n "$allow_split" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_split" || die_incompatible_opt "$opt" "$arg_command"
arg_split_ignore_joins=
;;
--squash)
- test -n "$allow_addmerge" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_addmerge" || die_incompatible_opt "$opt" "$arg_command"
arg_addmerge_squash=1
;;
--no-squash)
- test -n "$allow_addmerge" || die "The '$opt' flag does not make sense with 'git subtree $arg_command'."
+ test -n "$allow_addmerge" || die_incompatible_opt "$opt" "$arg_command"
arg_addmerge_squash=
;;
--)
break
;;
*)
- die "Unexpected option: $opt"
+ die "fatal: unexpected option: $opt"
;;
esac
done
@@ -244,17 +252,17 @@ main () {
if test -z "$arg_prefix"
then
- die "You must provide the --prefix option."
+ die "fatal: you must provide the --prefix option."
fi
case "$arg_command" in
add)
test -e "$arg_prefix" &&
- die "prefix '$arg_prefix' already exists."
+ die "fatal: prefix '$arg_prefix' already exists."
;;
*)
test -e "$arg_prefix" ||
- die "'$arg_prefix' does not exist; use 'git subtree add'"
+ die "fatal: '$arg_prefix' does not exist; use 'git subtree add'"
;;
esac
@@ -274,11 +282,11 @@ cache_setup () {
assert test $# = 0
cachedir="$GIT_DIR/subtree-cache/$$"
rm -rf "$cachedir" ||
- die "Can't delete old cachedir: $cachedir"
+ die "fatal: can't delete old cachedir: $cachedir"
mkdir -p "$cachedir" ||
- die "Can't create new cachedir: $cachedir"
+ die "fatal: can't create new cachedir: $cachedir"
mkdir -p "$cachedir/notree" ||
- die "Can't create new cachedir: $cachedir/notree"
+ die "fatal: can't create new cachedir: $cachedir/notree"
debug "Using cachedir: $cachedir" >&2
}
@@ -334,7 +342,7 @@ cache_set () {
test "$oldrev" != "latest_new" &&
test -e "$cachedir/$oldrev"
then
- die "cache for $oldrev already exists!"
+ die "fatal: cache for $oldrev already exists!"
fi
echo "$newrev" >"$cachedir/$oldrev"
}
@@ -363,13 +371,47 @@ try_remove_previous () {
fi
}
-# Usage: find_latest_squash DIR
+# Usage: process_subtree_split_trailer SPLIT_HASH MAIN_HASH [REPOSITORY]
+process_subtree_split_trailer () {
+ assert test $# = 2 -o $# = 3
+ b="$1"
+ sq="$2"
+ repository=""
+ if test "$#" = 3
+ then
+ repository="$3"
+ fi
+ fail_msg="fatal: could not rev-parse split hash $b from commit $sq"
+ if ! sub="$(git rev-parse --verify --quiet "$b^{commit}")"
+ then
+ # if 'repository' was given, try to fetch the 'git-subtree-split' hash
+ # before 'rev-parse'-ing it again, as it might be a tag that we do not have locally
+ if test -n "${repository}"
+ then
+ git fetch "$repository" "$b"
+ sub="$(git rev-parse --verify --quiet "$b^{commit}")" ||
+ die "$fail_msg"
+ else
+ hint1=$(printf "hint: hash might be a tag, try fetching it from the subtree repository:")
+ hint2=$(printf "hint: git fetch <subtree-repository> $b")
+ fail_msg=$(printf "$fail_msg\n$hint1\n$hint2")
+ die "$fail_msg"
+ fi
+ fi
+}
+
+# Usage: find_latest_squash DIR [REPOSITORY]
find_latest_squash () {
- assert test $# = 1
- debug "Looking for latest squash ($dir)..."
+ assert test $# = 1 -o $# = 2
+ dir="$1"
+ repository=""
+ if test "$#" = 2
+ then
+ repository="$2"
+ fi
+ debug "Looking for latest squash (dir=$dir, repository=$repository)..."
local indent=$(($indent + 1))
- dir="$1"
sq=
main=
sub=
@@ -387,8 +429,7 @@ find_latest_squash () {
main="$b"
;;
git-subtree-split:)
- sub="$(git rev-parse "$b^{commit}")" ||
- die "could not rev-parse split hash $b from commit $sq"
+ process_subtree_split_trailer "$b" "$sq" "$repository"
;;
END)
if test -n "$sub"
@@ -412,14 +453,19 @@ find_latest_squash () {
done || exit $?
}
-# Usage: find_existing_splits DIR REV
+# Usage: find_existing_splits DIR REV [REPOSITORY]
find_existing_splits () {
- assert test $# = 2
+ assert test $# = 2 -o $# = 3
debug "Looking for prior splits..."
local indent=$(($indent + 1))
dir="$1"
rev="$2"
+ repository=""
+ if test "$#" = 3
+ then
+ repository="$3"
+ fi
main=
sub=
local grep_format="^git-subtree-dir: $dir/*\$"
@@ -439,8 +485,7 @@ find_existing_splits () {
main="$b"
;;
git-subtree-split:)
- sub="$(git rev-parse "$b^{commit}")" ||
- die "could not rev-parse split hash $b from commit $sq"
+ process_subtree_split_trailer "$b" "$sq" "$repository"
;;
END)
debug "Main is: '$main'"
@@ -490,7 +535,7 @@ copy_commit () {
cat
) |
git commit-tree "$2" $3 # reads the rest of stdin
- ) || die "Can't copy commit $1"
+ ) || die "fatal: can't copy commit $1"
}
# Usage: add_msg DIR LATEST_OLD LATEST_NEW
@@ -718,11 +763,11 @@ ensure_clean () {
assert test $# = 0
if ! git diff-index HEAD --exit-code --quiet 2>&1
then
- die "Working tree has modifications. Cannot add."
+ die "fatal: working tree has modifications. Cannot add."
fi
if ! git diff-index --cached HEAD --exit-code --quiet 2>&1
then
- die "Index has modifications. Cannot add."
+ die "fatal: index has modifications. Cannot add."
fi
}
@@ -730,7 +775,7 @@ ensure_clean () {
ensure_valid_ref_format () {
assert test $# = 1
git check-ref-format "refs/heads/$1" ||
- die "'$1' does not look like a ref"
+ die "fatal: '$1' does not look like a ref"
}
# Usage: process_split_commit REV PARENTS
@@ -796,7 +841,7 @@ cmd_add () {
if test $# -eq 1
then
git rev-parse -q --verify "$1^{commit}" >/dev/null ||
- die "'$1' does not refer to a commit"
+ die "fatal: '$1' does not refer to a commit"
cmd_add_commit "$@"
@@ -811,7 +856,7 @@ cmd_add () {
cmd_add_repository "$@"
else
- say >&2 "error: parameters were '$*'"
+ say >&2 "fatal: parameters were '$*'"
die "Provide either a commit or a repository and commit."
fi
}
@@ -843,7 +888,7 @@ cmd_add_commit () {
git checkout -- "$dir" || exit $?
tree=$(git write-tree) || exit $?
- headrev=$(git rev-parse HEAD) || exit $?
+ headrev=$(git rev-parse --verify HEAD) || exit $?
if test -n "$headrev" && test "$headrev" != "$rev"
then
headp="-p $headrev"
@@ -866,17 +911,22 @@ cmd_add_commit () {
say >&2 "Added dir '$dir'"
}
-# Usage: cmd_split [REV]
+# Usage: cmd_split [REV] [REPOSITORY]
cmd_split () {
if test $# -eq 0
then
rev=$(git rev-parse HEAD)
- elif test $# -eq 1
+ elif test $# -eq 1 -o $# -eq 2
then
rev=$(git rev-parse -q --verify "$1^{commit}") ||
- die "'$1' does not refer to a commit"
+ die "fatal: '$1' does not refer to a commit"
else
- die "You must provide exactly one revision. Got: '$*'"
+		die "fatal: you must provide exactly one revision, and optionally a repository. Got: '$*'"
+ fi
+ repository=""
+ if test "$#" = 2
+ then
+ repository="$2"
fi
if test -n "$arg_split_rejoin"
@@ -900,7 +950,7 @@ cmd_split () {
done || exit $?
fi
- unrevs="$(find_existing_splits "$dir" "$rev")" || exit $?
+ unrevs="$(find_existing_splits "$dir" "$rev" "$repository")" || exit $?
# We can't restrict rev-list to only $dir here, because some of our
# parents have the $dir contents the root, and those won't match.
@@ -919,7 +969,7 @@ cmd_split () {
latest_new=$(cache_get latest_new) || exit $?
if test -z "$latest_new"
then
- die "No new revisions were found"
+ die "fatal: no new revisions were found"
fi
if test -n "$arg_split_rejoin"
@@ -940,7 +990,7 @@ cmd_split () {
then
if ! git merge-base --is-ancestor "$arg_split_branch" "$latest_new"
then
- die "Branch '$arg_split_branch' is not an ancestor of commit '$latest_new'."
+ die "fatal: branch '$arg_split_branch' is not an ancestor of commit '$latest_new'."
fi
action='Updated'
else
@@ -954,20 +1004,25 @@ cmd_split () {
exit 0
}
-# Usage: cmd_merge REV
+# Usage: cmd_merge REV [REPOSITORY]
cmd_merge () {
- test $# -eq 1 ||
- die "You must provide exactly one revision. Got: '$*'"
+ test $# -eq 1 -o $# -eq 2 ||
+ die "fatal: you must provide exactly one revision, and optionally a repository. Got: '$*'"
rev=$(git rev-parse -q --verify "$1^{commit}") ||
- die "'$1' does not refer to a commit"
+ die "fatal: '$1' does not refer to a commit"
+ repository=""
+ if test "$#" = 2
+ then
+ repository="$2"
+ fi
ensure_clean
if test -n "$arg_addmerge_squash"
then
- first_split="$(find_latest_squash "$dir")" || exit $?
+ first_split="$(find_latest_squash "$dir" "$repository")" || exit $?
if test -z "$first_split"
then
- die "Can't squash-merge: '$dir' was never added."
+ die "fatal: can't squash-merge: '$dir' was never added."
fi
set $first_split
old=$1
@@ -995,19 +1050,21 @@ cmd_merge () {
cmd_pull () {
if test $# -ne 2
then
- die "You must provide <repository> <ref>"
+ die "fatal: you must provide <repository> <ref>"
fi
+ repository="$1"
+ ref="$2"
ensure_clean
- ensure_valid_ref_format "$2"
- git fetch "$@" || exit $?
- cmd_merge FETCH_HEAD
+ ensure_valid_ref_format "$ref"
+ git fetch "$repository" "$ref" || exit $?
+ cmd_merge FETCH_HEAD "$repository"
}
# Usage: cmd_push REPOSITORY [+][LOCALREV:]REMOTEREF
cmd_push () {
if test $# -ne 2
then
- die "You must provide <repository> <refspec>"
+ die "fatal: you must provide <repository> <refspec>"
fi
if test -e "$dir"
then
@@ -1022,13 +1079,13 @@ cmd_push () {
fi
ensure_valid_ref_format "$remoteref"
localrev_presplit=$(git rev-parse -q --verify "$localrevname_presplit^{commit}") ||
- die "'$localrevname_presplit' does not refer to a commit"
+ die "fatal: '$localrevname_presplit' does not refer to a commit"
echo "git push using: " "$repository" "$refspec"
- localrev=$(cmd_split "$localrev_presplit") || die
+ localrev=$(cmd_split "$localrev_presplit" "$repository") || die
git push "$repository" "$localrev":"refs/heads/$remoteref"
else
- die "'$dir' must already exist. Try 'git subtree add'."
+ die "fatal: '$dir' must already exist. Try 'git subtree add'."
fi
}
diff --git a/contrib/subtree/git-subtree.txt b/contrib/subtree/git-subtree.txt
index 9cddfa2654..004abf415b 100644
--- a/contrib/subtree/git-subtree.txt
+++ b/contrib/subtree/git-subtree.txt
@@ -11,7 +11,7 @@ SYNOPSIS
[verse]
'git subtree' [<options>] -P <prefix> add <local-commit>
'git subtree' [<options>] -P <prefix> add <repository> <remote-ref>
-'git subtree' [<options>] -P <prefix> merge <local-commit>
+'git subtree' [<options>] -P <prefix> merge <local-commit> [<repository>]
'git subtree' [<options>] -P <prefix> split [<local-commit>]
[verse]
@@ -76,7 +76,7 @@ add <repository> <remote-ref>::
only a single commit from the subproject, rather than its
entire history.
-merge <local-commit>::
+merge <local-commit> [<repository>]::
Merge recent changes up to <local-commit> into the <prefix>
subtree. As with normal 'git merge', this doesn't
remove your own local changes; it just merges those
@@ -88,8 +88,13 @@ If you use '--squash', the merge direction doesn't always have to be
forward; you can use this command to go back in time from v2.5 to v2.4,
for example. If your merge introduces a conflict, you can resolve it in
the usual ways.
++
+When using '--squash', and the previous merge with '--squash' merged an
+annotated tag of the subtree repository, that tag needs to be available locally.
+If <repository> is given, a missing tag will automatically be fetched from that
+repository.
-split [<local-commit>]::
+split [<local-commit>] [<repository>]::
Extract a new, synthetic project history from the
history of the <prefix> subtree of <local-commit>, or of
HEAD if no <local-commit> is given. The new history
@@ -109,6 +114,11 @@ settings passed to 'split' (such as '--annotate') are the same.
Because of this, if you add new commits and then re-split, the new
commits will be attached as commits on top of the history you
generated last time, so 'git merge' and friends will work as expected.
++
+When a previous merge with '--squash' merged an annotated tag of the
+subtree repository, that tag needs to be available locally.
+If <repository> is given, a missing tag will automatically be fetched from that
+repository.
pull <repository> <remote-ref>::
Exactly like 'merge', but parallels 'git pull' in that
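
A hedged usage sketch of the new optional <repository> argument documented above,
mirroring the tests added below (the ../sub.git path is a placeholder):

    # the previous --squash merge recorded an annotated tag we no longer have
    # locally; naming the subtree repository lets 'git subtree' fetch it itself
    git subtree merge --prefix=sub --squash FETCH_HEAD ../sub.git
    git subtree split --prefix=sub HEAD ../sub.git
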
diff --git a/contrib/subtree/t/t7900-subtree.sh b/contrib/subtree/t/t7900-subtree.sh
index 1c1f76f04a..341c169eca 100755
--- a/contrib/subtree/t/t7900-subtree.sh
+++ b/contrib/subtree/t/t7900-subtree.sh
@@ -43,6 +43,30 @@ last_commit_subject () {
git log --pretty=format:%s -1
}
+# Upon 'git subtree add|merge --squash' of an annotated tag,
+# pre-2.32.0 versions of 'git subtree' would write the hash of the tag
+# (sub1 below), instead of the commit (sub1^{commit}) in the
+# "git-subtree-split" trailer.
+# We imitate this behaviour below using a replace ref.
+# This function creates 3 repositories:
+# - $1
+# - $1-sub (added as subtree "sub" in $1)
+# - $1-clone (clone of $1)
+test_create_pre2_32_repo () {
+ subtree_test_create_repo "$1" &&
+ subtree_test_create_repo "$1-sub" &&
+ test_commit -C "$1" main1 &&
+ test_commit -C "$1-sub" --annotate sub1 &&
+ git -C "$1" subtree add --prefix="sub" --squash "../$1-sub" sub1 &&
+ tag=$(git -C "$1" rev-parse FETCH_HEAD) &&
+ commit=$(git -C "$1" rev-parse FETCH_HEAD^{commit}) &&
+ git -C "$1" log -1 --format=%B HEAD^2 >msg &&
+ test_commit -C "$1-sub" --annotate sub2 &&
+ git clone --no-local "$1" "$1-clone" &&
+ new_commit=$(cat msg | sed -e "s/$commit/$tag/" | git -C "$1-clone" commit-tree HEAD^2^{tree}) &&
+ git -C "$1-clone" replace HEAD^2 $new_commit
+}
+
test_expect_success 'shows short help text for -h' '
test_expect_code 129 git subtree -h >out 2>err &&
test_must_be_empty err &&
@@ -264,6 +288,13 @@ test_expect_success 'merge new subproj history into subdir/ with a slash appende
)
'
+test_expect_success 'merge with --squash after annotated tag was added/merged with --squash pre-v2.32.0 ' '
+ test_create_pre2_32_repo "$test_count" &&
+ git -C "$test_count-clone" fetch "../$test_count-sub" sub2 &&
+ test_must_fail git -C "$test_count-clone" subtree merge --prefix="sub" --squash FETCH_HEAD &&
+ git -C "$test_count-clone" subtree merge --prefix="sub" --squash FETCH_HEAD "../$test_count-sub"
+'
+
#
# Tests for 'git subtree split'
#
@@ -277,7 +308,7 @@ test_expect_success 'split requires option --prefix' '
cd "$test_count" &&
git fetch ./"sub proj" HEAD &&
git subtree add --prefix="sub dir" FETCH_HEAD &&
- echo "You must provide the --prefix option." >expected &&
+ echo "fatal: you must provide the --prefix option." >expected &&
test_must_fail git subtree split >actual 2>&1 &&
test_debug "printf '"expected: "'" &&
test_debug "cat expected" &&
@@ -296,7 +327,7 @@ test_expect_success 'split requires path given by option --prefix must exist' '
cd "$test_count" &&
git fetch ./"sub proj" HEAD &&
git subtree add --prefix="sub dir" FETCH_HEAD &&
- echo "'\''non-existent-directory'\'' does not exist; use '\''git subtree add'\''" >expected &&
+ echo "fatal: '\''non-existent-directory'\'' does not exist; use '\''git subtree add'\''" >expected &&
test_must_fail git subtree split --prefix=non-existent-directory >actual 2>&1 &&
test_debug "printf '"expected: "'" &&
test_debug "cat expected" &&
@@ -551,6 +582,12 @@ test_expect_success 'split "sub dir"/ with --branch for an incompatible branch'
)
'
+test_expect_success 'split after annotated tag was added/merged with --squash pre-v2.32.0' '
+ test_create_pre2_32_repo "$test_count" &&
+ test_must_fail git -C "$test_count-clone" subtree split --prefix="sub" HEAD &&
+ git -C "$test_count-clone" subtree split --prefix="sub" HEAD "../$test_count-sub"
+'
+
#
# Tests for 'git subtree pull'
#
@@ -570,7 +607,7 @@ test_expect_success 'pull requires option --prefix' '
cd "$test_count" &&
test_must_fail git subtree pull ./"sub proj" HEAD >out 2>err &&
- echo "You must provide the --prefix option." >expected &&
+ echo "fatal: you must provide the --prefix option." >expected &&
test_must_be_empty out &&
test_cmp expected err
)
@@ -584,7 +621,7 @@ test_expect_success 'pull requires path given by option --prefix must exist' '
(
test_must_fail git subtree pull --prefix="sub dir" ./"sub proj" HEAD >out 2>err &&
- echo "'\''sub dir'\'' does not exist; use '\''git subtree add'\''" >expected &&
+ echo "fatal: '\''sub dir'\'' does not exist; use '\''git subtree add'\''" >expected &&
test_must_be_empty out &&
test_cmp expected err
)
@@ -630,6 +667,11 @@ test_expect_success 'pull rejects flags for split' '
)
'
+test_expect_success 'pull with --squash after annotated tag was added/merged with --squash pre-v2.32.0 ' '
+ test_create_pre2_32_repo "$test_count" &&
+ git -C "$test_count-clone" subtree -d pull --prefix="sub" --squash "../$test_count-sub" sub2
+'
+
#
# Tests for 'git subtree push'
#
@@ -643,7 +685,7 @@ test_expect_success 'push requires option --prefix' '
cd "$test_count" &&
git fetch ./"sub proj" HEAD &&
git subtree add --prefix="sub dir" FETCH_HEAD &&
- echo "You must provide the --prefix option." >expected &&
+ echo "fatal: you must provide the --prefix option." >expected &&
test_must_fail git subtree push "./sub proj" from-mainline >actual 2>&1 &&
test_debug "printf '"expected: "'" &&
test_debug "cat expected" &&
@@ -662,7 +704,7 @@ test_expect_success 'push requires path given by option --prefix must exist' '
cd "$test_count" &&
git fetch ./"sub proj" HEAD &&
git subtree add --prefix="sub dir" FETCH_HEAD &&
- echo "'\''non-existent-directory'\'' does not exist; use '\''git subtree add'\''" >expected &&
+ echo "fatal: '\''non-existent-directory'\'' does not exist; use '\''git subtree add'\''" >expected &&
test_must_fail git subtree push --prefix=non-existent-directory "./sub proj" from-mainline >actual 2>&1 &&
test_debug "printf '"expected: "'" &&
test_debug "cat expected" &&
@@ -953,6 +995,12 @@ test_expect_success 'push "sub dir"/ with a local rev' '
)
'
+test_expect_success 'push after annotated tag was added/merged with --squash pre-v2.32.0' '
+ test_create_pre2_32_repo "$test_count" &&
+ test_create_commit "$test_count-clone" sub/main-sub1 &&
+ git -C "$test_count-clone" subtree push --prefix="sub" "../$test_count-sub" from-mainline
+'
+
#
# Validity checking
#
diff --git a/convert.c b/convert.c
index 95e6a5244f..9b67649032 100644
--- a/convert.c
+++ b/convert.c
@@ -1549,7 +1549,7 @@ struct stream_filter {
struct stream_filter_vtbl *vtbl;
};
-static int null_filter_fn(struct stream_filter *filter,
+static int null_filter_fn(struct stream_filter *filter UNUSED,
const char *input, size_t *isize_p,
char *output, size_t *osize_p)
{
@@ -1568,7 +1568,7 @@ static int null_filter_fn(struct stream_filter *filter,
return 0;
}
-static void null_free_fn(struct stream_filter *filter)
+static void null_free_fn(struct stream_filter *filter UNUSED)
{
; /* nothing -- null instances are shared */
}
diff --git a/csum-file.c b/csum-file.c
index 59ef3398ca..3243473c3d 100644
--- a/csum-file.c
+++ b/csum-file.c
@@ -45,7 +45,8 @@ void hashflush(struct hashfile *f)
unsigned offset = f->offset;
if (offset) {
- the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
+ if (!f->skip_hash)
+ the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
flush(f, f->buffer, offset);
f->offset = 0;
}
@@ -64,7 +65,12 @@ int finalize_hashfile(struct hashfile *f, unsigned char *result,
int fd;
hashflush(f);
- the_hash_algo->final_fn(f->buffer, &f->ctx);
+
+ if (f->skip_hash)
+ memset(f->buffer, 0, the_hash_algo->rawsz);
+ else
+ the_hash_algo->final_fn(f->buffer, &f->ctx);
+
if (result)
hashcpy(result, f->buffer);
if (flags & CSUM_HASH_IN_STREAM)
@@ -108,7 +114,8 @@ void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
* the hashfile's buffer. In this block,
* f->offset is necessarily zero.
*/
- the_hash_algo->update_fn(&f->ctx, buf, nr);
+ if (!f->skip_hash)
+ the_hash_algo->update_fn(&f->ctx, buf, nr);
flush(f, buf, nr);
} else {
/*
@@ -153,6 +160,7 @@ static struct hashfile *hashfd_internal(int fd, const char *name,
f->tp = tp;
f->name = name;
f->do_crc = 0;
+ f->skip_hash = 0;
the_hash_algo->init_fn(&f->ctx);
f->buffer_len = buffer_len;
diff --git a/csum-file.h b/csum-file.h
index 0d29f528fb..29468067f8 100644
--- a/csum-file.h
+++ b/csum-file.h
@@ -20,6 +20,13 @@ struct hashfile {
size_t buffer_len;
unsigned char *buffer;
unsigned char *check_buffer;
+
+ /**
+ * If set to 1, skip_hash indicates that we should
+ * not actually compute the hash for this hashfile and
+ * instead only use it as a buffered write.
+ */
+ unsigned int skip_hash;
};
/* Checkpoint */
diff --git a/date.c b/date.c
index 68a260c214..53bd6a7932 100644
--- a/date.c
+++ b/date.c
@@ -1101,7 +1101,7 @@ static void date_tea(struct tm *tm, struct tm *now, int *num)
date_time(tm, now, 17);
}
-static void date_pm(struct tm *tm, struct tm *now, int *num)
+static void date_pm(struct tm *tm, struct tm *now UNUSED, int *num)
{
int hour, n = *num;
*num = 0;
@@ -1115,7 +1115,7 @@ static void date_pm(struct tm *tm, struct tm *now, int *num)
tm->tm_hour = (hour % 12) + 12;
}
-static void date_am(struct tm *tm, struct tm *now, int *num)
+static void date_am(struct tm *tm, struct tm *now UNUSED, int *num)
{
int hour, n = *num;
*num = 0;
@@ -1129,7 +1129,7 @@ static void date_am(struct tm *tm, struct tm *now, int *num)
tm->tm_hour = (hour % 12);
}
-static void date_never(struct tm *tm, struct tm *now, int *num)
+static void date_never(struct tm *tm, struct tm *now UNUSED, int *num)
{
time_t n = 0;
localtime_r(&n, tm);
diff --git a/delta-islands.c b/delta-islands.c
index 26f9e99e1a..90c0d6958f 100644
--- a/delta-islands.c
+++ b/delta-islands.c
@@ -26,8 +26,6 @@ static kh_oid_map_t *island_marks;
static unsigned island_counter;
static unsigned island_counter_core;
-static kh_str_t *remote_islands;
-
struct remote_island {
uint64_t hash;
struct oid_array oids;
@@ -312,29 +310,55 @@ void resolve_tree_islands(struct repository *r,
free(todo);
}
-static regex_t *island_regexes;
-static unsigned int island_regexes_alloc, island_regexes_nr;
+struct island_load_data {
+ kh_str_t *remote_islands;
+ regex_t *rx;
+ size_t nr;
+ size_t alloc;
+};
static const char *core_island_name;
-static int island_config_callback(const char *k, const char *v, void *cb UNUSED)
+static void free_config_regexes(struct island_load_data *ild)
{
+ for (size_t i = 0; i < ild->nr; i++)
+ regfree(&ild->rx[i]);
+ free(ild->rx);
+}
+
+static void free_remote_islands(kh_str_t *remote_islands)
+{
+ const char *island_name;
+ struct remote_island *rl;
+
+ kh_foreach(remote_islands, island_name, rl, {
+ free((void *)island_name);
+ oid_array_clear(&rl->oids);
+ free(rl);
+ });
+ kh_destroy_str(remote_islands);
+}
+
+static int island_config_callback(const char *k, const char *v, void *cb)
+{
+ struct island_load_data *ild = cb;
+
if (!strcmp(k, "pack.island")) {
struct strbuf re = STRBUF_INIT;
if (!v)
return config_error_nonbool(k);
- ALLOC_GROW(island_regexes, island_regexes_nr + 1, island_regexes_alloc);
+ ALLOC_GROW(ild->rx, ild->nr + 1, ild->alloc);
if (*v != '^')
strbuf_addch(&re, '^');
strbuf_addstr(&re, v);
- if (regcomp(&island_regexes[island_regexes_nr], re.buf, REG_EXTENDED))
+ if (regcomp(&ild->rx[ild->nr], re.buf, REG_EXTENDED))
die(_("failed to load island regex for '%s': %s"), k, re.buf);
strbuf_release(&re);
- island_regexes_nr++;
+ ild->nr++;
return 0;
}
@@ -344,7 +368,8 @@ static int island_config_callback(const char *k, const char *v, void *cb UNUSED)
return 0;
}
-static void add_ref_to_island(const char *island_name, const struct object_id *oid)
+static void add_ref_to_island(kh_str_t *remote_islands, const char *island_name,
+ const struct object_id *oid)
{
uint64_t sha_core;
struct remote_island *rl = NULL;
@@ -365,8 +390,10 @@ static void add_ref_to_island(const char *island_name, const struct object_id *o
}
static int find_island_for_ref(const char *refname, const struct object_id *oid,
- int flags UNUSED, void *data UNUSED)
+ int flags UNUSED, void *cb)
{
+ struct island_load_data *ild = cb;
+
/*
* We should advertise 'ARRAY_SIZE(matches) - 2' as the max,
* so we can diagnose below a config with more capture groups
@@ -377,8 +404,8 @@ static int find_island_for_ref(const char *refname, const struct object_id *oid,
struct strbuf island_name = STRBUF_INIT;
/* walk backwards to get last-one-wins ordering */
- for (i = island_regexes_nr - 1; i >= 0; i--) {
- if (!regexec(&island_regexes[i], refname,
+ for (i = ild->nr - 1; i >= 0; i--) {
+ if (!regexec(&ild->rx[i], refname,
ARRAY_SIZE(matches), matches, 0))
break;
}
@@ -403,12 +430,12 @@ static int find_island_for_ref(const char *refname, const struct object_id *oid,
strbuf_add(&island_name, refname + match->rm_so, match->rm_eo - match->rm_so);
}
- add_ref_to_island(island_name.buf, oid);
+ add_ref_to_island(ild->remote_islands, island_name.buf, oid);
strbuf_release(&island_name);
return 0;
}
-static struct remote_island *get_core_island(void)
+static struct remote_island *get_core_island(kh_str_t *remote_islands)
{
if (core_island_name) {
khiter_t pos = kh_get_str(remote_islands, core_island_name);
@@ -419,7 +446,7 @@ static struct remote_island *get_core_island(void)
return NULL;
}
-static void deduplicate_islands(struct repository *r)
+static void deduplicate_islands(kh_str_t *remote_islands, struct repository *r)
{
struct remote_island *island, *core = NULL, **list;
unsigned int island_count, dst, src, ref, i = 0;
@@ -445,7 +472,7 @@ static void deduplicate_islands(struct repository *r)
}
island_bitmap_size = (island_count / 32) + 1;
- core = get_core_island();
+ core = get_core_island(remote_islands);
for (i = 0; i < island_count; ++i) {
mark_remote_island_1(r, list[i], core && list[i]->hash == core->hash);
@@ -456,12 +483,16 @@ static void deduplicate_islands(struct repository *r)
void load_delta_islands(struct repository *r, int progress)
{
+ struct island_load_data ild = { 0 };
+
island_marks = kh_init_oid_map();
- remote_islands = kh_init_str();
- git_config(island_config_callback, NULL);
- for_each_ref(find_island_for_ref, NULL);
- deduplicate_islands(r);
+ git_config(island_config_callback, &ild);
+ ild.remote_islands = kh_init_str();
+ for_each_ref(find_island_for_ref, &ild);
+ free_config_regexes(&ild);
+ deduplicate_islands(ild.remote_islands, r);
+ free_remote_islands(ild.remote_islands);
if (progress)
fprintf(stderr, _("Marked %d islands, done.\n"), island_counter);
diff --git a/diff-lib.c b/diff-lib.c
index 2edea41a23..b48f155736 100644
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -14,6 +14,7 @@
#include "dir.h"
#include "fsmonitor.h"
#include "commit-reach.h"
+#include "config.h"
/*
* diff-files
@@ -53,7 +54,7 @@ static int check_removed(const struct index_state *istate, const struct cache_en
* a directory --- the blob was removed!
*/
if (!S_ISGITLINK(ce->ce_mode) &&
- resolve_gitlink_ref(ce->name, "HEAD", &sub))
+ resolve_gitlink_ref(ce->name, "HEAD", &sub, NULL))
return 1;
}
return 0;
@@ -65,26 +66,46 @@ static int check_removed(const struct index_state *istate, const struct cache_en
* Return 1 when changes are detected, 0 otherwise. If the DIRTY_SUBMODULES
* option is set, the caller does not only want to know if a submodule is
* modified at all but wants to know all the conditions that are met (new
- * commits, untracked content and/or modified content).
+ * commits, untracked content and/or modified content). If the
+ * defer_submodule_status bit is set, dirty_submodule will be left to the
+ * caller to set. defer_submodule_status can also be set to 0 in this
+ * function if there is no need to check if the submodule is modified.
*/
static int match_stat_with_submodule(struct diff_options *diffopt,
const struct cache_entry *ce,
struct stat *st, unsigned ce_option,
- unsigned *dirty_submodule)
+ unsigned *dirty_submodule, int *defer_submodule_status,
+ unsigned *ignore_untracked)
{
int changed = ie_match_stat(diffopt->repo->index, ce, st, ce_option);
- if (S_ISGITLINK(ce->ce_mode)) {
- struct diff_flags orig_flags = diffopt->flags;
- if (!diffopt->flags.override_submodule_config)
- set_diffopt_flags_from_submodule_config(diffopt, ce->name);
- if (diffopt->flags.ignore_submodules)
- changed = 0;
- else if (!diffopt->flags.ignore_dirty_submodules &&
- (!changed || diffopt->flags.dirty_submodules))
+ struct diff_flags orig_flags;
+ int defer = 0;
+
+ if (!S_ISGITLINK(ce->ce_mode))
+ goto ret;
+
+ orig_flags = diffopt->flags;
+ if (!diffopt->flags.override_submodule_config)
+ set_diffopt_flags_from_submodule_config(diffopt, ce->name);
+ if (diffopt->flags.ignore_submodules) {
+ changed = 0;
+ goto cleanup;
+ }
+ if (!diffopt->flags.ignore_dirty_submodules &&
+ (!changed || diffopt->flags.dirty_submodules)) {
+ if (defer_submodule_status && *defer_submodule_status) {
+ defer = 1;
+ *ignore_untracked = diffopt->flags.ignore_untracked_in_submodules;
+ } else {
*dirty_submodule = is_submodule_modified(ce->name,
- diffopt->flags.ignore_untracked_in_submodules);
- diffopt->flags = orig_flags;
+ diffopt->flags.ignore_untracked_in_submodules);
+ }
}
+cleanup:
+ diffopt->flags = orig_flags;
+ret:
+ if (defer_submodule_status)
+ *defer_submodule_status = defer;
return changed;
}
@@ -96,6 +117,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
? CE_MATCH_RACY_IS_DIRTY : 0);
uint64_t start = getnanotime();
struct index_state *istate = revs->diffopt.repo->index;
+ struct string_list submodules = STRING_LIST_INIT_NODUP;
diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");
@@ -220,6 +242,8 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
newmode = ce->ce_mode;
} else {
struct stat st;
+ unsigned ignore_untracked = 0;
+ int defer_submodule_status = !!revs->repo;
changed = check_removed(istate, ce, &st);
if (changed) {
@@ -241,8 +265,25 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
}
changed = match_stat_with_submodule(&revs->diffopt, ce, &st,
- ce_option, &dirty_submodule);
+ ce_option, &dirty_submodule,
+ &defer_submodule_status,
+ &ignore_untracked);
newmode = ce_mode_from_stat(ce, st.st_mode);
+ if (defer_submodule_status) {
+ struct submodule_status_util tmp = {
+ .changed = changed,
+ .dirty_submodule = 0,
+ .ignore_untracked = ignore_untracked,
+ .newmode = newmode,
+ .ce = ce,
+ };
+ struct string_list_item *item;
+
+ item = string_list_append(&submodules, ce->name);
+ item->util = xmalloc(sizeof(tmp));
+ memcpy(item->util, &tmp, sizeof(tmp));
+ continue;
+ }
}
if (!changed && !dirty_submodule) {
@@ -261,6 +302,40 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
ce->name, 0, dirty_submodule);
}
+ if (submodules.nr > 0) {
+ int parallel_jobs;
+ if (git_config_get_int("submodule.diffjobs", &parallel_jobs))
+ parallel_jobs = 1;
+ else if (!parallel_jobs)
+ parallel_jobs = online_cpus();
+ else if (parallel_jobs < 0)
+ die(_("submodule.diffjobs cannot be negative"));
+
+ if (get_submodules_status(revs->repo, &submodules, parallel_jobs))
+ die(_("submodule status failed"));
+ for (size_t i = 0; i < submodules.nr; i++) {
+ struct submodule_status_util *util = submodules.items[i].util;
+ struct cache_entry *ce = util->ce;
+ unsigned int oldmode;
+ const struct object_id *old_oid, *new_oid;
+
+ if (!util->changed && !util->dirty_submodule) {
+ ce_mark_uptodate(ce);
+ mark_fsmonitor_valid(istate, ce);
+ if (!revs->diffopt.flags.find_copies_harder)
+ continue;
+ }
+ oldmode = ce->ce_mode;
+ old_oid = &ce->oid;
+ new_oid = util->changed ? null_oid() : &ce->oid;
+ diff_change(&revs->diffopt, oldmode, util->newmode,
+ old_oid, new_oid,
+ !is_null_oid(old_oid),
+ !is_null_oid(new_oid),
+ ce->name, 0, util->dirty_submodule);
+ }
+ }
+ string_list_clear(&submodules, 1);
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
trace_performance_since(start, "diff-files");
@@ -308,7 +383,7 @@ static int get_stat_data(const struct index_state *istate,
return -1;
}
changed = match_stat_with_submodule(diffopt, ce, &st,
- 0, dirty_submodule);
+ 0, dirty_submodule, NULL, NULL);
if (changed) {
mode = ce_mode_from_stat(ce, st.st_mode);
oid = null_oid();
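
The deferred-submodule path above stashes per-entry state in a struct submodule_status_util whose definition is not part of this excerpt. A minimal sketch, inferred purely from the designated initializer in run_diff_files(); the real definition elsewhere in the series may differ:

    /* Illustrative only; layout inferred from the initializer above. */
    struct submodule_status_util {
    	int changed;			/* match_stat_with_submodule() result */
    	unsigned dirty_submodule;	/* filled in by the parallel status pass */
    	unsigned ignore_untracked;	/* ignore_untracked_in_submodules at defer time */
    	unsigned int newmode;		/* ce_mode_from_stat() result */
    	struct cache_entry *ce;		/* index entry this status belongs to */
    };
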
diff --git a/diff-merges.c b/diff-merges.c
index 7f64156b8b..85cbefa5af 100644
--- a/diff-merges.c
+++ b/diff-merges.c
@@ -20,9 +20,20 @@ static void suppress(struct rev_info *revs)
revs->remerge_diff = 0;
}
-static void set_separate(struct rev_info *revs)
+static void common_setup(struct rev_info *revs)
{
suppress(revs);
+ revs->merges_need_diff = 1;
+}
+
+static void set_none(struct rev_info *revs)
+{
+ suppress(revs);
+}
+
+static void set_separate(struct rev_info *revs)
+{
+ common_setup(revs);
revs->separate_merges = 1;
revs->simplify_history = 0;
}
@@ -35,21 +46,21 @@ static void set_first_parent(struct rev_info *revs)
static void set_combined(struct rev_info *revs)
{
- suppress(revs);
+ common_setup(revs);
revs->combine_merges = 1;
revs->dense_combined_merges = 0;
}
static void set_dense_combined(struct rev_info *revs)
{
- suppress(revs);
+ common_setup(revs);
revs->combine_merges = 1;
revs->dense_combined_merges = 1;
}
static void set_remerge_diff(struct rev_info *revs)
{
- suppress(revs);
+ common_setup(revs);
revs->remerge_diff = 1;
revs->simplify_history = 0;
}
@@ -57,18 +68,18 @@ static void set_remerge_diff(struct rev_info *revs)
static diff_merges_setup_func_t func_by_opt(const char *optarg)
{
if (!strcmp(optarg, "off") || !strcmp(optarg, "none"))
- return suppress;
+ return set_none;
if (!strcmp(optarg, "1") || !strcmp(optarg, "first-parent"))
return set_first_parent;
- else if (!strcmp(optarg, "separate"))
+ if (!strcmp(optarg, "separate"))
return set_separate;
- else if (!strcmp(optarg, "c") || !strcmp(optarg, "combined"))
+ if (!strcmp(optarg, "c") || !strcmp(optarg, "combined"))
return set_combined;
- else if (!strcmp(optarg, "cc") || !strcmp(optarg, "dense-combined"))
+ if (!strcmp(optarg, "cc") || !strcmp(optarg, "dense-combined"))
return set_dense_combined;
- else if (!strcmp(optarg, "r") || !strcmp(optarg, "remerge"))
+ if (!strcmp(optarg, "r") || !strcmp(optarg, "remerge"))
return set_remerge_diff;
- else if (!strcmp(optarg, "m") || !strcmp(optarg, "on"))
+ if (!strcmp(optarg, "m") || !strcmp(optarg, "on"))
return set_to_default;
return NULL;
}
@@ -81,10 +92,6 @@ static void set_diff_merges(struct rev_info *revs, const char *optarg)
die(_("invalid value for '%s': '%s'"), "--diff-merges", optarg);
func(revs);
-
- /* NOTE: the merges_need_diff flag is cleared by func() call */
- if (func != suppress)
- revs->merges_need_diff = 1;
}
/*
@@ -115,6 +122,7 @@ int diff_merges_parse_opts(struct rev_info *revs, const char **argv)
if (!suppress_m_parsing && !strcmp(arg, "-m")) {
set_to_default(revs);
+ revs->merges_need_diff = 0;
} else if (!strcmp(arg, "-c")) {
set_combined(revs);
revs->merges_imply_patch = 1;
@@ -125,7 +133,7 @@ int diff_merges_parse_opts(struct rev_info *revs, const char **argv)
set_remerge_diff(revs);
revs->merges_imply_patch = 1;
} else if (!strcmp(arg, "--no-diff-merges")) {
- suppress(revs);
+ set_none(revs);
} else if (!strcmp(arg, "--combined-all-paths")) {
revs->combined_all_paths = 1;
} else if ((argcount = parse_long_opt("diff-merges", argv, &optarg))) {
@@ -139,7 +147,7 @@ int diff_merges_parse_opts(struct rev_info *revs, const char **argv)
void diff_merges_suppress(struct rev_info *revs)
{
- suppress(revs);
+ set_none(revs);
}
void diff_merges_default_to_first_parent(struct rev_info *revs)
diff --git a/diff.c b/diff.c
index 648f6717a5..9f9a92ec9d 100644
--- a/diff.c
+++ b/diff.c
@@ -2488,6 +2488,9 @@ static int diffstat_consume(void *priv, char *line, unsigned long len)
struct diffstat_t *diffstat = priv;
struct diffstat_file *x = diffstat->files[diffstat->nr - 1];
+ if (!len)
+ BUG("xdiff fed us an empty line");
+
if (line[0] == '+')
x->added++;
else if (line[0] == '-')
@@ -2621,7 +2624,7 @@ static void show_stats(struct diffstat_t *data, struct diff_options *options)
continue;
}
fill_print_name(file);
- len = strlen(file->print_name);
+ len = utf8_strwidth(file->print_name);
if (max_len < len)
max_len = len;
@@ -2674,6 +2677,11 @@ static void show_stats(struct diffstat_t *data, struct diff_options *options)
* making the line longer than the maximum width.
*/
+ /*
+ * NEEDSWORK: line_prefix is often used for "log --graph" output
+ * and contains an ANSI-colored string. utf8_strnwidth() should be
+ * used to correctly count the display width instead of strlen().
+ */
if (options->stat_width == -1)
width = term_columns() - strlen(line_prefix);
else
@@ -2735,7 +2743,7 @@ static void show_stats(struct diffstat_t *data, struct diff_options *options)
char *name = file->print_name;
uintmax_t added = file->added;
uintmax_t deleted = file->deleted;
- int name_len;
+ int name_len, padding;
if (!file->is_interesting && (added + deleted == 0))
continue;
@@ -2744,20 +2752,34 @@ static void show_stats(struct diffstat_t *data, struct diff_options *options)
* "scale" the filename
*/
len = name_width;
- name_len = strlen(name);
+ name_len = utf8_strwidth(name);
if (name_width < name_len) {
char *slash;
prefix = "...";
len -= 3;
+ /*
+ * NEEDSWORK: (name_len - len) counts the display
+ * width, which would be shorter than the byte
+ * length of the corresponding substring.
+ * Advancing "name" by that number of bytes does
+ * *NOT* skip over that many columns, so it is
+ * very likely that chomping the pathname at the
+ * slash we will find starting from "name" will
+ * leave the resulting string still too long.
+ */
name += name_len - len;
slash = strchr(name, '/');
if (slash)
name = slash;
}
+ padding = len - utf8_strwidth(name);
+ if (padding < 0)
+ padding = 0;
if (file->is_binary) {
- strbuf_addf(&out, " %s%-*s |", prefix, len, name);
- strbuf_addf(&out, " %*s", number_width, "Bin");
+ strbuf_addf(&out, " %s%s%*s | %*s",
+ prefix, name, padding, "",
+ number_width, "Bin");
if (!added && !deleted) {
strbuf_addch(&out, '\n');
emit_diff_symbol(options, DIFF_SYMBOL_STATS_LINE,
@@ -2777,8 +2799,9 @@ static void show_stats(struct diffstat_t *data, struct diff_options *options)
continue;
}
else if (file->is_unmerged) {
- strbuf_addf(&out, " %s%-*s |", prefix, len, name);
- strbuf_addstr(&out, " Unmerged\n");
+ strbuf_addf(&out, " %s%s%*s | %*s",
+ prefix, name, padding, "",
+ number_width, "Unmerged");
emit_diff_symbol(options, DIFF_SYMBOL_STATS_LINE,
out.buf, out.len, 0);
strbuf_reset(&out);
@@ -2804,10 +2827,10 @@ static void show_stats(struct diffstat_t *data, struct diff_options *options)
add = total - del;
}
}
- strbuf_addf(&out, " %s%-*s |", prefix, len, name);
- strbuf_addf(&out, " %*"PRIuMAX"%s",
- number_width, added + deleted,
- added + deleted ? " " : "");
+ strbuf_addf(&out, " %s%s%*s | %*"PRIuMAX"%s",
+ prefix, name, padding, "",
+ number_width, added + deleted,
+ added + deleted ? " " : "");
show_graph(&out, '+', add, add_c, reset);
show_graph(&out, '-', del, del_c, reset);
strbuf_addch(&out, '\n');
@@ -4278,35 +4301,34 @@ static void run_external_diff(const char *pgm,
const char *xfrm_msg,
struct diff_options *o)
{
- struct strvec argv = STRVEC_INIT;
- struct strvec env = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
struct diff_queue_struct *q = &diff_queued_diff;
- strvec_push(&argv, pgm);
- strvec_push(&argv, name);
+ strvec_push(&cmd.args, pgm);
+ strvec_push(&cmd.args, name);
if (one && two) {
- add_external_diff_name(o->repo, &argv, name, one);
+ add_external_diff_name(o->repo, &cmd.args, name, one);
if (!other)
- add_external_diff_name(o->repo, &argv, name, two);
+ add_external_diff_name(o->repo, &cmd.args, name, two);
else {
- add_external_diff_name(o->repo, &argv, other, two);
- strvec_push(&argv, other);
- strvec_push(&argv, xfrm_msg);
+ add_external_diff_name(o->repo, &cmd.args, other, two);
+ strvec_push(&cmd.args, other);
+ strvec_push(&cmd.args, xfrm_msg);
}
}
- strvec_pushf(&env, "GIT_DIFF_PATH_COUNTER=%d", ++o->diff_path_counter);
- strvec_pushf(&env, "GIT_DIFF_PATH_TOTAL=%d", q->nr);
+ strvec_pushf(&cmd.env, "GIT_DIFF_PATH_COUNTER=%d",
+ ++o->diff_path_counter);
+ strvec_pushf(&cmd.env, "GIT_DIFF_PATH_TOTAL=%d", q->nr);
diff_free_filespec_data(one);
diff_free_filespec_data(two);
- if (run_command_v_opt_cd_env(argv.v, RUN_USING_SHELL, NULL, env.v))
+ cmd.use_shell = 1;
+ if (run_command(&cmd))
die(_("external diff died, stopping at %s"), name);
remove_tempfile();
- strvec_clear(&argv);
- strvec_clear(&env);
}
static int similarity_index(struct diff_filepair *p)
@@ -6206,7 +6228,7 @@ static void patch_id_add_mode(git_hash_ctx *ctx, unsigned mode)
}
/* returns 0 upon success, and writes result into oid */
-static int diff_get_patch_id(struct diff_options *options, struct object_id *oid, int diff_header_only, int stable)
+static int diff_get_patch_id(struct diff_options *options, struct object_id *oid, int diff_header_only)
{
struct diff_queue_struct *q = &diff_queued_diff;
int i;
@@ -6253,61 +6275,62 @@ static int diff_get_patch_id(struct diff_options *options, struct object_id *oid
if (p->one->mode == 0) {
patch_id_add_string(&ctx, "newfilemode");
patch_id_add_mode(&ctx, p->two->mode);
- patch_id_add_string(&ctx, "---/dev/null");
- patch_id_add_string(&ctx, "+++b/");
- the_hash_algo->update_fn(&ctx, p->two->path, len2);
} else if (p->two->mode == 0) {
patch_id_add_string(&ctx, "deletedfilemode");
patch_id_add_mode(&ctx, p->one->mode);
- patch_id_add_string(&ctx, "---a/");
- the_hash_algo->update_fn(&ctx, p->one->path, len1);
- patch_id_add_string(&ctx, "+++/dev/null");
- } else {
- patch_id_add_string(&ctx, "---a/");
- the_hash_algo->update_fn(&ctx, p->one->path, len1);
- patch_id_add_string(&ctx, "+++b/");
- the_hash_algo->update_fn(&ctx, p->two->path, len2);
+ } else if (p->one->mode != p->two->mode) {
+ patch_id_add_string(&ctx, "oldmode");
+ patch_id_add_mode(&ctx, p->one->mode);
+ patch_id_add_string(&ctx, "newmode");
+ patch_id_add_mode(&ctx, p->two->mode);
}
- if (diff_header_only)
- continue;
-
- if (fill_mmfile(options->repo, &mf1, p->one) < 0 ||
- fill_mmfile(options->repo, &mf2, p->two) < 0)
- return error("unable to read files to diff");
-
- if (diff_filespec_is_binary(options->repo, p->one) ||
+ if (diff_header_only) {
+ /* don't do anything since we're only populating header info */
+ } else if (diff_filespec_is_binary(options->repo, p->one) ||
diff_filespec_is_binary(options->repo, p->two)) {
the_hash_algo->update_fn(&ctx, oid_to_hex(&p->one->oid),
the_hash_algo->hexsz);
the_hash_algo->update_fn(&ctx, oid_to_hex(&p->two->oid),
the_hash_algo->hexsz);
- continue;
- }
-
- xpp.flags = 0;
- xecfg.ctxlen = 3;
- xecfg.flags = XDL_EMIT_NO_HUNK_HDR;
- if (xdi_diff_outf(&mf1, &mf2, NULL,
- patch_id_consume, &data, &xpp, &xecfg))
- return error("unable to generate patch-id diff for %s",
- p->one->path);
+ } else {
+ if (p->one->mode == 0) {
+ patch_id_add_string(&ctx, "---/dev/null");
+ patch_id_add_string(&ctx, "+++b/");
+ the_hash_algo->update_fn(&ctx, p->two->path, len2);
+ } else if (p->two->mode == 0) {
+ patch_id_add_string(&ctx, "---a/");
+ the_hash_algo->update_fn(&ctx, p->one->path, len1);
+ patch_id_add_string(&ctx, "+++/dev/null");
+ } else {
+ patch_id_add_string(&ctx, "---a/");
+ the_hash_algo->update_fn(&ctx, p->one->path, len1);
+ patch_id_add_string(&ctx, "+++b/");
+ the_hash_algo->update_fn(&ctx, p->two->path, len2);
+ }
- if (stable)
- flush_one_hunk(oid, &ctx);
+ if (fill_mmfile(options->repo, &mf1, p->one) < 0 ||
+ fill_mmfile(options->repo, &mf2, p->two) < 0)
+ return error("unable to read files to diff");
+ xpp.flags = 0;
+ xecfg.ctxlen = 3;
+ xecfg.flags = XDL_EMIT_NO_HUNK_HDR;
+ if (xdi_diff_outf(&mf1, &mf2, NULL,
+ patch_id_consume, &data, &xpp, &xecfg))
+ return error("unable to generate patch-id diff for %s",
+ p->one->path);
+ }
+ flush_one_hunk(oid, &ctx);
}
- if (!stable)
- the_hash_algo->final_oid_fn(oid, &ctx);
-
return 0;
}
-int diff_flush_patch_id(struct diff_options *options, struct object_id *oid, int diff_header_only, int stable)
+int diff_flush_patch_id(struct diff_options *options, struct object_id *oid, int diff_header_only)
{
struct diff_queue_struct *q = &diff_queued_diff;
int i;
- int result = diff_get_patch_id(options, oid, diff_header_only, stable);
+ int result = diff_get_patch_id(options, oid, diff_header_only);
for (i = 0; i < q->nr; i++)
diff_free_filepair(q->queue[i]);
diff --git a/diff.h b/diff.h
index 8ae18e5ab1..fd33caeb25 100644
--- a/diff.h
+++ b/diff.h
@@ -634,7 +634,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option);
int run_diff_index(struct rev_info *revs, unsigned int option);
int do_diff_cache(const struct object_id *, struct diff_options *);
-int diff_flush_patch_id(struct diff_options *, struct object_id *, int, int);
+int diff_flush_patch_id(struct diff_options *, struct object_id *, int);
void flush_one_hunk(struct object_id *result, git_hash_ctx *ctx);
int diff_result_code(struct diff_options *, int);
diff --git a/diffcore-pickaxe.c b/diffcore-pickaxe.c
index c88e50c632..03fcbcb40b 100644
--- a/diffcore-pickaxe.c
+++ b/diffcore-pickaxe.c
@@ -38,7 +38,7 @@ static int diffgrep_consume(void *priv, char *line, unsigned long len)
static int diff_grep(mmfile_t *one, mmfile_t *two,
struct diff_options *o,
- regex_t *regexp, kwset_t kws)
+ regex_t *regexp, kwset_t kws UNUSED)
{
struct diffgrep_cb ecbdata;
xpparam_t xpp;
@@ -114,7 +114,7 @@ static unsigned int contains(mmfile_t *mf, regex_t *regexp, kwset_t kws,
}
static int has_changes(mmfile_t *one, mmfile_t *two,
- struct diff_options *o,
+ struct diff_options *o UNUSED,
regex_t *regexp, kwset_t kws)
{
unsigned int c1 = one ? contains(one, regexp, kws, 0) : 0;
diff --git a/dir.c b/dir.c
index 7542950820..703e9d72f2 100644
--- a/dir.c
+++ b/dir.c
@@ -669,9 +669,7 @@ int pl_hashmap_cmp(const void *cmp_data UNUSED,
? ee1->patternlen
: ee2->patternlen;
- if (ignore_case)
- return strncasecmp(ee1->pattern, ee2->pattern, min_len);
- return strncmp(ee1->pattern, ee2->pattern, min_len);
+ return fspathncmp(ee1->pattern, ee2->pattern, min_len);
}
static char *dup_and_filter_pattern(const char *pattern)
@@ -3253,7 +3251,7 @@ static int remove_dir_recurse(struct strbuf *path, int flag, int *kept_up)
struct object_id submodule_head;
if ((flag & REMOVE_DIR_KEEP_NESTED_GIT) &&
- !resolve_gitlink_ref(path->buf, "HEAD", &submodule_head)) {
+ !resolve_gitlink_ref(path->buf, "HEAD", &submodule_head, NULL)) {
/* Do not descend and nuke a nested git work tree. */
if (kept_up)
*kept_up = 1;
@@ -3583,8 +3581,12 @@ static void free_untracked(struct untracked_cache_dir *ucd)
void free_untracked_cache(struct untracked_cache *uc)
{
- if (uc)
- free_untracked(uc->root);
+ if (!uc)
+ return;
+
+ free(uc->exclude_per_dir_to_free);
+ strbuf_release(&uc->ident);
+ free_untracked(uc->root);
free(uc);
}
@@ -3741,7 +3743,7 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
next + offset + hashsz);
uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
exclude_per_dir = (const char *)next + exclude_per_dir_offset;
- uc->exclude_per_dir = xstrdup(exclude_per_dir);
+ uc->exclude_per_dir = uc->exclude_per_dir_to_free = xstrdup(exclude_per_dir);
/* NUL after exclude_per_dir is covered by sizeof(*ouc) */
next += exclude_per_dir_offset + strlen(exclude_per_dir) + 1;
if (next >= end)
diff --git a/dir.h b/dir.h
index 674747d93a..8acfc04418 100644
--- a/dir.h
+++ b/dir.h
@@ -188,6 +188,7 @@ struct untracked_cache {
struct oid_stat ss_info_exclude;
struct oid_stat ss_excludes_file;
const char *exclude_per_dir;
+ char *exclude_per_dir_to_free;
struct strbuf ident;
/*
* dir_struct#flags must match dir_flags or the untracked
diff --git a/exec-cmd.c b/exec-cmd.c
index eeb2ee52b8..0232bbc990 100644
--- a/exec-cmd.c
+++ b/exec-cmd.c
@@ -252,7 +252,7 @@ static const char *system_prefix(void)
 * This is called during initialization, but no work needs to be done here when
* runtime prefix is not being used.
*/
-void git_resolve_executable_dir(const char *argv0)
+void git_resolve_executable_dir(const char *argv0 UNUSED)
{
}
diff --git a/fsck.h b/fsck.h
index 6f801e53b1..6fbce68ad6 100644
--- a/fsck.h
+++ b/fsck.h
@@ -13,6 +13,12 @@ enum fsck_msg_type {
FSCK_WARN,
};
+/*
+ * Documentation/fsck-msgids.txt documents these; when
+ * modifying this list in any way, make sure to keep the
+ * two in sync.
+ */
+
#define FOREACH_FSCK_MSG_ID(FUNC) \
/* fatal errors */ \
FUNC(NUL_IN_HEADER, FATAL) \
@@ -24,7 +30,6 @@ enum fsck_msg_type {
FUNC(BAD_NAME, ERROR) \
FUNC(BAD_OBJECT_SHA1, ERROR) \
FUNC(BAD_PARENT_SHA1, ERROR) \
- FUNC(BAD_TAG_OBJECT, ERROR) \
FUNC(BAD_TIMEZONE, ERROR) \
FUNC(BAD_TREE, ERROR) \
FUNC(BAD_TREE_SHA1, ERROR) \
@@ -40,7 +45,6 @@ enum fsck_msg_type {
FUNC(MISSING_TAG, ERROR) \
FUNC(MISSING_TAG_ENTRY, ERROR) \
FUNC(MISSING_TREE, ERROR) \
- FUNC(MISSING_TREE_OBJECT, ERROR) \
FUNC(MISSING_TYPE, ERROR) \
FUNC(MISSING_TYPE_ENTRY, ERROR) \
FUNC(MULTIPLE_AUTHORS, ERROR) \
diff --git a/fsmonitor--daemon.h b/fsmonitor--daemon.h
index 2102a5c9ff..e24838f9a8 100644
--- a/fsmonitor--daemon.h
+++ b/fsmonitor--daemon.h
@@ -8,6 +8,7 @@
#include "run-command.h"
#include "simple-ipc.h"
#include "thread-utils.h"
+#include "fsmonitor-path-utils.h"
struct fsmonitor_batch;
struct fsmonitor_token_data;
@@ -43,6 +44,7 @@ struct fsmonitor_daemon_state {
struct strbuf path_worktree_watch;
struct strbuf path_gitdir_watch;
+ struct alias_info alias;
int nr_paths_watching;
struct fsmonitor_token_data *current_token_data;
@@ -59,6 +61,7 @@ struct fsmonitor_daemon_state {
struct ipc_server_data *ipc_server_data;
struct strbuf path_ipc;
+
};
/*
diff --git a/fsmonitor-ipc.c b/fsmonitor-ipc.c
index 789e7397ba..19d772f0f3 100644
--- a/fsmonitor-ipc.c
+++ b/fsmonitor-ipc.c
@@ -18,7 +18,7 @@ int fsmonitor_ipc__is_supported(void)
return 0;
}
-const char *fsmonitor_ipc__get_path(void)
+const char *fsmonitor_ipc__get_path(struct repository *r)
{
return NULL;
}
@@ -47,19 +47,21 @@ int fsmonitor_ipc__is_supported(void)
return 1;
}
-GIT_PATH_FUNC(fsmonitor_ipc__get_path, "fsmonitor--daemon.ipc")
-
enum ipc_active_state fsmonitor_ipc__get_state(void)
{
- return ipc_get_active_state(fsmonitor_ipc__get_path());
+ return ipc_get_active_state(fsmonitor_ipc__get_path(the_repository));
}
static int spawn_daemon(void)
{
- const char *args[] = { "fsmonitor--daemon", "start", NULL };
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ cmd.no_stdin = 1;
+ cmd.trace2_child_class = "fsmonitor";
+ strvec_pushl(&cmd.args, "fsmonitor--daemon", "start", NULL);
- return run_command_v_opt_tr2(args, RUN_COMMAND_NO_STDIN | RUN_GIT_CMD,
- "fsmonitor");
+ return run_command(&cmd);
}
int fsmonitor_ipc__send_query(const char *since_token,
@@ -81,8 +83,8 @@ int fsmonitor_ipc__send_query(const char *since_token,
trace2_data_string("fsm_client", NULL, "query/command", tok);
try_again:
- state = ipc_client_try_connect(fsmonitor_ipc__get_path(), &options,
- &connection);
+ state = ipc_client_try_connect(fsmonitor_ipc__get_path(the_repository),
+ &options, &connection);
switch (state) {
case IPC_STATE__LISTENING:
@@ -117,13 +119,13 @@ try_again:
case IPC_STATE__INVALID_PATH:
ret = error(_("fsmonitor_ipc__send_query: invalid path '%s'"),
- fsmonitor_ipc__get_path());
+ fsmonitor_ipc__get_path(the_repository));
goto done;
case IPC_STATE__OTHER_ERROR:
default:
ret = error(_("fsmonitor_ipc__send_query: unspecified error on '%s'"),
- fsmonitor_ipc__get_path());
+ fsmonitor_ipc__get_path(the_repository));
goto done;
}
@@ -149,8 +151,8 @@ int fsmonitor_ipc__send_command(const char *command,
options.wait_if_busy = 1;
options.wait_if_not_found = 0;
- state = ipc_client_try_connect(fsmonitor_ipc__get_path(), &options,
- &connection);
+ state = ipc_client_try_connect(fsmonitor_ipc__get_path(the_repository),
+ &options, &connection);
if (state != IPC_STATE__LISTENING) {
die(_("fsmonitor--daemon is not running"));
return -1;
diff --git a/fsmonitor-ipc.h b/fsmonitor-ipc.h
index b6a7067c3a..8b489da762 100644
--- a/fsmonitor-ipc.h
+++ b/fsmonitor-ipc.h
@@ -3,6 +3,8 @@
#include "simple-ipc.h"
+struct repository;
+
/*
* Returns true if built-in file system monitor daemon is defined
* for this platform.
@@ -16,7 +18,7 @@ int fsmonitor_ipc__is_supported(void);
*
* Returns NULL if the daemon is not supported on this platform.
*/
-const char *fsmonitor_ipc__get_path(void);
+const char *fsmonitor_ipc__get_path(struct repository *r);
/*
* Try to determine whether there is a `git-fsmonitor--daemon` process
diff --git a/fsmonitor-path-utils.h b/fsmonitor-path-utils.h
new file mode 100644
index 0000000000..5bfdfb81c1
--- /dev/null
+++ b/fsmonitor-path-utils.h
@@ -0,0 +1,60 @@
+#ifndef FSM_PATH_UTILS_H
+#define FSM_PATH_UTILS_H
+
+#include "strbuf.h"
+
+struct alias_info
+{
+ struct strbuf alias;
+ struct strbuf points_to;
+};
+
+struct fs_info {
+ int is_remote;
+ char *typename;
+};
+
+/*
+ * Get some basic filesystem information for the given path
+ *
+ * The caller owns the storage that is occupied by fs_info and
+ * is responsible for releasing it.
+ *
+ * Returns -1 on error, zero otherwise.
+ */
+int fsmonitor__get_fs_info(const char *path, struct fs_info *fs_info);
+
+/*
+ * Determines if the filesystem that path resides on is remote.
+ *
+ * Returns -1 on error, 0 if not remote, 1 if remote.
+ */
+int fsmonitor__is_fs_remote(const char *path);
+
+/*
+ * Get the alias in given path, if any.
+ *
+ * Sets alias to the first alias that matches any part of the path.
+ *
+ * If an alias is found, info.alias and info.points_to are set to the
+ * found mapping.
+ *
+ * Returns -1 on error, 0 otherwise.
+ *
+ * The caller owns the storage that is occupied by info.alias and
+ * info.points_to and is responsible for releasing it.
+ */
+int fsmonitor__get_alias(const char *path, struct alias_info *info);
+
+/*
+ * Resolve the path against the given alias.
+ *
+ * Returns the resolved path if there is one, NULL otherwise.
+ *
+ * The caller owns the storage that the returned string occupies and
+ * is responsible for releasing it.
+ */
+char *fsmonitor__resolve_alias(const char *path,
+ const struct alias_info *info);
+
+#endif
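
A minimal caller-side sketch of the alias helpers declared above; it is not part of the patch, the function name is hypothetical, and it assumes only the contracts documented in the header (the caller owns info.alias, info.points_to and the returned string; NULL means no alias applies):

    /* Hypothetical caller, for illustration only. */
    #include "fsmonitor-path-utils.h"

    static char *resolve_watch_path(const char *path)
    {
    	struct alias_info info = { .alias = STRBUF_INIT,
    				   .points_to = STRBUF_INIT };
    	char *resolved = NULL;

    	if (fsmonitor__get_alias(path, &info) < 0)
    		goto done;		/* lookup error */

    	/* NULL simply means no alias covers this path */
    	resolved = fsmonitor__resolve_alias(path, &info);
    done:
    	strbuf_release(&info.alias);
    	strbuf_release(&info.points_to);
    	return resolved;		/* caller frees */
    }
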
diff --git a/fsmonitor-settings.c b/fsmonitor-settings.c
index 464424a1e9..ee63a97dc5 100644
--- a/fsmonitor-settings.c
+++ b/fsmonitor-settings.c
@@ -1,7 +1,9 @@
#include "cache.h"
#include "config.h"
#include "repository.h"
+#include "fsmonitor-ipc.h"
#include "fsmonitor-settings.h"
+#include "fsmonitor-path-utils.h"
/*
 * We keep this structure definition private and have getters
@@ -13,7 +15,53 @@ struct fsmonitor_settings {
char *hook_path;
};
-static enum fsmonitor_reason check_for_incompatible(struct repository *r)
+/*
+ * Remote working directories are problematic for FSMonitor.
+ *
+ * The underlying file system on the server machine and/or the remote
+ * mount type dictates whether notification events are available at
+ * all to remote client machines.
+ *
+ * Kernel differences between the server and client machines also
+ * dictate how (buffering, frequency, de-dup) the events are
+ * delivered to client machine processes.
+ *
+ * A client machine (such as a laptop) may choose to suspend/resume
+ * and it is unclear (without lots of testing) whether the watcher can
+ * resync after a resume. We might be able to treat this as a normal
+ * "events were dropped by the kernel" event and do our normal "flush
+ * and resync" --or-- we might need to close the existing (zombie?)
+ * notification fd and create a new one.
+ *
+ * In theory, the above issues need to be addressed whether we are
+ * using the Hook or IPC API.
+ *
+ * So (for now at least), mark remote working directories as
+ * incompatible unless 'fsmonitor.allowRemote' is true.
+ *
+ */
+#ifdef HAVE_FSMONITOR_OS_SETTINGS
+static enum fsmonitor_reason check_remote(struct repository *r)
+{
+ int allow_remote = -1; /* -1 unset, 0 not allowed, 1 allowed */
+ int is_remote = fsmonitor__is_fs_remote(r->worktree);
+
+ switch (is_remote) {
+ case 0:
+ return FSMONITOR_REASON_OK;
+ case 1:
+ repo_config_get_bool(r, "fsmonitor.allowremote", &allow_remote);
+ if (allow_remote < 1)
+ return FSMONITOR_REASON_REMOTE;
+ else
+ return FSMONITOR_REASON_OK;
+ default:
+ return FSMONITOR_REASON_ERROR;
+ }
+}
+#endif
+
+static enum fsmonitor_reason check_for_incompatible(struct repository *r, int ipc)
{
if (!r->worktree) {
/*
@@ -27,7 +75,10 @@ static enum fsmonitor_reason check_for_incompatible(struct repository *r)
{
enum fsmonitor_reason reason;
- reason = fsm_os__incompatible(r);
+ reason = check_remote(r);
+ if (reason != FSMONITOR_REASON_OK)
+ return reason;
+ reason = fsm_os__incompatible(r, ipc);
if (reason != FSMONITOR_REASON_OK)
return reason;
}
@@ -112,7 +163,7 @@ const char *fsm_settings__get_hook_path(struct repository *r)
void fsm_settings__set_ipc(struct repository *r)
{
- enum fsmonitor_reason reason = check_for_incompatible(r);
+ enum fsmonitor_reason reason = check_for_incompatible(r, 1);
if (reason != FSMONITOR_REASON_OK) {
fsm_settings__set_incompatible(r, reason);
@@ -135,7 +186,7 @@ void fsm_settings__set_ipc(struct repository *r)
void fsm_settings__set_hook(struct repository *r, const char *path)
{
- enum fsmonitor_reason reason = check_for_incompatible(r);
+ enum fsmonitor_reason reason = check_for_incompatible(r, 0);
if (reason != FSMONITOR_REASON_OK) {
fsm_settings__set_incompatible(r, reason);
@@ -192,10 +243,11 @@ enum fsmonitor_reason fsm_settings__get_reason(struct repository *r)
return r->settings.fsmonitor->reason;
}
-char *fsm_settings__get_incompatible_msg(const struct repository *r,
+char *fsm_settings__get_incompatible_msg(struct repository *r,
enum fsmonitor_reason reason)
{
struct strbuf msg = STRBUF_INIT;
+ const char *socket_dir;
switch (reason) {
case FSMONITOR_REASON_UNTESTED:
@@ -231,9 +283,11 @@ char *fsm_settings__get_incompatible_msg(const struct repository *r,
goto done;
case FSMONITOR_REASON_NOSOCKETS:
+ socket_dir = dirname((char *)fsmonitor_ipc__get_path(r));
strbuf_addf(&msg,
- _("repository '%s' is incompatible with fsmonitor due to lack of Unix sockets"),
- r->worktree);
+ _("socket directory '%s' is incompatible with fsmonitor due"
+ " to lack of Unix sockets support"),
+ socket_dir);
goto done;
}
diff --git a/fsmonitor-settings.h b/fsmonitor-settings.h
index d9c2605197..ab02e3995e 100644
--- a/fsmonitor-settings.h
+++ b/fsmonitor-settings.h
@@ -33,7 +33,7 @@ enum fsmonitor_mode fsm_settings__get_mode(struct repository *r);
const char *fsm_settings__get_hook_path(struct repository *r);
enum fsmonitor_reason fsm_settings__get_reason(struct repository *r);
-char *fsm_settings__get_incompatible_msg(const struct repository *r,
+char *fsm_settings__get_incompatible_msg(struct repository *r,
enum fsmonitor_reason reason);
struct fsmonitor_settings;
@@ -48,7 +48,7 @@ struct fsmonitor_settings;
 * fsm_os__* routines should be considered private to fsm_settings__
* routines.
*/
-enum fsmonitor_reason fsm_os__incompatible(struct repository *r);
+enum fsmonitor_reason fsm_os__incompatible(struct repository *r, int ipc);
#endif /* HAVE_FSMONITOR_OS_SETTINGS */
#endif /* FSMONITOR_SETTINGS_H */
diff --git a/fsmonitor.c b/fsmonitor.c
index 57d6a483be..08af00c738 100644
--- a/fsmonitor.c
+++ b/fsmonitor.c
@@ -295,6 +295,7 @@ static int fsmonitor_force_update_threshold = 100;
void refresh_fsmonitor(struct index_state *istate)
{
+ static int warn_once = 0;
struct strbuf query_result = STRBUF_INIT;
int query_success = 0, hook_version = -1;
size_t bol = 0; /* beginning of line */
@@ -305,6 +306,14 @@ void refresh_fsmonitor(struct index_state *istate)
int is_trivial = 0;
struct repository *r = istate->repo ? istate->repo : the_repository;
enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
+ enum fsmonitor_reason reason = fsm_settings__get_reason(r);
+
+ if (!warn_once && reason > FSMONITOR_REASON_OK) {
+ char *msg = fsm_settings__get_incompatible_msg(r, reason);
+ warn_once = 1;
+ warning("%s", msg);
+ free(msg);
+ }
if (fsm_mode <= FSMONITOR_MODE_DISABLED ||
istate->fsmonitor_has_run_once)
diff --git a/gettext.c b/gettext.c
index bb5ba1fe7c..f139008d0a 100644
--- a/gettext.c
+++ b/gettext.c
@@ -10,7 +10,6 @@
#include "config.h"
#ifndef NO_GETTEXT
-# include <locale.h>
# include <libintl.h>
# ifdef GIT_WINDOWS_NATIVE
@@ -80,7 +79,6 @@ static int test_vsnprintf(const char *fmt, ...)
static void init_gettext_charset(const char *domain)
{
- setlocale(LC_CTYPE, "");
charset = locale_charset();
bind_textdomain_codeset(domain, charset);
diff --git a/git-bisect.sh b/git-bisect.sh
deleted file mode 100755
index 405cf76f2a..0000000000
--- a/git-bisect.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/sh
-
-USAGE='[help|start|bad|good|new|old|terms|skip|next|reset|visualize|view|replay|log|run]'
-LONG_USAGE='git bisect help
- print this long help message.
-git bisect start [--term-{new,bad}=<term> --term-{old,good}=<term>]
- [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<pathspec>...]
- reset bisect state and start bisection.
-git bisect (bad|new) [<rev>]
- mark <rev> a known-bad revision/
- a revision after change in a given property.
-git bisect (good|old) [<rev>...]
- mark <rev>... known-good revisions/
- revisions before change in a given property.
-git bisect terms [--term-good | --term-bad]
- show the terms used for old and new commits (default: bad, good)
-git bisect skip [(<rev>|<range>)...]
- mark <rev>... untestable revisions.
-git bisect next
- find next bisection to test and check it out.
-git bisect reset [<commit>]
- finish bisection search and go back to commit.
-git bisect (visualize|view)
- show bisect status in gitk.
-git bisect replay <logfile>
- replay bisection log.
-git bisect log
- show bisect log.
-git bisect run <cmd>...
- use <cmd>... to automatically bisect.
-
-Please use "git help bisect" to get the full man page.'
-
-OPTIONS_SPEC=
-. git-sh-setup
-
-TERM_BAD=bad
-TERM_GOOD=good
-
-get_terms () {
- if test -s "$GIT_DIR/BISECT_TERMS"
- then
- {
- read TERM_BAD
- read TERM_GOOD
- } <"$GIT_DIR/BISECT_TERMS"
- fi
-}
-
-case "$#" in
-0)
- usage ;;
-*)
- cmd="$1"
- get_terms
- shift
- case "$cmd" in
- help)
- git bisect -h ;;
- start)
- git bisect--helper --bisect-start "$@" ;;
- bad|good|new|old|"$TERM_BAD"|"$TERM_GOOD")
- git bisect--helper --bisect-state "$cmd" "$@" ;;
- skip)
- git bisect--helper --bisect-skip "$@" || exit;;
- next)
- # Not sure we want "next" at the UI level anymore.
- git bisect--helper --bisect-next "$@" || exit ;;
- visualize|view)
- git bisect--helper --bisect-visualize "$@" || exit;;
- reset)
- git bisect--helper --bisect-reset "$@" ;;
- replay)
- git bisect--helper --bisect-replay "$@" || exit;;
- log)
- git bisect--helper --bisect-log || exit ;;
- run)
- git bisect--helper --bisect-run "$@" || exit;;
- terms)
- git bisect--helper --bisect-terms "$@" || exit;;
- *)
- usage ;;
- esac
-esac
diff --git a/git-compat-util.h b/git-compat-util.h
index b90b64718e..a76d0526f7 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -189,9 +189,12 @@ struct strbuf;
#define _NETBSD_SOURCE 1
#define _SGI_SOURCE 1
-#if defined(__GNUC__)
+#if GIT_GNUC_PREREQ(4, 5)
#define UNUSED __attribute__((unused)) \
__attribute__((deprecated ("parameter declared as UNUSED")))
+#elif defined(__GNUC__)
+#define UNUSED __attribute__((unused)) \
+ __attribute__((deprecated))
#else
#define UNUSED
#endif
@@ -222,6 +225,7 @@ struct strbuf;
#endif
#include <errno.h>
#include <limits.h>
+#include <locale.h>
#ifdef NEEDS_SYS_PARAM_H
#include <sys/param.h>
#endif
@@ -310,7 +314,9 @@ typedef unsigned long uintptr_t;
#ifdef PRECOMPOSE_UNICODE
#include "compat/precompose_utf8.h"
#else
-static inline const char *precompose_argv_prefix(int argc, const char **argv, const char *prefix)
+static inline const char *precompose_argv_prefix(int argc UNUSED,
+ const char **argv UNUSED,
+ const char *prefix)
{
return prefix;
}
@@ -335,7 +341,9 @@ struct itimerval {
#endif
#ifdef NO_SETITIMER
-static inline int setitimer(int which, const struct itimerval *value, struct itimerval *newvalue) {
+static inline int setitimer(int which UNUSED,
+ const struct itimerval *value UNUSED,
+ struct itimerval *newvalue UNUSED) {
return 0; /* pretend success */
}
#endif
@@ -420,7 +428,7 @@ int lstat_cache_aware_rmdir(const char *path);
#endif
#ifndef has_dos_drive_prefix
-static inline int git_has_dos_drive_prefix(const char *path)
+static inline int git_has_dos_drive_prefix(const char *path UNUSED)
{
return 0;
}
@@ -428,7 +436,7 @@ static inline int git_has_dos_drive_prefix(const char *path)
#endif
#ifndef skip_dos_drive_prefix
-static inline int git_skip_dos_drive_prefix(char **path)
+static inline int git_skip_dos_drive_prefix(char **path UNUSED)
{
return 0;
}
@@ -1463,11 +1471,11 @@ int open_nofollow(const char *path, int flags);
#endif
#ifndef _POSIX_THREAD_SAFE_FUNCTIONS
-static inline void flockfile(FILE *fh)
+static inline void flockfile(FILE *fh UNUSED)
{
; /* nothing */
}
-static inline void funlockfile(FILE *fh)
+static inline void funlockfile(FILE *fh UNUSED)
{
; /* nothing */
}
diff --git a/git-submodule.sh b/git-submodule.sh
index 5e5d21c010..9a50f2e912 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -343,7 +343,6 @@ cmd_update()
${recursive:+--recursive} \
${init:+--init} \
${nofetch:+--no-fetch} \
- ${wt_prefix:+--prefix "$wt_prefix"} \
${rebase:+--rebase} \
${merge:+--merge} \
${checkout:+--checkout} \
@@ -557,7 +556,7 @@ cmd_sync()
cmd_absorbgitdirs()
{
- git submodule--helper absorbgitdirs --prefix "$wt_prefix" "$@"
+ git ${wt_prefix:+-C "$wt_prefix"} submodule--helper absorbgitdirs "$@"
}
# This loop parses the command line arguments to find the
diff --git a/git.c b/git.c
index da411c5382..277a8cce84 100644
--- a/git.c
+++ b/git.c
@@ -492,7 +492,7 @@ static struct cmd_struct commands[] = {
{ "annotate", cmd_annotate, RUN_SETUP },
{ "apply", cmd_apply, RUN_SETUP_GENTLY },
{ "archive", cmd_archive, RUN_SETUP_GENTLY },
- { "bisect--helper", cmd_bisect__helper, RUN_SETUP },
+ { "bisect", cmd_bisect, RUN_SETUP },
{ "blame", cmd_blame, RUN_SETUP },
{ "branch", cmd_branch, RUN_SETUP | DELAY_PAGER_CONFIG },
{ "bugreport", cmd_bugreport, RUN_SETUP_GENTLY },
@@ -610,7 +610,7 @@ static struct cmd_struct commands[] = {
{ "stash", cmd_stash, RUN_SETUP | NEED_WORK_TREE },
{ "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
{ "stripspace", cmd_stripspace },
- { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX | NO_PARSEOPT },
+ { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX },
{ "switch", cmd_switch, RUN_SETUP | NEED_WORK_TREE },
{ "symbolic-ref", cmd_symbolic_ref, RUN_SETUP },
{ "tag", cmd_tag, RUN_SETUP | DELAY_PAGER_CONFIG },
@@ -787,7 +787,7 @@ static int run_argv(int *argcp, const char ***argv)
if (!done_alias)
handle_builtin(*argcp, *argv);
else if (get_builtin(**argv)) {
- struct strvec args = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
int i;
/*
@@ -804,18 +804,21 @@ static int run_argv(int *argcp, const char ***argv)
commit_pager_choice();
- strvec_push(&args, "git");
+ strvec_push(&cmd.args, "git");
for (i = 0; i < *argcp; i++)
- strvec_push(&args, (*argv)[i]);
+ strvec_push(&cmd.args, (*argv)[i]);
- trace_argv_printf(args.v, "trace: exec:");
+ trace_argv_printf(cmd.args.v, "trace: exec:");
/*
* if we fail because the command is not found, it is
* OK to return. Otherwise, we just pass along the status code.
*/
- i = run_command_v_opt_tr2(args.v, RUN_SILENT_EXEC_FAILURE |
- RUN_CLEAN_ON_EXIT | RUN_WAIT_AFTER_CLEAN, "git_alias");
+ cmd.silent_exec_failure = 1;
+ cmd.clean_on_exit = 1;
+ cmd.wait_after_clean = 1;
+ cmd.trace2_child_class = "git_alias";
+ i = run_command(&cmd);
if (i >= 0 || errno != ENOENT)
exit(i);
die("could not execute builtin %s", **argv);
@@ -894,12 +897,8 @@ int cmd_main(int argc, const char **argv)
argv++;
argc--;
handle_options(&argv, &argc, NULL);
- if (argc > 0) {
- if (!strcmp("--version", argv[0]) || !strcmp("-v", argv[0]))
- argv[0] = "version";
- else if (!strcmp("--help", argv[0]) || !strcmp("-h", argv[0]))
- argv[0] = "help";
- } else {
+
+ if (!argc) {
/* The user didn't specify a command; give them help */
commit_pager_choice();
printf(_("usage: %s\n\n"), git_usage_string);
@@ -907,6 +906,12 @@ int cmd_main(int argc, const char **argv)
printf("\n%s\n", _(git_more_info_string));
exit(1);
}
+
+ if (!strcmp("--version", argv[0]) || !strcmp("-v", argv[0]))
+ argv[0] = "version";
+ else if (!strcmp("--help", argv[0]) || !strcmp("-h", argv[0]))
+ argv[0] = "help";
+
cmd = argv[0];
/*
diff --git a/gpg-interface.c b/gpg-interface.c
index 9aa714bdee..f877a1ea56 100644
--- a/gpg-interface.c
+++ b/gpg-interface.c
@@ -1059,12 +1059,11 @@ static int sign_buffer_ssh(struct strbuf *buffer, struct strbuf *signature,
strbuf_addbuf(&ssh_signature_filename, &buffer_file->filename);
strbuf_addstr(&ssh_signature_filename, ".sig");
if (strbuf_read_file(signature, ssh_signature_filename.buf, 0) < 0) {
- error_errno(
+ ret = error_errno(
_("failed reading ssh signing data buffer from '%s'"),
ssh_signature_filename.buf);
+ goto out;
}
- unlink_or_warn(ssh_signature_filename.buf);
-
/* Strip CR from the line endings, in case we are on Windows. */
remove_cr_after(signature, bottom);
@@ -1073,6 +1072,8 @@ out:
delete_tempfile(&key_file);
if (buffer_file)
delete_tempfile(&buffer_file);
+ if (ssh_signature_filename.len)
+ unlink_or_warn(ssh_signature_filename.buf);
strbuf_release(&signer_stderr);
strbuf_release(&ssh_signature_filename);
FREE_AND_NULL(ssh_signing_key_file);
diff --git a/grep.c b/grep.c
index 52a894c989..06eed69493 100644
--- a/grep.c
+++ b/grep.c
@@ -708,6 +708,7 @@ void compile_grep_patterns(struct grep_opt *opt)
{
struct grep_pat *p;
struct grep_expr *header_expr = prep_header_patterns(opt);
+ int extended = 0;
for (p = opt->pattern_list; p; p = p->next) {
switch (p->token) {
@@ -717,14 +718,14 @@ void compile_grep_patterns(struct grep_opt *opt)
compile_regexp(p, opt);
break;
default:
- opt->extended = 1;
+ extended = 1;
break;
}
}
if (opt->all_match || opt->no_body_match || header_expr)
- opt->extended = 1;
- else if (!opt->extended)
+ extended = 1;
+ else if (!extended)
return;
p = opt->pattern_list;
@@ -790,7 +791,7 @@ void free_grep_patterns(struct grep_opt *opt)
free(p);
}
- if (!opt->extended)
+ if (!opt->pattern_expression)
return;
free_pattern_expr(opt->pattern_expression);
}
@@ -971,8 +972,6 @@ static int match_expr_eval(struct grep_opt *opt, struct grep_expr *x,
{
int h = 0;
- if (!x)
- die("Not a valid grep expression");
switch (x->node) {
case GREP_NODE_TRUE:
h = 1;
@@ -1052,7 +1051,7 @@ static int match_line(struct grep_opt *opt,
struct grep_pat *p;
int hit = 0;
- if (opt->extended)
+ if (opt->pattern_expression)
return match_expr(opt, bol, eol, ctx, col, icol,
collect_hits);
@@ -1370,7 +1369,7 @@ static int should_lookahead(struct grep_opt *opt)
{
struct grep_pat *p;
- if (opt->extended)
+ if (opt->pattern_expression)
return 0; /* punt for too complex stuff */
if (opt->invert)
return 0;
diff --git a/grep.h b/grep.h
index bdcadce61b..6075f997e6 100644
--- a/grep.h
+++ b/grep.h
@@ -151,7 +151,6 @@ struct grep_opt {
#define GREP_BINARY_TEXT 2
int binary;
int allow_textconv;
- int extended;
int use_reflog_filter;
int relative;
int pathname;
diff --git a/help.c b/help.c
index d04542d826..f1e090a442 100644
--- a/help.c
+++ b/help.c
@@ -757,7 +757,7 @@ int cmd_version(int argc, const char **argv, const char *prefix)
struct strbuf buf = STRBUF_INIT;
int build_options = 0;
const char * const usage[] = {
- N_("git version [<options>]"),
+ N_("git version [--build-options]"),
NULL
};
struct option options[] = {
diff --git a/hook.c b/hook.c
index a493939a4f..a4fa1031f2 100644
--- a/hook.c
+++ b/hook.c
@@ -114,8 +114,20 @@ int run_hooks_opt(const char *hook_name, struct run_hooks_opt *options)
.options = options,
};
const char *const hook_path = find_hook(hook_name);
- int jobs = 1;
int ret = 0;
+ const struct run_process_parallel_opts opts = {
+ .tr2_category = "hook",
+ .tr2_label = hook_name,
+
+ .processes = 1,
+ .ungroup = 1,
+
+ .get_next_task = pick_next_hook,
+ .start_failure = notify_start_failure,
+ .task_finished = notify_hook_finished,
+
+ .data = &cb_data,
+ };
if (!options)
BUG("a struct run_hooks_opt must be provided to run_hooks");
@@ -137,14 +149,7 @@ int run_hooks_opt(const char *hook_name, struct run_hooks_opt *options)
cb_data.hook_path = abs_path.buf;
}
- run_processes_parallel_ungroup = 1;
- run_processes_parallel_tr2(jobs,
- pick_next_hook,
- notify_start_failure,
- notify_hook_finished,
- &cb_data,
- "hook",
- hook_name);
+ run_processes_parallel(&opts);
ret = cb_data.rc;
cleanup:
strbuf_release(&abs_path);
diff --git a/http.c b/http.c
index 5d0502f51f..8a5ba3f477 100644
--- a/http.c
+++ b/http.c
@@ -560,13 +560,15 @@ static void set_curl_keepalive(CURL *c)
}
#endif
-static void redact_sensitive_header(struct strbuf *header)
+/* Return 1 if redactions have been made, 0 otherwise. */
+static int redact_sensitive_header(struct strbuf *header, size_t offset)
{
+ int ret = 0;
const char *sensitive_header;
if (trace_curl_redact &&
- (skip_iprefix(header->buf, "Authorization:", &sensitive_header) ||
- skip_iprefix(header->buf, "Proxy-Authorization:", &sensitive_header))) {
+ (skip_iprefix(header->buf + offset, "Authorization:", &sensitive_header) ||
+ skip_iprefix(header->buf + offset, "Proxy-Authorization:", &sensitive_header))) {
/* The first token is the type, which is OK to log */
while (isspace(*sensitive_header))
sensitive_header++;
@@ -575,8 +577,9 @@ static void redact_sensitive_header(struct strbuf *header)
/* Everything else is opaque and possibly sensitive */
strbuf_setlen(header, sensitive_header - header->buf);
strbuf_addstr(header, " <redacted>");
+ ret = 1;
} else if (trace_curl_redact &&
- skip_iprefix(header->buf, "Cookie:", &sensitive_header)) {
+ skip_iprefix(header->buf + offset, "Cookie:", &sensitive_header)) {
struct strbuf redacted_header = STRBUF_INIT;
const char *cookie;
@@ -612,6 +615,26 @@ static void redact_sensitive_header(struct strbuf *header)
strbuf_setlen(header, sensitive_header - header->buf);
strbuf_addbuf(header, &redacted_header);
+ ret = 1;
+ }
+ return ret;
+}
+
+/* Redact headers in info */
+static void redact_sensitive_info_header(struct strbuf *header)
+{
+ const char *sensitive_header;
+
+ /*
+ * curl's h2h3 prints headers in info, e.g.:
+ * h2h3 [<header-name>: <header-val>]
+ */
+ if (trace_curl_redact &&
+ skip_iprefix(header->buf, "h2h3 [", &sensitive_header)) {
+ if (redact_sensitive_header(header, sensitive_header - header->buf)) {
+ /* redaction ate our closing bracket */
+ strbuf_addch(header, ']');
+ }
}
}
@@ -629,7 +652,7 @@ static void curl_dump_header(const char *text, unsigned char *ptr, size_t size,
for (header = headers; *header; header++) {
if (hide_sensitive_header)
- redact_sensitive_header(*header);
+ redact_sensitive_header(*header, 0);
strbuf_insertstr((*header), 0, text);
strbuf_insertstr((*header), strlen(text), ": ");
strbuf_rtrim((*header));
@@ -668,6 +691,18 @@ static void curl_dump_data(const char *text, unsigned char *ptr, size_t size)
strbuf_release(&out);
}
+static void curl_dump_info(char *data, size_t size)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_add(&buf, data, size);
+
+ redact_sensitive_info_header(&buf);
+ trace_printf_key(&trace_curl, "== Info: %s", buf.buf);
+
+ strbuf_release(&buf);
+}
+
static int curl_trace(CURL *handle, curl_infotype type, char *data, size_t size, void *userp)
{
const char *text;
@@ -675,7 +710,7 @@ static int curl_trace(CURL *handle, curl_infotype type, char *data, size_t size,
switch (type) {
case CURLINFO_TEXT:
- trace_printf_key(&trace_curl, "== Info: %s", data);
+ curl_dump_info(data, size);
break;
case CURLINFO_HEADER_OUT:
text = "=> Send header";
diff --git a/ll-merge.c b/ll-merge.c
index 8955d7e1f6..22a603e8af 100644
--- a/ll-merge.c
+++ b/ll-merge.c
@@ -49,14 +49,14 @@ void reset_merge_attributes(void)
/*
* Built-in low-levels
*/
-static enum ll_merge_result ll_binary_merge(const struct ll_merge_driver *drv_unused,
+static enum ll_merge_result ll_binary_merge(const struct ll_merge_driver *drv UNUSED,
mmbuffer_t *result,
- const char *path,
- mmfile_t *orig, const char *orig_name,
- mmfile_t *src1, const char *name1,
- mmfile_t *src2, const char *name2,
+ const char *path UNUSED,
+ mmfile_t *orig, const char *orig_name UNUSED,
+ mmfile_t *src1, const char *name1 UNUSED,
+ mmfile_t *src2, const char *name2 UNUSED,
const struct ll_merge_options *opts,
- int marker_size)
+ int marker_size UNUSED)
{
enum ll_merge_result ret;
mmfile_t *stolen;
@@ -183,9 +183,9 @@ static void create_temp(mmfile_t *src, char *path, size_t len)
static enum ll_merge_result ll_ext_merge(const struct ll_merge_driver *fn,
mmbuffer_t *result,
const char *path,
- mmfile_t *orig, const char *orig_name,
- mmfile_t *src1, const char *name1,
- mmfile_t *src2, const char *name2,
+ mmfile_t *orig, const char *orig_name UNUSED,
+ mmfile_t *src1, const char *name1 UNUSED,
+ mmfile_t *src2, const char *name2 UNUSED,
const struct ll_merge_options *opts,
int marker_size)
{
@@ -193,7 +193,7 @@ static enum ll_merge_result ll_ext_merge(const struct ll_merge_driver *fn,
struct strbuf cmd = STRBUF_INIT;
struct strbuf_expand_dict_entry dict[6];
struct strbuf path_sq = STRBUF_INIT;
- const char *args[] = { NULL, NULL };
+ struct child_process child = CHILD_PROCESS_INIT;
int status, fd, i;
struct stat st;
enum ll_merge_result ret;
@@ -219,8 +219,9 @@ static enum ll_merge_result ll_ext_merge(const struct ll_merge_driver *fn,
strbuf_expand(&cmd, fn->cmdline, strbuf_expand_dict_cb, &dict);
- args[0] = cmd.buf;
- status = run_command_v_opt(args, RUN_USING_SHELL);
+ child.use_shell = 1;
+ strvec_push(&child.args, cmd.buf);
+ status = run_command(&child);
fd = open(temp[1], O_RDONLY);
if (fd < 0)
goto bad;
diff --git a/ls-refs.c b/ls-refs.c
index fa0d01b47c..fb6769742c 100644
--- a/ls-refs.c
+++ b/ls-refs.c
@@ -6,6 +6,7 @@
#include "ls-refs.h"
#include "pkt-line.h"
#include "config.h"
+#include "string-list.h"
static int config_read;
static int advertise_unborn;
@@ -73,6 +74,7 @@ struct ls_refs_data {
unsigned symrefs;
struct strvec prefixes;
struct strbuf buf;
+ struct string_list hidden_refs;
unsigned unborn : 1;
};
@@ -84,7 +86,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
strbuf_reset(&data->buf);
- if (ref_is_hidden(refname_nons, refname))
+ if (ref_is_hidden(refname_nons, refname, &data->hidden_refs))
return 0;
if (!ref_match(&data->prefixes, refname_nons))
@@ -137,14 +139,15 @@ static void send_possibly_unborn_head(struct ls_refs_data *data)
}
static int ls_refs_config(const char *var, const char *value,
- void *data UNUSED)
+ void *cb_data)
{
+ struct ls_refs_data *data = cb_data;
/*
* We only serve fetches over v2 for now, so respect only "uploadpack"
* config. This may need to eventually be expanded to "receive", but we
* don't yet know how that information will be passed to ls-refs.
*/
- return parse_hide_refs_config(var, value, "uploadpack");
+ return parse_hide_refs_config(var, value, "uploadpack", &data->hidden_refs);
}
int ls_refs(struct repository *r, struct packet_reader *request)
@@ -154,9 +157,10 @@ int ls_refs(struct repository *r, struct packet_reader *request)
memset(&data, 0, sizeof(data));
strvec_init(&data.prefixes);
strbuf_init(&data.buf, 0);
+ string_list_init_dup(&data.hidden_refs);
ensure_config_read();
- git_config(ls_refs_config, NULL);
+ git_config(ls_refs_config, &data);
while (packet_reader_read(request) == PACKET_READ_NORMAL) {
const char *arg = request->line;
@@ -195,6 +199,7 @@ int ls_refs(struct repository *r, struct packet_reader *request)
packet_fflush(stdout);
strvec_clear(&data.prefixes);
strbuf_release(&data.buf);
+ string_list_clear(&data.hidden_refs, 0);
return 0;
}
diff --git a/mailinfo.c b/mailinfo.c
index 9621ba62a3..833d28612f 100644
--- a/mailinfo.c
+++ b/mailinfo.c
@@ -317,7 +317,7 @@ static void cleanup_subject(struct mailinfo *mi, struct strbuf *subject)
pos = strchr(subject->buf + at, ']');
if (!pos)
break;
- remove = pos - subject->buf + at + 1;
+ remove = pos - (subject->buf + at) + 1;
if (!mi->keep_non_patch_brackets_in_subject ||
(7 <= remove &&
memmem(subject->buf + at, remove, "PATCH", 5)))
diff --git a/merge-ort.c b/merge-ort.c
index 99dcee2db8..d1611ca400 100644
--- a/merge-ort.c
+++ b/merge-ort.c
@@ -397,7 +397,7 @@ struct conflicted_submodule_item {
int flag;
};
-static void conflicted_submodule_item_free(void *util, const char *str)
+static void conflicted_submodule_item_free(void *util, const char *str UNUSED)
{
struct conflicted_submodule_item *item = util;
@@ -2619,8 +2619,40 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
}
assert(ci->filemask == 2 || ci->filemask == 4);
- assert(ci->dirmask == 0);
- strmap_remove(&opt->priv->paths, old_path, 0);
+ assert(ci->dirmask == 0 || ci->dirmask == 1);
+ if (ci->dirmask == 0)
+ strmap_remove(&opt->priv->paths, old_path, 0);
+ else {
+ /*
+ * This file exists on one side, but we still had a directory
+ * at the old location that we can't remove until after
+ * processing all paths below it. So, make a copy of ci in
+ * new_ci and only put the file information into it.
+ */
+ new_ci = mem_pool_calloc(&opt->priv->pool, 1, sizeof(*new_ci));
+ memcpy(new_ci, ci, sizeof(*ci));
+ assert(!new_ci->match_mask);
+ new_ci->dirmask = 0;
+ new_ci->stages[1].mode = 0;
+ oidcpy(&new_ci->stages[1].oid, null_oid());
+
+ /*
+ * Now that we have the file information in new_ci, make sure
+ * ci only has the directory information.
+ */
+ ci->filemask = 0;
+ ci->merged.clean = 1;
+ for (i = MERGE_BASE; i <= MERGE_SIDE2; i++) {
+ if (ci->dirmask & (1 << i))
+ continue;
+ /* zero out any entries related to files */
+ ci->stages[i].mode = 0;
+ oidcpy(&ci->stages[i].oid, null_oid());
+ }
+
+ /* Now we want to focus on new_ci, so reassign ci to it */
+ ci = new_ci;
+ }
branch_with_new_path = (ci->filemask == 2) ? opt->branch1 : opt->branch2;
branch_with_dir_rename = (ci->filemask == 2) ? opt->branch2 : opt->branch1;
@@ -2807,6 +2839,8 @@ static int process_renames(struct merge_options *opt,
pathnames,
1 + 2 * opt->priv->call_depth,
&merged);
+ if (clean_merge < 0)
+ return -1;
if (!clean_merge &&
merged.mode == side1->stages[1].mode &&
oideq(&merged.oid, &side1->stages[1].oid))
@@ -2916,7 +2950,7 @@ static int process_renames(struct merge_options *opt,
struct version_info merged;
struct conflict_info *base, *side1, *side2;
- unsigned clean;
+ int clean;
pathnames[0] = oldpath;
pathnames[other_source_index] = oldpath;
@@ -2937,6 +2971,8 @@ static int process_renames(struct merge_options *opt,
pathnames,
1 + 2 * opt->priv->call_depth,
&merged);
+ if (clean < 0)
+ return -1;
memcpy(&newinfo->stages[target_index], &merged,
sizeof(merged));
@@ -3571,15 +3607,15 @@ static int tree_entry_order(const void *a_, const void *b_)
b->string, strlen(b->string), bmi->result.mode);
}
-static void write_tree(struct object_id *result_oid,
- struct string_list *versions,
- unsigned int offset,
- size_t hash_size)
+static int write_tree(struct object_id *result_oid,
+ struct string_list *versions,
+ unsigned int offset,
+ size_t hash_size)
{
size_t maxlen = 0, extra;
unsigned int nr;
struct strbuf buf = STRBUF_INIT;
- int i;
+ int i, ret = 0;
assert(offset <= versions->nr);
nr = versions->nr - offset;
@@ -3605,8 +3641,10 @@ static void write_tree(struct object_id *result_oid,
}
/* Write this object file out, and record in result_oid */
- write_object_file(buf.buf, buf.len, OBJ_TREE, result_oid);
+ if (write_object_file(buf.buf, buf.len, OBJ_TREE, result_oid))
+ ret = -1;
strbuf_release(&buf);
+ return ret;
}
static void record_entry_for_tree(struct directory_versions *dir_metadata,
@@ -3625,13 +3663,13 @@ static void record_entry_for_tree(struct directory_versions *dir_metadata,
basename)->util = &mi->result;
}
-static void write_completed_directory(struct merge_options *opt,
- const char *new_directory_name,
- struct directory_versions *info)
+static int write_completed_directory(struct merge_options *opt,
+ const char *new_directory_name,
+ struct directory_versions *info)
{
const char *prev_dir;
struct merged_info *dir_info = NULL;
- unsigned int offset;
+ unsigned int offset, ret = 0;
/*
* Some explanation of info->versions and info->offsets...
@@ -3717,7 +3755,7 @@ static void write_completed_directory(struct merge_options *opt,
* strcmp here.)
*/
if (new_directory_name == info->last_directory)
- return;
+ return 0;
/*
* If we are just starting (last_directory is NULL), or last_directory
@@ -3739,7 +3777,7 @@ static void write_completed_directory(struct merge_options *opt,
*/
string_list_append(&info->offsets,
info->last_directory)->util = (void*)offset;
- return;
+ return 0;
}
/*
@@ -3769,8 +3807,9 @@ static void write_completed_directory(struct merge_options *opt,
*/
dir_info->is_null = 0;
dir_info->result.mode = S_IFDIR;
- write_tree(&dir_info->result.oid, &info->versions, offset,
- opt->repo->hash_algo->rawsz);
+ if (write_tree(&dir_info->result.oid, &info->versions, offset,
+ opt->repo->hash_algo->rawsz) < 0)
+ ret = -1;
}
/*
@@ -3798,13 +3837,15 @@ static void write_completed_directory(struct merge_options *opt,
/* And, of course, we need to update last_directory to match. */
info->last_directory = new_directory_name;
info->last_directory_len = strlen(info->last_directory);
+
+ return ret;
}
/* Per entry merge function */
-static void process_entry(struct merge_options *opt,
- const char *path,
- struct conflict_info *ci,
- struct directory_versions *dir_metadata)
+static int process_entry(struct merge_options *opt,
+ const char *path,
+ struct conflict_info *ci,
+ struct directory_versions *dir_metadata)
{
int df_file_index = 0;
@@ -3818,7 +3859,7 @@ static void process_entry(struct merge_options *opt,
record_entry_for_tree(dir_metadata, path, &ci->merged);
if (ci->filemask == 0)
/* nothing else to handle */
- return;
+ return 0;
assert(ci->df_conflict);
}
@@ -3865,7 +3906,7 @@ static void process_entry(struct merge_options *opt,
*/
if (ci->filemask == 1) {
ci->filemask = 0;
- return;
+ return 0;
}
/*
@@ -4060,7 +4101,7 @@ static void process_entry(struct merge_options *opt,
} else if (ci->filemask >= 6) {
/* Need a two-way or three-way content merge */
struct version_info merged_file;
- unsigned clean_merge;
+ int clean_merge;
struct version_info *o = &ci->stages[0];
struct version_info *a = &ci->stages[1];
struct version_info *b = &ci->stages[2];
@@ -4069,6 +4110,8 @@ static void process_entry(struct merge_options *opt,
ci->pathnames,
opt->priv->call_depth * 2,
&merged_file);
+ if (clean_merge < 0)
+ return -1;
ci->merged.clean = clean_merge &&
!ci->df_conflict && !ci->path_conflict;
ci->merged.result.mode = merged_file.mode;
@@ -4164,6 +4207,7 @@ static void process_entry(struct merge_options *opt,
/* Record metadata for ci->merged in dir_metadata */
record_entry_for_tree(dir_metadata, path, &ci->merged);
+ return 0;
}
static void prefetch_for_content_merges(struct merge_options *opt,
@@ -4214,8 +4258,8 @@ static void prefetch_for_content_merges(struct merge_options *opt,
oid_array_clear(&to_fetch);
}
-static void process_entries(struct merge_options *opt,
- struct object_id *result_oid)
+static int process_entries(struct merge_options *opt,
+ struct object_id *result_oid)
{
struct hashmap_iter iter;
struct strmap_entry *e;
@@ -4224,11 +4268,12 @@ static void process_entries(struct merge_options *opt,
struct directory_versions dir_metadata = { STRING_LIST_INIT_NODUP,
STRING_LIST_INIT_NODUP,
NULL, 0 };
+ int ret = 0;
trace2_region_enter("merge", "process_entries setup", opt->repo);
if (strmap_empty(&opt->priv->paths)) {
oidcpy(result_oid, opt->repo->hash_algo->empty_tree);
- return;
+ return 0;
}
/* Hack to pre-allocate plist to the desired size */
@@ -4270,13 +4315,19 @@ static void process_entries(struct merge_options *opt,
*/
struct merged_info *mi = entry->util;
- write_completed_directory(opt, mi->directory_name,
- &dir_metadata);
+ if (write_completed_directory(opt, mi->directory_name,
+ &dir_metadata) < 0) {
+ ret = -1;
+ goto cleanup;
+ }
if (mi->clean)
record_entry_for_tree(&dir_metadata, path, mi);
else {
struct conflict_info *ci = (struct conflict_info *)mi;
- process_entry(opt, path, ci, &dir_metadata);
+ if (process_entry(opt, path, ci, &dir_metadata) < 0) {
+ ret = -1;
+ goto cleanup;
+ };
}
}
trace2_region_leave("merge", "processing", opt->repo);
@@ -4291,12 +4342,16 @@ static void process_entries(struct merge_options *opt,
fflush(stdout);
BUG("dir_metadata accounting completely off; shouldn't happen");
}
- write_tree(result_oid, &dir_metadata.versions, 0,
- opt->repo->hash_algo->rawsz);
+ if (write_tree(result_oid, &dir_metadata.versions, 0,
+ opt->repo->hash_algo->rawsz) < 0)
+ ret = -1;
+cleanup:
string_list_clear(&plist, 0);
string_list_clear(&dir_metadata.versions, 0);
string_list_clear(&dir_metadata.offsets, 0);
trace2_region_leave("merge", "process_entries cleanup", opt->repo);
+
+ return ret;
}
/*** Function Grouping: functions related to merge_switch_to_result() ***/
@@ -4928,15 +4983,18 @@ redo:
}
trace2_region_enter("merge", "process_entries", opt->repo);
- process_entries(opt, &working_tree_oid);
+ if (process_entries(opt, &working_tree_oid) < 0)
+ result->clean = -1;
trace2_region_leave("merge", "process_entries", opt->repo);
/* Set return values */
result->path_messages = &opt->priv->conflicts;
- result->tree = parse_tree_indirect(&working_tree_oid);
- /* existence of conflicted entries implies unclean */
- result->clean &= strmap_empty(&opt->priv->conflicted);
+ if (result->clean >= 0) {
+ result->tree = parse_tree_indirect(&working_tree_oid);
+ /* existence of conflicted entries implies unclean */
+ result->clean &= strmap_empty(&opt->priv->conflicted);
+ }
if (!opt->priv->call_depth) {
result->priv = opt->priv;
result->_properly_initialized = RESULT_INITIALIZED;
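With process_entries() and its helpers now returning errors, result->clean is no longer just 0/1: a negative value means the merge machinery itself failed rather than merely producing conflicts. A caller-side sketch of that three-way distinction, using the merge_incore_recursive()/merge_finalize() entry points from merge-ort.h (the die()/warning() handling here is illustrative, not the behavior of any particular builtin):

	static void run_ort_merge(struct merge_options *opt,
				  struct commit_list *merge_bases,
				  struct commit *side1, struct commit *side2)
	{
		struct merge_result result = { 0 };

		merge_incore_recursive(opt, merge_bases, side1, side2, &result);
		if (result.clean < 0)
			die(_("internal merge failed"));         /* hard error, e.g. from process_entries() */
		else if (!result.clean)
			warning(_("merge produced conflicts"));  /* conflicted entries were recorded */
		merge_finalize(opt, &result);
	}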
diff --git a/merge.c b/merge.c
index 2382ff66d3..445b4f19aa 100644
--- a/merge.c
+++ b/merge.c
@@ -19,22 +19,22 @@ int try_merge_command(struct repository *r,
const char **xopts, struct commit_list *common,
const char *head_arg, struct commit_list *remotes)
{
- struct strvec args = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
int i, ret;
struct commit_list *j;
- strvec_pushf(&args, "merge-%s", strategy);
+ strvec_pushf(&cmd.args, "merge-%s", strategy);
for (i = 0; i < xopts_nr; i++)
- strvec_pushf(&args, "--%s", xopts[i]);
+ strvec_pushf(&cmd.args, "--%s", xopts[i]);
for (j = common; j; j = j->next)
- strvec_push(&args, merge_argument(j->item));
- strvec_push(&args, "--");
- strvec_push(&args, head_arg);
+ strvec_push(&cmd.args, merge_argument(j->item));
+ strvec_push(&cmd.args, "--");
+ strvec_push(&cmd.args, head_arg);
for (j = remotes; j; j = j->next)
- strvec_push(&args, merge_argument(j->item));
+ strvec_push(&cmd.args, merge_argument(j->item));
- ret = run_command_v_opt(args.v, RUN_GIT_CMD);
- strvec_clear(&args);
+ cmd.git_cmd = 1;
+ ret = run_command(&cmd);
discard_index(r->index);
if (repo_read_index(r) < 0)
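The conversion above is the general recipe for retiring run_command_v_opt(): build the argument list directly in cmd.args, set cmd.git_cmd, and let run_command() both spawn the child and release the strvec. A minimal sketch with a different subcommand (run-command.h API as used in the hunk above):

	#include "run-command.h"

	static int run_git_gc_auto(void)
	{
		struct child_process cmd = CHILD_PROCESS_INIT;

		strvec_push(&cmd.args, "gc");
		strvec_push(&cmd.args, "--auto");
		cmd.git_cmd = 1;               /* run as "git gc --auto" */
		return run_command(&cmd);      /* spawns, waits, and clears cmd.args */
	}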
diff --git a/midx.c b/midx.c
index c27d0e5f15..03d947a5d3 100644
--- a/midx.c
+++ b/midx.c
@@ -278,7 +278,7 @@ uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
(off_t)pos * MIDX_CHUNK_OFFSET_WIDTH);
}
-int fill_midx_entry(struct repository * r,
+int fill_midx_entry(struct repository *r,
const struct object_id *oid,
struct pack_entry *e,
struct multi_pack_index *m)
@@ -913,6 +913,8 @@ static uint32_t *midx_pack_order(struct write_midx_context *ctx)
uint32_t *pack_order;
uint32_t i;
+ trace2_region_enter("midx", "midx_pack_order", the_repository);
+
ALLOC_ARRAY(data, ctx->entries_nr);
for (i = 0; i < ctx->entries_nr; i++) {
struct pack_midx_entry *e = &ctx->entries[i];
@@ -930,6 +932,8 @@ static uint32_t *midx_pack_order(struct write_midx_context *ctx)
pack_order[i] = data[i].nr;
free(data);
+ trace2_region_leave("midx", "midx_pack_order", the_repository);
+
return pack_order;
}
@@ -939,6 +943,8 @@ static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
struct strbuf buf = STRBUF_INIT;
const char *tmp_file;
+ trace2_region_enter("midx", "write_midx_reverse_index", the_repository);
+
strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex(midx_hash));
tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
@@ -948,6 +954,8 @@ static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
die(_("cannot store reverse index file"));
strbuf_release(&buf);
+
+ trace2_region_leave("midx", "write_midx_reverse_index", the_repository);
}
static void clear_midx_files_ext(const char *object_dir, const char *ext,
@@ -963,6 +971,8 @@ static void prepare_midx_packing_data(struct packing_data *pdata,
{
uint32_t i;
+ trace2_region_enter("midx", "prepare_midx_packing_data", the_repository);
+
memset(pdata, 0, sizeof(struct packing_data));
prepare_packing_data(the_repository, pdata);
@@ -973,6 +983,8 @@ static void prepare_midx_packing_data(struct packing_data *pdata,
oe_set_in_pack(pdata, to,
ctx->info[ctx->pack_perm[from->pack_int_id]].p);
}
+
+ trace2_region_leave("midx", "prepare_midx_packing_data", the_repository);
}
static int add_ref_to_pending(const char *refname,
@@ -980,6 +992,7 @@ static int add_ref_to_pending(const char *refname,
int flag, void *cb_data)
{
struct rev_info *revs = (struct rev_info*)cb_data;
+ struct object_id peeled;
struct object *object;
if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) {
@@ -987,6 +1000,9 @@ static int add_ref_to_pending(const char *refname,
return 0;
}
+ if (!peel_iterated_oid(oid, &peeled))
+ oid = &peeled;
+
object = parse_object_or_die(oid, refname);
if (object->type != OBJ_COMMIT)
return 0;
@@ -1066,6 +1082,9 @@ static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr
struct rev_info revs;
struct bitmap_commit_cb cb = {0};
+ trace2_region_enter("midx", "find_commits_for_midx_bitmap",
+ the_repository);
+
cb.ctx = ctx;
repo_init_revisions(the_repository, &revs, NULL);
@@ -1099,6 +1118,10 @@ static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr
*indexed_commits_nr_p = cb.commits_nr;
release_revisions(&revs);
+
+ trace2_region_leave("midx", "find_commits_for_midx_bitmap",
+ the_repository);
+
return cb.commits;
}
@@ -1116,6 +1139,8 @@ static int write_midx_bitmap(const char *midx_name,
char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name,
hash_to_hex(midx_hash));
+ trace2_region_enter("midx", "write_midx_bitmap", the_repository);
+
if (flags & MIDX_WRITE_BITMAP_HASH_CACHE)
options |= BITMAP_OPT_HASH_CACHE;
@@ -1161,6 +1186,9 @@ static int write_midx_bitmap(const char *midx_name,
cleanup:
free(index);
free(bitmap_name);
+
+ trace2_region_leave("midx", "write_midx_bitmap", the_repository);
+
return ret;
}
@@ -1207,6 +1235,8 @@ static int write_midx_internal(const char *object_dir,
int result = 0;
struct chunkfile *cf;
+ trace2_region_enter("midx", "write_midx_internal", the_repository);
+
get_midx_filename(&midx_name, object_dir);
if (safe_create_leading_directories(midx_name.buf))
die_errno(_("unable to create leading directories of %s"),
@@ -1480,7 +1510,7 @@ static int write_midx_internal(const char *object_dir,
}
write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
- write_chunkfile(cf, &ctx);
+ write_chunkfile(cf, 0, &ctx);
finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA,
CSUM_FSYNC | CSUM_HASH_IN_STREAM);
@@ -1548,6 +1578,8 @@ cleanup:
free(ctx.pack_order);
strbuf_release(&midx_name);
+ trace2_region_leave("midx", "write_midx_internal", the_repository);
+
return result;
}
@@ -1839,7 +1871,7 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla
if (prepare_midx_pack(r, m, i))
continue;
- if (m->packs[i]->pack_keep)
+ if (m->packs[i]->pack_keep || m->packs[i]->is_cruft)
continue;
pack_name = xstrdup(m->packs[i]->pack_name);
@@ -1895,6 +1927,8 @@ static int fill_included_packs_all(struct repository *r,
continue;
if (!pack_kept_objects && m->packs[i]->pack_keep)
continue;
+ if (m->packs[i]->is_cruft)
+ continue;
include_pack[i] = 1;
count++;
@@ -1910,9 +1944,11 @@ static int fill_included_packs_batch(struct repository *r,
{
uint32_t i, packs_to_repack;
size_t total_size;
- struct repack_info *pack_info = xcalloc(m->num_packs, sizeof(struct repack_info));
+ struct repack_info *pack_info;
int pack_kept_objects = 0;
+ CALLOC_ARRAY(pack_info, m->num_packs);
+
repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
for (i = 0; i < m->num_packs; i++) {
@@ -1924,7 +1960,7 @@ static int fill_included_packs_batch(struct repository *r,
pack_info[i].mtime = m->packs[i]->mtime;
}
- for (i = 0; batch_size && i < m->num_objects; i++) {
+ for (i = 0; i < m->num_objects; i++) {
uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
pack_info[pack_int_id].referenced_objects++;
}
@@ -1942,6 +1978,8 @@ static int fill_included_packs_batch(struct repository *r,
continue;
if (!pack_kept_objects && p->pack_keep)
continue;
+ if (p->is_cruft)
+ continue;
if (open_pack_index(p) || !p->num_objects)
continue;
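The trace2 regions added throughout midx.c follow the usual enter/leave pairing; every exit path out of the instrumented span must leave the region, which is why write_midx_bitmap() routes its error returns through the cleanup label. A sketch of the pattern (the step being timed is hypothetical):

	trace2_region_enter("midx", "expensive_step", the_repository);
	if (do_expensive_step() < 0) {
		/* leave on the error path too, or the region never closes */
		trace2_region_leave("midx", "expensive_step", the_repository);
		return -1;
	}
	trace2_region_leave("midx", "expensive_step", the_repository);

Running with GIT_TRACE2_PERF=1 then reports elapsed time per region.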
diff --git a/negotiator/skipping.c b/negotiator/skipping.c
index c4398f5ae1..0f5ac48e87 100644
--- a/negotiator/skipping.c
+++ b/negotiator/skipping.c
@@ -86,21 +86,26 @@ static int clear_marks(const char *refname, const struct object_id *oid,
/*
* Mark this SEEN commit and all its SEEN ancestors as COMMON.
*/
-static void mark_common(struct data *data, struct commit *c)
+static void mark_common(struct data *data, struct commit *seen_commit)
{
- struct commit_list *p;
+ struct prio_queue queue = { NULL };
+ struct commit *c;
- if (c->object.flags & COMMON)
- return;
- c->object.flags |= COMMON;
- if (!(c->object.flags & POPPED))
- data->non_common_revs--;
+ prio_queue_put(&queue, seen_commit);
+ while ((c = prio_queue_get(&queue))) {
+ struct commit_list *p;
+ if (c->object.flags & COMMON)
+ return;
+ c->object.flags |= COMMON;
+ if (!(c->object.flags & POPPED))
+ data->non_common_revs--;
- if (!c->object.parsed)
- return;
- for (p = c->parents; p; p = p->next) {
- if (p->item->object.flags & SEEN)
- mark_common(data, p->item);
+ if (!c->object.parsed)
+ return;
+ for (p = c->parents; p; p = p->next) {
+ if (p->item->object.flags & SEEN)
+ prio_queue_put(&queue, p->item);
+ }
}
}
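The rewritten mark_common() trades recursion for an explicit worklist: a prio_queue with a NULL compare function is documented to behave as a LIFO stack, so the walk stays depth-first but no longer grows the call stack with history depth. A generic sketch of the idiom (VISITED is a stand-in for whatever object flag the caller uses as a "seen" marker; the code above uses COMMON for that purpose):

	static void walk_ancestors(struct commit *start)
	{
		struct prio_queue queue = { NULL };   /* NULL compare: plain LIFO stack */
		struct commit *c;

		prio_queue_put(&queue, start);
		while ((c = prio_queue_get(&queue))) {
			struct commit_list *p;

			if (c->object.flags & VISITED)
				continue;
			c->object.flags |= VISITED;
			for (p = c->parents; p; p = p->next)
				prio_queue_put(&queue, p->item);
		}
		clear_prio_queue(&queue);
	}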
diff --git a/object-file.c b/object-file.c
index 5b270f046d..c118e62a55 100644
--- a/object-file.c
+++ b/object-file.c
@@ -140,27 +140,32 @@ static void git_hash_sha256_final_oid(struct object_id *oid, git_hash_ctx *ctx)
oid->algo = GIT_HASH_SHA256;
}
-static void git_hash_unknown_init(git_hash_ctx *ctx)
+static void git_hash_unknown_init(git_hash_ctx *ctx UNUSED)
{
BUG("trying to init unknown hash");
}
-static void git_hash_unknown_clone(git_hash_ctx *dst, const git_hash_ctx *src)
+static void git_hash_unknown_clone(git_hash_ctx *dst UNUSED,
+ const git_hash_ctx *src UNUSED)
{
BUG("trying to clone unknown hash");
}
-static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
+static void git_hash_unknown_update(git_hash_ctx *ctx UNUSED,
+ const void *data UNUSED,
+ size_t len UNUSED)
{
BUG("trying to update unknown hash");
}
-static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
+static void git_hash_unknown_final(unsigned char *hash UNUSED,
+ git_hash_ctx *ctx UNUSED)
{
BUG("trying to finalize unknown hash");
}
-static void git_hash_unknown_final_oid(struct object_id *oid, git_hash_ctx *ctx)
+static void git_hash_unknown_final_oid(struct object_id *oid UNUSED,
+ git_hash_ctx *ctx UNUSED)
{
BUG("trying to finalize unknown hash");
}
@@ -1599,10 +1604,6 @@ static int do_oid_object_info_extended(struct repository *r,
if (fetch_if_missing && repo_has_promisor_remote(r) &&
!already_retried &&
!(flags & OBJECT_INFO_SKIP_FETCH_OBJECT)) {
- /*
- * TODO Investigate checking promisor_remote_get_direct()
- * TODO return value and stopping on error here.
- */
promisor_remote_get_direct(r, real, 1);
already_retried = 1;
continue;
@@ -2526,7 +2527,7 @@ int index_path(struct index_state *istate, struct object_id *oid,
strbuf_release(&sb);
break;
case S_IFDIR:
- return resolve_gitlink_ref(path, "HEAD", oid);
+ return resolve_gitlink_ref(path, "HEAD", oid, NULL);
default:
return error(_("%s: unsupported file type"), path);
}
diff --git a/object.c b/object.c
index 2e4589bae5..fad1a5af4a 100644
--- a/object.c
+++ b/object.c
@@ -233,7 +233,8 @@ struct object *parse_object_buffer(struct repository *r, const struct object_id
if (commit) {
if (parse_commit_buffer(r, commit, buffer, size, 1))
return NULL;
- if (!get_cached_commit_buffer(r, commit, NULL)) {
+ if (save_commit_buffer &&
+ !get_cached_commit_buffer(r, commit, NULL)) {
set_commit_buffer(r, commit, buffer, size);
*eaten_p = 1;
}
@@ -285,9 +286,8 @@ struct object *parse_object_with_flags(struct repository *r,
return &commit->object;
}
- if ((obj && obj->type == OBJ_BLOB && repo_has_object_file(r, oid)) ||
- (!obj && repo_has_object_file(r, oid) &&
- oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
+ if ((!obj || (obj && obj->type == OBJ_BLOB)) &&
+ oid_object_info(r, oid, NULL) == OBJ_BLOB) {
if (!skip_hash && stream_object_signature(r, repl) < 0) {
error(_("hash mismatch %s"), oid_to_hex(oid));
return NULL;
diff --git a/oss-fuzz/.gitignore b/oss-fuzz/.gitignore
new file mode 100644
index 0000000000..9acb74412e
--- /dev/null
+++ b/oss-fuzz/.gitignore
@@ -0,0 +1,3 @@
+fuzz-commit-graph
+fuzz-pack-headers
+fuzz-pack-idx
diff --git a/fuzz-commit-graph.c b/oss-fuzz/fuzz-commit-graph.c
index 914026f5d8..914026f5d8 100644
--- a/fuzz-commit-graph.c
+++ b/oss-fuzz/fuzz-commit-graph.c
diff --git a/fuzz-pack-headers.c b/oss-fuzz/fuzz-pack-headers.c
index 99da1d0fd3..99da1d0fd3 100644
--- a/fuzz-pack-headers.c
+++ b/oss-fuzz/fuzz-pack-headers.c
diff --git a/fuzz-pack-idx.c b/oss-fuzz/fuzz-pack-idx.c
index 0c3d777aac..0c3d777aac 100644
--- a/fuzz-pack-idx.c
+++ b/oss-fuzz/fuzz-pack-idx.c
diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c
index a213f5eddc..cfa67a510f 100644
--- a/pack-bitmap-write.c
+++ b/pack-bitmap-write.c
@@ -384,6 +384,8 @@ static int fill_bitmap_tree(struct bitmap *bitmap,
return 0;
}
+static int reused_bitmaps_nr;
+
static int fill_bitmap_commit(struct bb_commit *ent,
struct commit *commit,
struct prio_queue *queue,
@@ -409,8 +411,10 @@ static int fill_bitmap_commit(struct bb_commit *ent,
* bitmap and add its bits to this one. No need to walk
* parents or the tree for this commit.
*/
- if (old && !rebuild_bitmap(mapping, old, ent->bitmap))
+ if (old && !rebuild_bitmap(mapping, old, ent->bitmap)) {
+ reused_bitmaps_nr++;
continue;
+ }
}
/*
@@ -526,6 +530,8 @@ int bitmap_writer_build(struct packing_data *to_pack)
trace2_region_leave("pack-bitmap-write", "building_bitmaps_total",
the_repository);
+ trace2_data_intmax("pack-bitmap-write", the_repository,
+ "building_bitmaps_reused", reused_bitmaps_nr);
stop_progress(&writer.progress);
diff --git a/pack-bitmap.c b/pack-bitmap.c
index 440407f1be..aaa2d9a104 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -354,8 +354,8 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
if (bitmap_git->pack || bitmap_git->midx) {
struct strbuf buf = STRBUF_INIT;
get_midx_filename(&buf, midx->object_dir);
- /* ignore extra bitmap file; we can only handle one */
- warning(_("ignoring extra bitmap file: '%s'"), buf.buf);
+ trace2_data_string("bitmap", the_repository,
+ "ignoring extra midx bitmap file", buf.buf);
close(fd);
strbuf_release(&buf);
return -1;
@@ -411,9 +411,6 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
struct stat st;
char *bitmap_name;
- if (open_pack_index(packfile))
- return -1;
-
bitmap_name = pack_bitmap_filename(packfile);
fd = git_open(bitmap_name);
@@ -432,8 +429,8 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
}
if (bitmap_git->pack || bitmap_git->midx) {
- /* ignore extra bitmap file; we can only handle one */
- warning(_("ignoring extra bitmap file: '%s'"), packfile->pack_name);
+ trace2_data_string("bitmap", the_repository,
+ "ignoring extra bitmap file", packfile->pack_name);
close(fd);
return -1;
}
@@ -458,6 +455,8 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
return -1;
}
+ trace2_data_string("bitmap", the_repository, "opened bitmap file",
+ packfile->pack_name);
return 0;
}
diff --git a/patch-ids.c b/patch-ids.c
index 46c6a8f3ea..3153446626 100644
--- a/patch-ids.c
+++ b/patch-ids.c
@@ -11,7 +11,7 @@ static int patch_id_defined(struct commit *commit)
}
int commit_patch_id(struct commit *commit, struct diff_options *options,
- struct object_id *oid, int diff_header_only, int stable)
+ struct object_id *oid, int diff_header_only)
{
if (!patch_id_defined(commit))
return -1;
@@ -22,7 +22,7 @@ int commit_patch_id(struct commit *commit, struct diff_options *options,
else
diff_root_tree_oid(&commit->object.oid, "", options);
diffcore_std(options);
- return diff_flush_patch_id(options, oid, diff_header_only, stable);
+ return diff_flush_patch_id(options, oid, diff_header_only);
}
/*
@@ -48,11 +48,11 @@ static int patch_id_neq(const void *cmpfn_data,
b = container_of(entry_or_key, struct patch_id, ent);
if (is_null_oid(&a->patch_id) &&
- commit_patch_id(a->commit, opt, &a->patch_id, 0, 0))
+ commit_patch_id(a->commit, opt, &a->patch_id, 0))
return error("Could not get patch ID for %s",
oid_to_hex(&a->commit->object.oid));
if (is_null_oid(&b->patch_id) &&
- commit_patch_id(b->commit, opt, &b->patch_id, 0, 0))
+ commit_patch_id(b->commit, opt, &b->patch_id, 0))
return error("Could not get patch ID for %s",
oid_to_hex(&b->commit->object.oid));
return !oideq(&a->patch_id, &b->patch_id);
@@ -82,7 +82,7 @@ static int init_patch_id_entry(struct patch_id *patch,
struct object_id header_only_patch_id;
patch->commit = commit;
- if (commit_patch_id(commit, &ids->diffopts, &header_only_patch_id, 1, 0))
+ if (commit_patch_id(commit, &ids->diffopts, &header_only_patch_id, 1))
return -1;
hashmap_entry_init(&patch->ent, oidhash(&header_only_patch_id));
diff --git a/patch-ids.h b/patch-ids.h
index ab6c6a6804..490d739371 100644
--- a/patch-ids.h
+++ b/patch-ids.h
@@ -20,7 +20,7 @@ struct patch_ids {
};
int commit_patch_id(struct commit *commit, struct diff_options *options,
- struct object_id *oid, int, int);
+ struct object_id *oid, int);
int init_patch_ids(struct repository *, struct patch_ids *);
int free_patch_ids(struct patch_ids *);
diff --git a/path.c b/path.c
index a3cfcd8a6e..492e17ad12 100644
--- a/path.c
+++ b/path.c
@@ -901,7 +901,13 @@ int adjust_shared_perm(const char *path)
if (S_ISDIR(old_mode)) {
/* Copy read bits to execute bits */
new_mode |= (new_mode & 0444) >> 2;
- new_mode |= FORCE_DIR_SET_GID;
+
+ /*
+ * g+s matters only if any extra access is granted
+ * based on group membership.
+ */
+ if (FORCE_DIR_SET_GID && (new_mode & 060))
+ new_mode |= FORCE_DIR_SET_GID;
}
if (((old_mode ^ new_mode) & ~S_IFMT) &&
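The net effect of the new check: g+s on a shared directory is only forced when the adjusted mode actually grants some group access, i.e. when (new_mode & 060) is non-zero. A worked example, under the assumption that FORCE_DIR_SET_GID expands to S_ISGID (as it does on platforms where setgid directories are usable):

	mode_t new_mode = 0705;               /* group gets nothing */
	new_mode |= (new_mode & 0444) >> 2;   /* copy r bits to x bits; still 0705 */
	if (S_ISGID && (new_mode & 060))      /* 0705 & 060 == 0: skip g+s */
		new_mode |= S_ISGID;
	/* with 0775 instead, 0775 & 060 == 060, so g+s would still be set */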
diff --git a/perl/Git.pm b/perl/Git.pm
index 080cdc2a21..117765dc73 100644
--- a/perl/Git.pm
+++ b/perl/Git.pm
@@ -177,16 +177,27 @@ sub repository {
-d $opts{Directory} or throw Error::Simple("Directory not found: $opts{Directory} $!");
my $search = Git->repository(WorkingCopy => $opts{Directory});
- my $dir;
+
+ # This rev-parse will throw an exception if we're not in a
+ # repository, which is what we want, but it's kind of noisy.
+ # Ideally we'd capture stderr and relay it, but doing so is
+ # awkward without depending on it fitting in a pipe buffer. So
+ # we just reproduce a plausible error message ourselves.
+ my $out;
try {
- $dir = $search->command_oneline(['rev-parse', '--git-dir'],
- STDERR => 0);
+ # Note that "--is-bare-repository" must come first, as
+ # --git-dir output could contain newlines.
+ $out = $search->command([qw(rev-parse --is-bare-repository --git-dir)],
+ STDERR => 0);
} catch Git::Error::Command with {
- $dir = undef;
+ throw Error::Simple("fatal: not a git repository: $opts{Directory}");
};
+ chomp $out;
+ my ($bare, $dir) = split /\n/, $out, 2;
+
require Cwd;
- if ($dir) {
+ if ($bare ne 'true') {
require File::Spec;
File::Spec->file_name_is_absolute($dir) or $dir = $opts{Directory} . '/' . $dir;
$opts{Repository} = Cwd::abs_path($dir);
@@ -204,21 +215,6 @@ sub repository {
$opts{WorkingSubdir} = $prefix;
} else {
- # A bare repository? Let's see...
- $dir = $opts{Directory};
-
- unless (-d "$dir/refs" and -d "$dir/objects" and -e "$dir/HEAD") {
- # Mimic git-rev-parse --git-dir error message:
- throw Error::Simple("fatal: Not a git repository: $dir");
- }
- my $search = Git->repository(Repository => $dir);
- try {
- $search->command('symbolic-ref', 'HEAD');
- } catch Git::Error::Command with {
- # Mimic git-rev-parse --git-dir error message:
- throw Error::Simple("fatal: Not a git repository: $dir");
- }
-
$opts{Repository} = Cwd::abs_path($dir);
}
diff --git a/promisor-remote.c b/promisor-remote.c
index 68f46f5ec7..faa7612941 100644
--- a/promisor-remote.c
+++ b/promisor-remote.c
@@ -4,6 +4,7 @@
#include "config.h"
#include "transport.h"
#include "strvec.h"
+#include "packfile.h"
struct promisor_remote_config {
struct promisor_remote *promisors;
@@ -230,18 +231,18 @@ static int remove_fetched_oids(struct repository *repo,
return remaining_nr;
}
-int promisor_remote_get_direct(struct repository *repo,
- const struct object_id *oids,
- int oid_nr)
+void promisor_remote_get_direct(struct repository *repo,
+ const struct object_id *oids,
+ int oid_nr)
{
struct promisor_remote *r;
struct object_id *remaining_oids = (struct object_id *)oids;
int remaining_nr = oid_nr;
int to_free = 0;
- int res = -1;
+ int i;
if (oid_nr == 0)
- return 0;
+ return;
promisor_remote_init(repo);
@@ -256,12 +257,16 @@ int promisor_remote_get_direct(struct repository *repo,
continue;
}
}
- res = 0;
- break;
+ goto all_fetched;
}
+ for (i = 0; i < remaining_nr; i++) {
+ if (is_promisor_object(&remaining_oids[i]))
+ die(_("could not fetch %s from promisor remote"),
+ oid_to_hex(&remaining_oids[i]));
+ }
+
+all_fetched:
if (to_free)
free(remaining_oids);
-
- return res;
}
diff --git a/promisor-remote.h b/promisor-remote.h
index edc45ab0f5..df36eb08ef 100644
--- a/promisor-remote.h
+++ b/promisor-remote.h
@@ -39,13 +39,12 @@ static inline int has_promisor_remote(void)
/*
* Fetches all requested objects from all promisor remotes, trying them one at
- * a time until all objects are fetched. Returns 0 upon success, and non-zero
- * otherwise.
+ * a time until all objects are fetched.
*
- * If oid_nr is 0, this function returns 0 (success) immediately.
+ * If oid_nr is 0, this function returns immediately.
*/
-int promisor_remote_get_direct(struct repository *repo,
- const struct object_id *oids,
- int oid_nr);
+void promisor_remote_get_direct(struct repository *repo,
+ const struct object_id *oids,
+ int oid_nr);
#endif /* PROMISOR_REMOTE_H */
diff --git a/read-cache.c b/read-cache.c
index b09128b188..57293825a8 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -285,7 +285,7 @@ static int ce_compare_gitlink(const struct cache_entry *ce)
*
* If so, we consider it always to match.
*/
- if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
+ if (resolve_gitlink_ref(ce->name, "HEAD", &oid, NULL) < 0)
return 0;
return !oideq(&oid, &ce->oid);
}
@@ -781,7 +781,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
namelen = strlen(path);
if (S_ISDIR(st_mode)) {
- if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ if (resolve_gitlink_ref(path, "HEAD", &oid, NULL) < 0)
return error(_("'%s' does not have a commit checked out"), path);
while (namelen && path[namelen-1] == '/')
namelen--;
@@ -1817,6 +1817,8 @@ static int verify_hdr(const struct cache_header *hdr, unsigned long size)
git_hash_ctx c;
unsigned char hash[GIT_MAX_RAWSZ];
int hdr_version;
+ int all_zeroes = 1;
+ unsigned char *start, *end;
if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
return error(_("bad signature 0x%08x"), hdr->hdr_signature);
@@ -1827,10 +1829,23 @@ static int verify_hdr(const struct cache_header *hdr, unsigned long size)
if (!verify_index_checksum)
return 0;
+ end = (unsigned char *)hdr + size;
+ start = end - the_hash_algo->rawsz;
+ while (start < end) {
+ if (*start != 0) {
+ all_zeroes = 0;
+ break;
+ }
+ start++;
+ }
+
+ if (all_zeroes)
+ return 0;
+
the_hash_algo->init_fn(&c);
the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
the_hash_algo->final_fn(hash, &c);
- if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
+ if (!hasheq(hash, end - the_hash_algo->rawsz))
return error(_("bad index file sha1 signature"));
return 0;
}
@@ -1873,9 +1888,20 @@ static int read_index_extension(struct index_state *istate,
return 0;
}
+/*
+ * Parses the contents of the cache entry contained within the 'ondisk' buffer
+ * into a new incore 'cache_entry'.
+ *
+ * Note that 'char *ondisk' may not be aligned to a 4-byte address interval in
+ * index v4, so we cannot cast it to 'struct ondisk_cache_entry *' and access
+ * its members. Instead, we use the byte offsets of members within the struct to
+ * identify where 'get_be16()', 'get_be32()', and 'oidread()' (which can all
+ * read from an unaligned memory buffer) should read from the 'ondisk' buffer
+ * into the corresponding incore 'cache_entry' members.
+ */
static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
unsigned int version,
- struct ondisk_cache_entry *ondisk,
+ const char *ondisk,
unsigned long *ent_size,
const struct cache_entry *previous_ce)
{
@@ -1883,7 +1909,7 @@ static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
size_t len;
const char *name;
const unsigned hashsz = the_hash_algo->rawsz;
- const uint16_t *flagsp = (const uint16_t *)(ondisk->data + hashsz);
+ const char *flagsp = ondisk + offsetof(struct ondisk_cache_entry, data) + hashsz;
unsigned int flags;
size_t copy_len = 0;
/*
@@ -1901,15 +1927,15 @@ static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
if (flags & CE_EXTENDED) {
int extended_flags;
- extended_flags = get_be16(flagsp + 1) << 16;
+ extended_flags = get_be16(flagsp + sizeof(uint16_t)) << 16;
/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
if (extended_flags & ~CE_EXTENDED_FLAGS)
die(_("unknown index entry format 0x%08x"), extended_flags);
flags |= extended_flags;
- name = (const char *)(flagsp + 2);
+ name = (const char *)(flagsp + 2 * sizeof(uint16_t));
}
else
- name = (const char *)(flagsp + 1);
+ name = (const char *)(flagsp + sizeof(uint16_t));
if (expand_name_field) {
const unsigned char *cp = (const unsigned char *)name;
@@ -1935,20 +1961,32 @@ static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
ce = mem_pool__ce_alloc(ce_mem_pool, len);
- ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
- ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
- ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec);
- ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec);
- ce->ce_stat_data.sd_dev = get_be32(&ondisk->dev);
- ce->ce_stat_data.sd_ino = get_be32(&ondisk->ino);
- ce->ce_mode = get_be32(&ondisk->mode);
- ce->ce_stat_data.sd_uid = get_be32(&ondisk->uid);
- ce->ce_stat_data.sd_gid = get_be32(&ondisk->gid);
- ce->ce_stat_data.sd_size = get_be32(&ondisk->size);
+ /*
+ * NEEDSWORK: using 'offsetof()' is cumbersome and should be replaced
+ * with something more akin to 'load_bitmap_entries_v1()'s use of
+ * 'read_be16'/'read_be32'. For consistency with the corresponding
+ * ondisk entry write function ('copy_cache_entry_to_ondisk()'), this
+ * should be done at the same time as removing references to
+ * 'ondisk_cache_entry' there.
+ */
+ ce->ce_stat_data.sd_ctime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
+ + offsetof(struct cache_time, sec));
+ ce->ce_stat_data.sd_mtime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
+ + offsetof(struct cache_time, sec));
+ ce->ce_stat_data.sd_ctime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
+ + offsetof(struct cache_time, nsec));
+ ce->ce_stat_data.sd_mtime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
+ + offsetof(struct cache_time, nsec));
+ ce->ce_stat_data.sd_dev = get_be32(ondisk + offsetof(struct ondisk_cache_entry, dev));
+ ce->ce_stat_data.sd_ino = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ino));
+ ce->ce_mode = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mode));
+ ce->ce_stat_data.sd_uid = get_be32(ondisk + offsetof(struct ondisk_cache_entry, uid));
+ ce->ce_stat_data.sd_gid = get_be32(ondisk + offsetof(struct ondisk_cache_entry, gid));
+ ce->ce_stat_data.sd_size = get_be32(ondisk + offsetof(struct ondisk_cache_entry, size));
ce->ce_flags = flags & ~CE_NAMEMASK;
ce->ce_namelen = len;
ce->index = 0;
- oidread(&ce->oid, ondisk->data);
+ oidread(&ce->oid, (const unsigned char *)ondisk + offsetof(struct ondisk_cache_entry, data));
if (expand_name_field) {
if (copy_len)
@@ -2117,12 +2155,12 @@ static unsigned long load_cache_entry_block(struct index_state *istate,
unsigned long src_offset = start_offset;
for (i = offset; i < offset + nr; i++) {
- struct ondisk_cache_entry *disk_ce;
struct cache_entry *ce;
unsigned long consumed;
- disk_ce = (struct ondisk_cache_entry *)(mmap + src_offset);
- ce = create_from_disk(ce_mem_pool, istate->version, disk_ce, &consumed, previous_ce);
+ ce = create_from_disk(ce_mem_pool, istate->version,
+ mmap + src_offset,
+ &consumed, previous_ce);
set_index_entry(istate, i, ce);
src_offset += consumed;
@@ -2535,6 +2573,11 @@ int discard_index(struct index_state *istate)
free_untracked_cache(istate->untracked);
istate->untracked = NULL;
+ if (istate->sparse_checkout_patterns) {
+ clear_pattern_list(istate->sparse_checkout_patterns);
+ FREE_AND_NULL(istate->sparse_checkout_patterns);
+ }
+
if (istate->ce_mem_pool) {
mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
FREE_AND_NULL(istate->ce_mem_pool);
@@ -2894,9 +2937,14 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
int ieot_entries = 1;
struct index_entry_offset_table *ieot = NULL;
int nr, nr_threads;
+ int compute_hash;
f = hashfd(tempfile->fd, tempfile->filename.buf);
+ if (!git_config_get_maybe_bool("index.computehash", &compute_hash) &&
+ !compute_hash)
+ f->skip_hash = 1;
+
for (i = removed = extended = 0; i < entries; i++) {
if (cache[i]->ce_flags & CE_REMOVE)
removed++;
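The offsetof() arithmetic above exists because an index-v4 entry can begin at any byte offset, so the on-disk struct must never be dereferenced through a cast; get_be16(), get_be32() and oidread() all accept unaligned pointers. The core idiom, with the buffer position named hypothetically:

	const char *ondisk = mmap_base + src_offset;   /* may be unaligned in index v4 */
	uint32_t mode = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mode));
	uint32_t size = get_be32(ondisk + offsetof(struct ondisk_cache_entry, size));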
diff --git a/ref-filter.c b/ref-filter.c
index fd1cb14b0f..caf10ab23e 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -1358,6 +1358,7 @@ static void find_subpos(const char *buf,
/* parse signature first; we might not even have a subject line */
parse_signature(buf, end - buf, &payload, &signature);
+ strbuf_release(&payload);
/* skip past header until we hit empty line */
while (*buf && *buf != '\n') {
@@ -1375,12 +1376,12 @@ static void find_subpos(const char *buf,
/* subject is first non-empty line */
*sub = buf;
/* subject goes to first empty line before signature begins */
- if ((eol = strstr(*sub, "\n\n"))) {
+ if ((eol = strstr(*sub, "\n\n")) ||
+ (eol = strstr(*sub, "\r\n\r\n"))) {
eol = eol < sigstart ? eol : sigstart;
- /* check if message uses CRLF */
- } else if (! (eol = strstr(*sub, "\r\n\r\n"))) {
+ } else {
/* treat whole message as subject */
- eol = strrchr(*sub, '\0');
+ eol = sigstart;
}
buf = eol;
*sublen = buf - *sub;
@@ -1722,6 +1723,8 @@ char *get_head_description(void)
} else
strbuf_addstr(&desc, _("(no branch)"));
+ wt_status_state_free_buffers(&state);
+
return strbuf_detach(&desc, NULL);
}
diff --git a/reflog-walk.c b/reflog-walk.c
index 7aa6595a51..8a4d8fa3bd 100644
--- a/reflog-walk.c
+++ b/reflog-walk.c
@@ -55,7 +55,7 @@ static void free_complete_reflog(struct complete_reflogs *array)
free(array);
}
-static void complete_reflogs_clear(void *util, const char *str)
+static void complete_reflogs_clear(void *util, const char *str UNUSED)
{
struct complete_reflogs *array = util;
free_complete_reflog(array);
diff --git a/reflog.c b/reflog.c
index d258fd3199..78e9350e20 100644
--- a/reflog.c
+++ b/reflog.c
@@ -312,16 +312,9 @@ static int push_tip_to_list(const char *refname UNUSED,
static int is_head(const char *refname)
{
- switch (ref_type(refname)) {
- case REF_TYPE_OTHER_PSEUDOREF:
- case REF_TYPE_MAIN_PSEUDOREF:
- if (parse_worktree_ref(refname, NULL, NULL, &refname))
- BUG("not a worktree ref: %s", refname);
- break;
- default:
- break;
- }
- return !strcmp(refname, "HEAD");
+ const char *stripped_refname;
+ parse_worktree_ref(refname, NULL, NULL, &stripped_refname);
+ return !strcmp(stripped_refname, "HEAD");
}
void reflog_expiry_prepare(const char *refname,
diff --git a/refs.c b/refs.c
index c89d558892..23ddc95909 100644
--- a/refs.c
+++ b/refs.c
@@ -811,7 +811,7 @@ int dwim_log(const char *str, int len, struct object_id *oid, char **log)
return repo_dwim_log(the_repository, str, len, oid, log);
}
-static int is_per_worktree_ref(const char *refname)
+int is_per_worktree_ref(const char *refname)
{
return starts_with(refname, "refs/worktree/") ||
starts_with(refname, "refs/bisect/") ||
@@ -827,37 +827,63 @@ static int is_pseudoref_syntax(const char *refname)
return 0;
}
+ /*
+ * HEAD is not a pseudoref, but it certainly uses the
+ * pseudoref syntax.
+ */
return 1;
}
-static int is_main_pseudoref_syntax(const char *refname)
-{
- return skip_prefix(refname, "main-worktree/", &refname) &&
- *refname &&
- is_pseudoref_syntax(refname);
+static int is_current_worktree_ref(const char *ref) {
+ return is_pseudoref_syntax(ref) || is_per_worktree_ref(ref);
}
-static int is_other_pseudoref_syntax(const char *refname)
+enum ref_worktree_type parse_worktree_ref(const char *maybe_worktree_ref,
+ const char **worktree_name, int *worktree_name_length,
+ const char **bare_refname)
{
- if (!skip_prefix(refname, "worktrees/", &refname))
- return 0;
- refname = strchr(refname, '/');
- if (!refname || !refname[1])
- return 0;
- return is_pseudoref_syntax(refname + 1);
-}
+ const char *name_dummy;
+ int name_length_dummy;
+ const char *ref_dummy;
-enum ref_type ref_type(const char *refname)
-{
- if (is_per_worktree_ref(refname))
- return REF_TYPE_PER_WORKTREE;
- if (is_pseudoref_syntax(refname))
- return REF_TYPE_PSEUDOREF;
- if (is_main_pseudoref_syntax(refname))
- return REF_TYPE_MAIN_PSEUDOREF;
- if (is_other_pseudoref_syntax(refname))
- return REF_TYPE_OTHER_PSEUDOREF;
- return REF_TYPE_NORMAL;
+ if (!worktree_name)
+ worktree_name = &name_dummy;
+ if (!worktree_name_length)
+ worktree_name_length = &name_length_dummy;
+ if (!bare_refname)
+ bare_refname = &ref_dummy;
+
+ if (skip_prefix(maybe_worktree_ref, "worktrees/", bare_refname)) {
+ const char *slash = strchr(*bare_refname, '/');
+
+ *worktree_name = *bare_refname;
+ if (!slash) {
+ *worktree_name_length = strlen(*worktree_name);
+
+			/* This is an error condition, and the caller can tell because the bare_refname is "" */
+ *bare_refname = *worktree_name + *worktree_name_length;
+ return REF_WORKTREE_OTHER;
+ }
+
+ *worktree_name_length = slash - *bare_refname;
+ *bare_refname = slash + 1;
+
+ if (is_current_worktree_ref(*bare_refname))
+ return REF_WORKTREE_OTHER;
+ }
+
+ *worktree_name = NULL;
+ *worktree_name_length = 0;
+
+ if (skip_prefix(maybe_worktree_ref, "main-worktree/", bare_refname)
+ && is_current_worktree_ref(*bare_refname))
+ return REF_WORKTREE_MAIN;
+
+ *bare_refname = maybe_worktree_ref;
+ if (is_current_worktree_ref(maybe_worktree_ref))
+ return REF_WORKTREE_CURRENT;
+
+ return REF_WORKTREE_SHARED;
}
long get_files_ref_lock_timeout_ms(void)
@@ -1388,9 +1414,8 @@ char *shorten_unambiguous_ref(const char *refname, int strict)
refname, strict);
}
-static struct string_list *hide_refs;
-
-int parse_hide_refs_config(const char *var, const char *value, const char *section)
+int parse_hide_refs_config(const char *var, const char *value, const char *section,
+ struct string_list *hide_refs)
{
const char *key;
if (!strcmp("transfer.hiderefs", var) ||
@@ -1405,21 +1430,16 @@ int parse_hide_refs_config(const char *var, const char *value, const char *secti
len = strlen(ref);
while (len && ref[len - 1] == '/')
ref[--len] = '\0';
- if (!hide_refs) {
- CALLOC_ARRAY(hide_refs, 1);
- hide_refs->strdup_strings = 1;
- }
- string_list_append(hide_refs, ref);
+ string_list_append_nodup(hide_refs, ref);
}
return 0;
}
-int ref_is_hidden(const char *refname, const char *refname_full)
+int ref_is_hidden(const char *refname, const char *refname_full,
+ const struct string_list *hide_refs)
{
int i;
- if (!hide_refs)
- return 0;
for (i = hide_refs->nr - 1; i >= 0; i--) {
const char *match = hide_refs->items[i].string;
const char *subject;
@@ -1878,19 +1898,21 @@ const char *resolve_ref_unsafe(const char *refname, int resolve_flags,
}
int resolve_gitlink_ref(const char *submodule, const char *refname,
- struct object_id *oid)
+ struct object_id *oid, const char **target_out)
{
struct ref_store *refs;
int flags;
+ const char *target;
refs = get_submodule_ref_store(submodule);
if (!refs)
return -1;
-
- if (!refs_resolve_ref_unsafe(refs, refname, 0, oid, &flags) ||
- is_null_oid(oid))
+ target = refs_resolve_ref_unsafe(refs, refname, 0, oid, &flags);
+ if (!target || is_null_oid(oid))
return -1;
+ if (target_out)
+ *target_out = target;
return 0;
}
@@ -1956,6 +1978,17 @@ static struct ref_store *lookup_ref_store_map(struct hashmap *map,
return entry ? entry->refs : NULL;
}
+static int add_ref_format_flags(enum ref_format_flags flags, int caps) {
+ if (flags & REF_FORMAT_FILES)
+ caps |= REF_STORE_FORMAT_FILES;
+ if (flags & REF_FORMAT_PACKED)
+ caps |= REF_STORE_FORMAT_PACKED;
+ if (flags & REF_FORMAT_PACKED_V2)
+ caps |= REF_STORE_FORMAT_PACKED_V2;
+
+ return caps;
+}
+
/*
* Create, record, and return a ref_store instance for the specified
* gitdir.
@@ -1965,9 +1998,17 @@ static struct ref_store *ref_store_init(struct repository *repo,
unsigned int flags)
{
const char *be_name = "files";
- struct ref_storage_be *be = find_ref_storage_backend(be_name);
+ struct ref_storage_be *be;
struct ref_store *refs;
+ flags = add_ref_format_flags(repo->ref_format, flags);
+
+ if (!(flags & REF_STORE_FORMAT_FILES) &&
+ packed_refs_enabled(flags))
+ be_name = "packed";
+
+ be = find_ref_storage_backend(be_name);
+
if (!be)
BUG("reference backend %s is unknown", be_name);
@@ -1983,7 +2024,8 @@ struct ref_store *get_main_ref_store(struct repository *r)
if (!r->gitdir)
BUG("attempting to get main_ref_store outside of repository");
- r->refs_private = ref_store_init(r, r->gitdir, REF_STORE_ALL_CAPS);
+ r->refs_private = ref_store_init(r, r->gitdir,
+ REF_STORE_ALL_CAPS);
r->refs_private = maybe_debug_wrap_ref_store(r->gitdir, r->refs_private);
return r->refs_private;
}
diff --git a/refs.h b/refs.h
index d6575b8c2b..ee8675d4aa 100644
--- a/refs.h
+++ b/refs.h
@@ -137,9 +137,12 @@ int peel_iterated_oid(const struct object_id *base, struct object_id *peeled);
* submodule (which must be non-NULL). If the resolution is
* successful, return 0 and set oid to the name of the object;
* otherwise, return a non-zero value.
+ *
+ * FIXME: Return "target" just like refs_resolve_ref_unsafe(). This will be
+ * safe to do once we merge resolve_gitlink_ref() into master.
*/
int resolve_gitlink_ref(const char *submodule, const char *refname,
- struct object_id *oid);
+ struct object_id *oid, const char **target);
/*
* Return true iff abbrev_name is a possible abbreviation for
@@ -808,7 +811,8 @@ int update_ref(const char *msg, const char *refname,
const struct object_id *new_oid, const struct object_id *old_oid,
unsigned int flags, enum action_on_err onerr);
-int parse_hide_refs_config(const char *var, const char *value, const char *);
+int parse_hide_refs_config(const char *var, const char *value, const char *,
+ struct string_list *);
/*
* Check whether a ref is hidden. If no namespace is set, both the first and
@@ -818,17 +822,36 @@ int parse_hide_refs_config(const char *var, const char *value, const char *);
* the ref is outside that namespace, the first parameter is NULL. The second
* parameter always points to the full ref name.
*/
-int ref_is_hidden(const char *, const char *);
+int ref_is_hidden(const char *, const char *, const struct string_list *);
+
+/* Is this a per-worktree ref living in the refs/ namespace? */
+int is_per_worktree_ref(const char *refname);
-enum ref_type {
- REF_TYPE_PER_WORKTREE, /* refs inside refs/ but not shared */
- REF_TYPE_PSEUDOREF, /* refs outside refs/ in current worktree */
- REF_TYPE_MAIN_PSEUDOREF, /* pseudo refs from the main worktree */
- REF_TYPE_OTHER_PSEUDOREF, /* pseudo refs from other worktrees */
- REF_TYPE_NORMAL, /* normal/shared refs inside refs/ */
+/* Describes how a refname relates to worktrees */
+enum ref_worktree_type {
+ REF_WORKTREE_CURRENT, /* implicitly per worktree, eg. HEAD or
+ refs/bisect/SOMETHING */
+ REF_WORKTREE_MAIN, /* explicitly in main worktree, eg.
+ main-worktree/HEAD */
+ REF_WORKTREE_OTHER, /* explicitly in named worktree, eg.
+ worktrees/bla/HEAD */
+ REF_WORKTREE_SHARED, /* the default, eg. refs/heads/main */
};
-enum ref_type ref_type(const char *refname);
+/*
+ * Parse a `maybe_worktree_ref` as a ref that possibly refers to a worktree ref
+ * (i.e. either REFNAME, main-worktree/REFNAME or worktrees/WORKTREE/REFNAME). It
+ * returns what kind of ref was found, and in case of REF_WORKTREE_OTHER, the
+ * worktree name is returned in `worktree_name` (pointing into
+ * `maybe_worktree_ref`) and `worktree_name_length`. The bare refname (the
+ * refname stripped of prefixes) is returned in `bare_refname`. The
+ * `worktree_name`, `worktree_name_length` and `bare_refname` arguments may be
+ * NULL.
+ */
+enum ref_worktree_type parse_worktree_ref(const char *maybe_worktree_ref,
+ const char **worktree_name,
+ int *worktree_name_length,
+ const char **bare_refname);
enum expire_reflog_flags {
EXPIRE_REFLOGS_DRY_RUN = 1 << 0,
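A few illustrative calls showing how the new classification behaves for typical refnames (a sketch; the comments follow the semantics documented above and implemented in refs.c):

	const char *wt, *bare;
	int wt_len;

	parse_worktree_ref("refs/heads/main", &wt, &wt_len, &bare);
	/* REF_WORKTREE_SHARED, wt == NULL, bare == "refs/heads/main" */

	parse_worktree_ref("HEAD", &wt, &wt_len, &bare);
	/* REF_WORKTREE_CURRENT, wt == NULL, bare == "HEAD" */

	parse_worktree_ref("main-worktree/HEAD", &wt, &wt_len, &bare);
	/* REF_WORKTREE_MAIN, wt == NULL, bare == "HEAD" */

	parse_worktree_ref("worktrees/wt1/HEAD", &wt, &wt_len, &bare);
	/* REF_WORKTREE_OTHER, wt_len == 3, wt points at "wt1/...", bare == "HEAD" */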
diff --git a/refs/files-backend.c b/refs/files-backend.c
index e4009b3c42..4a18aed620 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -138,44 +138,30 @@ static struct files_ref_store *files_downcast(struct ref_store *ref_store,
return refs;
}
-static void files_reflog_path_other_worktrees(struct files_ref_store *refs,
- struct strbuf *sb,
- const char *refname)
-{
- const char *real_ref;
- const char *worktree_name;
- int length;
-
- if (parse_worktree_ref(refname, &worktree_name, &length, &real_ref))
- BUG("refname %s is not a other-worktree ref", refname);
-
- if (worktree_name)
- strbuf_addf(sb, "%s/worktrees/%.*s/logs/%s", refs->gitcommondir,
- length, worktree_name, real_ref);
- else
- strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir,
- real_ref);
-}
-
static void files_reflog_path(struct files_ref_store *refs,
struct strbuf *sb,
const char *refname)
{
- switch (ref_type(refname)) {
- case REF_TYPE_PER_WORKTREE:
- case REF_TYPE_PSEUDOREF:
+ const char *bare_refname;
+ const char *wtname;
+ int wtname_len;
+ enum ref_worktree_type wt_type = parse_worktree_ref(
+ refname, &wtname, &wtname_len, &bare_refname);
+
+ switch (wt_type) {
+ case REF_WORKTREE_CURRENT:
strbuf_addf(sb, "%s/logs/%s", refs->base.gitdir, refname);
break;
- case REF_TYPE_OTHER_PSEUDOREF:
- case REF_TYPE_MAIN_PSEUDOREF:
- files_reflog_path_other_worktrees(refs, sb, refname);
+ case REF_WORKTREE_SHARED:
+ case REF_WORKTREE_MAIN:
+ strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, bare_refname);
break;
- case REF_TYPE_NORMAL:
- strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, refname);
+ case REF_WORKTREE_OTHER:
+ strbuf_addf(sb, "%s/worktrees/%.*s/logs/%s", refs->gitcommondir,
+ wtname_len, wtname, bare_refname);
break;
default:
- BUG("unknown ref type %d of ref %s",
- ref_type(refname), refname);
+ BUG("unknown ref type %d of ref %s", wt_type, refname);
}
}
@@ -183,22 +169,25 @@ static void files_ref_path(struct files_ref_store *refs,
struct strbuf *sb,
const char *refname)
{
- switch (ref_type(refname)) {
- case REF_TYPE_PER_WORKTREE:
- case REF_TYPE_PSEUDOREF:
+ const char *bare_refname;
+ const char *wtname;
+ int wtname_len;
+ enum ref_worktree_type wt_type = parse_worktree_ref(
+ refname, &wtname, &wtname_len, &bare_refname);
+ switch (wt_type) {
+ case REF_WORKTREE_CURRENT:
strbuf_addf(sb, "%s/%s", refs->base.gitdir, refname);
break;
- case REF_TYPE_MAIN_PSEUDOREF:
- if (!skip_prefix(refname, "main-worktree/", &refname))
- BUG("ref %s is not a main pseudoref", refname);
- /* fallthrough */
- case REF_TYPE_OTHER_PSEUDOREF:
- case REF_TYPE_NORMAL:
- strbuf_addf(sb, "%s/%s", refs->gitcommondir, refname);
+ case REF_WORKTREE_OTHER:
+ strbuf_addf(sb, "%s/worktrees/%.*s/%s", refs->gitcommondir,
+ wtname_len, wtname, bare_refname);
+ break;
+ case REF_WORKTREE_SHARED:
+ case REF_WORKTREE_MAIN:
+ strbuf_addf(sb, "%s/%s", refs->gitcommondir, bare_refname);
break;
default:
- BUG("unknown ref type %d of ref %s",
- ref_type(refname), refname);
+ BUG("unknown ref type %d of ref %s", wt_type, refname);
}
}
@@ -771,7 +760,8 @@ static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
- ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
+ parse_worktree_ref(iter->iter0->refname, NULL, NULL,
+ NULL) != REF_WORKTREE_CURRENT)
continue;
if ((iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS) &&
@@ -1178,7 +1168,8 @@ static int should_pack_ref(const char *refname,
unsigned int pack_flags)
{
/* Do not pack per-worktree refs: */
- if (ref_type(refname) != REF_TYPE_NORMAL)
+ if (parse_worktree_ref(refname, NULL, NULL, NULL) !=
+ REF_WORKTREE_SHARED)
return 0;
/* Do not pack non-tags unless PACK_REFS_ALL is set: */
@@ -1207,6 +1198,12 @@ static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
struct strbuf err = STRBUF_INIT;
struct ref_transaction *transaction;
+ if (!packed_refs_enabled(refs->store_flags)) {
+ warning(_("refusing to create '%s' file because '%s' is not set"),
+ "packed-refs", "extensions.refFormat=packed");
+ return -1;
+ }
+
transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
if (!transaction)
return -1;
@@ -2267,7 +2264,8 @@ static enum iterator_selection reflog_iterator_select(
*/
return ITER_SELECT_0;
} else if (iter_common) {
- if (ref_type(iter_common->refname) == REF_TYPE_NORMAL)
+ if (parse_worktree_ref(iter_common->refname, NULL, NULL,
+ NULL) == REF_WORKTREE_SHARED)
return ITER_SELECT_1;
/*
@@ -3282,7 +3280,7 @@ static int files_init_db(struct ref_store *ref_store, struct strbuf *err UNUSED)
}
struct ref_storage_be refs_be_files = {
- .next = NULL,
+ .next = &refs_be_packed,
.name = "files",
.init = files_ref_store_create,
.init_db = files_init_db,
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index 43cdb97f8b..e84f669c42 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -36,121 +36,6 @@ static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif
-struct packed_ref_store;
-
-/*
- * A `snapshot` represents one snapshot of a `packed-refs` file.
- *
- * Normally, this will be a mmapped view of the contents of the
- * `packed-refs` file at the time the snapshot was created. However,
- * if the `packed-refs` file was not sorted, this might point at heap
- * memory holding the contents of the `packed-refs` file with its
- * records sorted by refname.
- *
- * `snapshot` instances are reference counted (via
- * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
- * an instance from disappearing while an iterator is still iterating
- * over it. Instances are garbage collected when their `referrers`
- * count goes to zero.
- *
- * The most recent `snapshot`, if available, is referenced by the
- * `packed_ref_store`. Its freshness is checked whenever
- * `get_snapshot()` is called; if the existing snapshot is obsolete, a
- * new snapshot is taken.
- */
-struct snapshot {
- /*
- * A back-pointer to the packed_ref_store with which this
- * snapshot is associated:
- */
- struct packed_ref_store *refs;
-
- /* Is the `packed-refs` file currently mmapped? */
- int mmapped;
-
- /*
- * The contents of the `packed-refs` file:
- *
- * - buf -- a pointer to the start of the memory
- * - start -- a pointer to the first byte of actual references
- * (i.e., after the header line, if one is present)
- * - eof -- a pointer just past the end of the reference
- * contents
- *
- * If the `packed-refs` file was already sorted, `buf` points
- * at the mmapped contents of the file. If not, it points at
- * heap-allocated memory containing the contents, sorted. If
- * there were no contents (e.g., because the file didn't
- * exist), `buf`, `start`, and `eof` are all NULL.
- */
- char *buf, *start, *eof;
-
- /*
- * What is the peeled state of the `packed-refs` file that
- * this snapshot represents? (This is usually determined from
- * the file's header.)
- */
- enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;
-
- /*
- * Count of references to this instance, including the pointer
- * from `packed_ref_store::snapshot`, if any. The instance
- * will not be freed as long as the reference count is
- * nonzero.
- */
- unsigned int referrers;
-
- /*
- * The metadata of the `packed-refs` file from which this
- * snapshot was created, used to tell if the file has been
- * replaced since we read it.
- */
- struct stat_validity validity;
-};
-
-/*
- * A `ref_store` representing references stored in a `packed-refs`
- * file. It implements the `ref_store` interface, though it has some
- * limitations:
- *
- * - It cannot store symbolic references.
- *
- * - It cannot store reflogs.
- *
- * - It does not support reference renaming (though it could).
- *
- * On the other hand, it can be locked outside of a reference
- * transaction. In that case, it remains locked even after the
- * transaction is done and the new `packed-refs` file is activated.
- */
-struct packed_ref_store {
- struct ref_store base;
-
- unsigned int store_flags;
-
- /* The path of the "packed-refs" file: */
- char *path;
-
- /*
- * A snapshot of the values read from the `packed-refs` file,
- * if it might still be current; otherwise, NULL.
- */
- struct snapshot *snapshot;
-
- /*
- * Lock used for the "packed-refs" file. Note that this (and
- * thus the enclosing `packed_ref_store`) must not be freed.
- */
- struct lock_file lock;
-
- /*
- * Temporary file used when rewriting new contents to the
- * "packed-refs" file. Note that this (and thus the enclosing
- * `packed_ref_store`) must not be freed.
- */
- struct tempfile *tempfile;
-};
-
/*
* Increment the reference count of `*snapshot`.
*/
@@ -164,7 +49,7 @@ static void acquire_snapshot(struct snapshot *snapshot)
* memory and close the file, or free the memory. Then set the buffer
* pointers to NULL.
*/
-static void clear_snapshot_buffer(struct snapshot *snapshot)
+void clear_snapshot_buffer(struct snapshot *snapshot)
{
if (snapshot->mmapped) {
if (munmap(snapshot->buf, snapshot->eof - snapshot->buf))
@@ -181,7 +66,7 @@ static void clear_snapshot_buffer(struct snapshot *snapshot)
* Decrease the reference count of `*snapshot`. If it goes to zero,
* free `*snapshot` and return true; otherwise return false.
*/
-static int release_snapshot(struct snapshot *snapshot)
+int release_snapshot(struct snapshot *snapshot)
{
if (!--snapshot->referrers) {
stat_validity_clear(&snapshot->validity);
@@ -245,224 +130,6 @@ static void clear_snapshot(struct packed_ref_store *refs)
}
}
-static NORETURN void die_unterminated_line(const char *path,
- const char *p, size_t len)
-{
- if (len < 80)
- die("unterminated line in %s: %.*s", path, (int)len, p);
- else
- die("unterminated line in %s: %.75s...", path, p);
-}
-
-static NORETURN void die_invalid_line(const char *path,
- const char *p, size_t len)
-{
- const char *eol = memchr(p, '\n', len);
-
- if (!eol)
- die_unterminated_line(path, p, len);
- else if (eol - p < 80)
- die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
- else
- die("unexpected line in %s: %.75s...", path, p);
-
-}
-
-struct snapshot_record {
- const char *start;
- size_t len;
-};
-
-static int cmp_packed_ref_records(const void *v1, const void *v2)
-{
- const struct snapshot_record *e1 = v1, *e2 = v2;
- const char *r1 = e1->start + the_hash_algo->hexsz + 1;
- const char *r2 = e2->start + the_hash_algo->hexsz + 1;
-
- while (1) {
- if (*r1 == '\n')
- return *r2 == '\n' ? 0 : -1;
- if (*r1 != *r2) {
- if (*r2 == '\n')
- return 1;
- else
- return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
- }
- r1++;
- r2++;
- }
-}
-
-/*
- * Compare a snapshot record at `rec` to the specified NUL-terminated
- * refname.
- */
-static int cmp_record_to_refname(const char *rec, const char *refname)
-{
- const char *r1 = rec + the_hash_algo->hexsz + 1;
- const char *r2 = refname;
-
- while (1) {
- if (*r1 == '\n')
- return *r2 ? -1 : 0;
- if (!*r2)
- return 1;
- if (*r1 != *r2)
- return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
- r1++;
- r2++;
- }
-}
-
-/*
- * `snapshot->buf` is not known to be sorted. Check whether it is, and
- * if not, sort it into new memory and munmap/free the old storage.
- */
-static void sort_snapshot(struct snapshot *snapshot)
-{
- struct snapshot_record *records = NULL;
- size_t alloc = 0, nr = 0;
- int sorted = 1;
- const char *pos, *eof, *eol;
- size_t len, i;
- char *new_buffer, *dst;
-
- pos = snapshot->start;
- eof = snapshot->eof;
-
- if (pos == eof)
- return;
-
- len = eof - pos;
-
- /*
- * Initialize records based on a crude estimate of the number
- * of references in the file (we'll grow it below if needed):
- */
- ALLOC_GROW(records, len / 80 + 20, alloc);
-
- while (pos < eof) {
- eol = memchr(pos, '\n', eof - pos);
- if (!eol)
- /* The safety check should prevent this. */
- BUG("unterminated line found in packed-refs");
- if (eol - pos < the_hash_algo->hexsz + 2)
- die_invalid_line(snapshot->refs->path,
- pos, eof - pos);
- eol++;
- if (eol < eof && *eol == '^') {
- /*
- * Keep any peeled line together with its
- * reference:
- */
- const char *peeled_start = eol;
-
- eol = memchr(peeled_start, '\n', eof - peeled_start);
- if (!eol)
- /* The safety check should prevent this. */
- BUG("unterminated peeled line found in packed-refs");
- eol++;
- }
-
- ALLOC_GROW(records, nr + 1, alloc);
- records[nr].start = pos;
- records[nr].len = eol - pos;
- nr++;
-
- if (sorted &&
- nr > 1 &&
- cmp_packed_ref_records(&records[nr - 2],
- &records[nr - 1]) >= 0)
- sorted = 0;
-
- pos = eol;
- }
-
- if (sorted)
- goto cleanup;
-
- /* We need to sort the memory. First we sort the records array: */
- QSORT(records, nr, cmp_packed_ref_records);
-
- /*
- * Allocate a new chunk of memory, and copy the old memory to
- * the new in the order indicated by `records` (not bothering
- * with the header line):
- */
- new_buffer = xmalloc(len);
- for (dst = new_buffer, i = 0; i < nr; i++) {
- memcpy(dst, records[i].start, records[i].len);
- dst += records[i].len;
- }
-
- /*
- * Now munmap the old buffer and use the sorted buffer in its
- * place:
- */
- clear_snapshot_buffer(snapshot);
- snapshot->buf = snapshot->start = new_buffer;
- snapshot->eof = new_buffer + len;
-
-cleanup:
- free(records);
-}
-
-/*
- * Return a pointer to the start of the record that contains the
- * character `*p` (which must be within the buffer). If no other
- * record start is found, return `buf`.
- */
-static const char *find_start_of_record(const char *buf, const char *p)
-{
- while (p > buf && (p[-1] != '\n' || p[0] == '^'))
- p--;
- return p;
-}
-
-/*
- * Return a pointer to the start of the record following the record
- * that contains `*p`. If none is found before `end`, return `end`.
- */
-static const char *find_end_of_record(const char *p, const char *end)
-{
- while (++p < end && (p[-1] != '\n' || p[0] == '^'))
- ;
- return p;
-}
-
-/*
- * We want to be able to compare mmapped reference records quickly,
- * without totally parsing them. We can do so because the records are
- * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
- * + 1) bytes past the beginning of the record.
- *
- * But what if the `packed-refs` file contains garbage? We're willing
- * to tolerate not detecting the problem, as long as we don't produce
- * totally garbled output (we can't afford to check the integrity of
- * the whole file during every Git invocation). But we do want to be
- * sure that we never read past the end of the buffer in memory and
- * perform an illegal memory access.
- *
- * Guarantee that minimum level of safety by verifying that the last
- * record in the file is LF-terminated, and that it has at least
- * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
- * these checks fails.
- */
-static void verify_buffer_safe(struct snapshot *snapshot)
-{
- const char *start = snapshot->start;
- const char *eof = snapshot->eof;
- const char *last_line;
-
- if (start == eof)
- return;
-
- last_line = find_start_of_record(start, eof - 1);
- if (*(eof - 1) != '\n' || eof - last_line < the_hash_algo->hexsz + 2)
- die_invalid_line(snapshot->refs->path,
- last_line, eof - last_line);
-}
-
#define SMALL_FILE_SIZE (32*1024)
/*
@@ -475,9 +142,11 @@ static int load_contents(struct snapshot *snapshot)
{
int fd;
struct stat st;
- size_t size;
ssize_t bytes_read;
+ if (!packed_refs_enabled(snapshot->refs->store_flags))
+ return 0;
+
fd = open(snapshot->refs->path, O_RDONLY);
if (fd < 0) {
if (errno == ENOENT) {
@@ -498,91 +167,30 @@ static int load_contents(struct snapshot *snapshot)
if (fstat(fd, &st) < 0)
die_errno("couldn't stat %s", snapshot->refs->path);
- size = xsize_t(st.st_size);
+ snapshot->buflen = xsize_t(st.st_size);
- if (!size) {
+ if (!snapshot->buflen) {
close(fd);
return 0;
- } else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
- snapshot->buf = xmalloc(size);
- bytes_read = read_in_full(fd, snapshot->buf, size);
- if (bytes_read < 0 || bytes_read != size)
+ } else if (mmap_strategy == MMAP_NONE || snapshot->buflen <= SMALL_FILE_SIZE) {
+ snapshot->buf = xmalloc(snapshot->buflen);
+ bytes_read = read_in_full(fd, snapshot->buf, snapshot->buflen);
+ if (bytes_read < 0 || bytes_read != snapshot->buflen)
die_errno("couldn't read %s", snapshot->refs->path);
snapshot->mmapped = 0;
} else {
- snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+ snapshot->buf = xmmap(NULL, snapshot->buflen, PROT_READ, MAP_PRIVATE, fd, 0);
snapshot->mmapped = 1;
}
close(fd);
snapshot->start = snapshot->buf;
- snapshot->eof = snapshot->buf + size;
+ snapshot->eof = snapshot->buf + snapshot->buflen;
return 1;
}
/*
- * Find the place in `snapshot->buf` where the start of the record for
- * `refname` starts. If `mustexist` is true and the reference doesn't
- * exist, then return NULL. If `mustexist` is false and the reference
- * doesn't exist, then return the point where that reference would be
- * inserted, or `snapshot->eof` (which might be NULL) if it would be
- * inserted at the end of the file. In the latter mode, `refname`
- * doesn't have to be a proper reference name; for example, one could
- * search for "refs/replace/" to find the start of any replace
- * references.
- *
- * The record is sought using a binary search, so `snapshot->buf` must
- * be sorted.
- */
-static const char *find_reference_location(struct snapshot *snapshot,
- const char *refname, int mustexist)
-{
- /*
- * This is not *quite* a garden-variety binary search, because
- * the data we're searching is made up of records, and we
- * always need to find the beginning of a record to do a
- * comparison. A "record" here is one line for the reference
- * itself and zero or one peel lines that start with '^'. Our
- * loop invariant is described in the next two comments.
- */
-
- /*
- * A pointer to the character at the start of a record whose
- * preceding records all have reference names that come
- * *before* `refname`.
- */
- const char *lo = snapshot->start;
-
- /*
- * A pointer to a the first character of a record whose
- * reference name comes *after* `refname`.
- */
- const char *hi = snapshot->eof;
-
- while (lo != hi) {
- const char *mid, *rec;
- int cmp;
-
- mid = lo + (hi - lo) / 2;
- rec = find_start_of_record(lo, mid);
- cmp = cmp_record_to_refname(rec, refname);
- if (cmp < 0) {
- lo = find_end_of_record(mid, hi);
- } else if (cmp > 0) {
- hi = rec;
- } else {
- return rec;
- }
- }
-
- if (mustexist)
- return NULL;
- else
- return lo;
-}
-
-/*
* Create a newly-allocated `snapshot` of the `packed-refs` file in
* its current state and return it. The return value will already have
* its reference count incremented.
@@ -623,72 +231,52 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
snapshot->refs = refs;
acquire_snapshot(snapshot);
snapshot->peeled = PEELED_NONE;
+ snapshot->version = 1;
if (!load_contents(snapshot))
return snapshot;
- /* If the file has a header line, process it: */
- if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
- char *tmp, *p, *eol;
- struct string_list traits = STRING_LIST_INIT_NODUP;
-
- eol = memchr(snapshot->buf, '\n',
- snapshot->eof - snapshot->buf);
- if (!eol)
- die_unterminated_line(refs->path,
- snapshot->buf,
- snapshot->eof - snapshot->buf);
-
- tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
-
- if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
- die_invalid_line(refs->path,
- snapshot->buf,
- snapshot->eof - snapshot->buf);
-
- string_list_split_in_place(&traits, p, ' ', -1);
-
- if (unsorted_string_list_has_string(&traits, "fully-peeled"))
- snapshot->peeled = PEELED_FULLY;
- else if (unsorted_string_list_has_string(&traits, "peeled"))
- snapshot->peeled = PEELED_TAGS;
+ if ((refs->store_flags & REF_STORE_FORMAT_PACKED) &&
+ !detect_packed_format_v2_header(refs, snapshot)) {
+ parse_packed_format_v1_header(refs, snapshot, &sorted);
+ snapshot->version = 1;
+ verify_buffer_safe_v1(snapshot);
- sorted = unsorted_string_list_has_string(&traits, "sorted");
+ if (!sorted) {
+ sort_snapshot_v1(snapshot);
- /* perhaps other traits later as well */
-
- /* The "+ 1" is for the LF character. */
- snapshot->start = eol + 1;
-
- string_list_clear(&traits, 0);
- free(tmp);
- }
-
- verify_buffer_safe(snapshot);
+ /*
+ * Reordering the records might have moved a short one
+ * to the end of the buffer, so verify the buffer's
+ * safety again:
+ */
+ verify_buffer_safe_v1(snapshot);
+ }
- if (!sorted) {
- sort_snapshot(snapshot);
+ if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
+ /*
+ * We don't want to leave the file mmapped, so we are
+ * forced to make a copy now:
+ */
+ char *buf_copy = xmalloc(snapshot->buflen);
+
+ memcpy(buf_copy, snapshot->start, snapshot->buflen);
+ clear_snapshot_buffer(snapshot);
+ snapshot->buf = snapshot->start = buf_copy;
+ snapshot->eof = buf_copy + snapshot->buflen;
+ }
- /*
- * Reordering the records might have moved a short one
- * to the end of the buffer, so verify the buffer's
- * safety again:
- */
- verify_buffer_safe(snapshot);
+ return snapshot;
}
- if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
+ if (refs->store_flags & REF_STORE_FORMAT_PACKED_V2) {
/*
- * We don't want to leave the file mmapped, so we are
- * forced to make a copy now:
+		 * Assume we are in v2 format mode now.
+ *
+ * fill_snapshot_v2() will die() if parsing fails.
*/
- size_t size = snapshot->eof - snapshot->start;
- char *buf_copy = xmalloc(size);
-
- memcpy(buf_copy, snapshot->start, size);
- clear_snapshot_buffer(snapshot);
- snapshot->buf = snapshot->start = buf_copy;
- snapshot->eof = buf_copy + size;
+ fill_snapshot_v2(snapshot);
+ snapshot->version = 2;
}
return snapshot;
@@ -732,54 +320,26 @@ static int packed_read_raw_ref(struct ref_store *ref_store, const char *refname,
struct packed_ref_store *refs =
packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
struct snapshot *snapshot = get_snapshot(refs);
- const char *rec;
-
- *type = 0;
- rec = find_reference_location(snapshot, refname, 1);
-
- if (!rec) {
+ if (!snapshot) {
/* refname is not a packed reference. */
*failure_errno = ENOENT;
return -1;
}
- if (get_oid_hex(rec, oid))
- die_invalid_line(refs->path, rec, snapshot->eof - rec);
-
- *type = REF_ISPACKED;
- return 0;
-}
-
-/*
- * This value is set in `base.flags` if the peeled value of the
- * current reference is known. In that case, `peeled` contains the
- * correct peeled value for the reference, which might be `null_oid`
- * if the reference is not a tag or if it is broken.
- */
-#define REF_KNOWS_PEELED 0x40
-
-/*
- * An iterator over a snapshot of a `packed-refs` file.
- */
-struct packed_ref_iterator {
- struct ref_iterator base;
-
- struct snapshot *snapshot;
-
- /* The current position in the snapshot's buffer: */
- const char *pos;
+ switch (snapshot->version) {
+ case 1:
+ return packed_read_raw_ref_v1(refs, snapshot, refname,
+ oid, type, failure_errno);
- /* The end of the part of the buffer that will be iterated over: */
- const char *eof;
+ case 2:
+ return packed_read_raw_ref_v2(refs, snapshot, refname,
+ oid, type, failure_errno);
- /* Scratch space for current values: */
- struct object_id oid, peeled;
- struct strbuf refname_buf;
-
- struct repository *repo;
- unsigned int flags;
-};
+ default:
+ return -1;
+ }
+}
/*
* Move the iterator to the next record in the snapshot, without
@@ -790,68 +350,16 @@ struct packed_ref_iterator {
*/
static int next_record(struct packed_ref_iterator *iter)
{
- const char *p = iter->pos, *eol;
-
- strbuf_reset(&iter->refname_buf);
+ switch (iter->version) {
+ case 1:
+ return next_record_v1(iter);
- if (iter->pos == iter->eof)
- return ITER_DONE;
-
- iter->base.flags = REF_ISPACKED;
-
- if (iter->eof - p < the_hash_algo->hexsz + 2 ||
- parse_oid_hex(p, &iter->oid, &p) ||
- !isspace(*p++))
- die_invalid_line(iter->snapshot->refs->path,
- iter->pos, iter->eof - iter->pos);
-
- eol = memchr(p, '\n', iter->eof - p);
- if (!eol)
- die_unterminated_line(iter->snapshot->refs->path,
- iter->pos, iter->eof - iter->pos);
-
- strbuf_add(&iter->refname_buf, p, eol - p);
- iter->base.refname = iter->refname_buf.buf;
-
- if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
- if (!refname_is_safe(iter->base.refname))
- die("packed refname is dangerous: %s",
- iter->base.refname);
- oidclr(&iter->oid);
- iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
- }
- if (iter->snapshot->peeled == PEELED_FULLY ||
- (iter->snapshot->peeled == PEELED_TAGS &&
- starts_with(iter->base.refname, "refs/tags/")))
- iter->base.flags |= REF_KNOWS_PEELED;
-
- iter->pos = eol + 1;
-
- if (iter->pos < iter->eof && *iter->pos == '^') {
- p = iter->pos + 1;
- if (iter->eof - p < the_hash_algo->hexsz + 1 ||
- parse_oid_hex(p, &iter->peeled, &p) ||
- *p++ != '\n')
- die_invalid_line(iter->snapshot->refs->path,
- iter->pos, iter->eof - iter->pos);
- iter->pos = p;
+ case 2:
+ return next_record_v2(iter);
- /*
- * Regardless of what the file header said, we
- * definitely know the value of *this* reference. But
- * we suppress it if the reference is broken:
- */
- if ((iter->base.flags & REF_ISBROKEN)) {
- oidclr(&iter->peeled);
- iter->base.flags &= ~REF_KNOWS_PEELED;
- } else {
- iter->base.flags |= REF_KNOWS_PEELED;
- }
- } else {
- oidclr(&iter->peeled);
+ default:
+ return -1;
}
-
- return ITER_OK;
}
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
@@ -862,7 +370,7 @@ static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
while ((ok = next_record(iter)) == ITER_OK) {
if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
- ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
+ !is_per_worktree_ref(iter->base.refname))
continue;
if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
@@ -926,6 +434,7 @@ static struct ref_iterator *packed_ref_iterator_begin(
struct packed_ref_iterator *iter;
struct ref_iterator *ref_iterator;
unsigned int required_flags = REF_STORE_READ;
+ size_t v2_row = 0;
if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
required_flags |= REF_STORE_ODB;
@@ -938,10 +447,21 @@ static struct ref_iterator *packed_ref_iterator_begin(
*/
snapshot = get_snapshot(refs);
- if (prefix && *prefix)
- start = find_reference_location(snapshot, prefix, 0);
- else
- start = snapshot->start;
+ if (!snapshot || snapshot->version < 0 || snapshot->version > 2)
+ return empty_ref_iterator_begin();
+
+ if (prefix && *prefix) {
+ if (snapshot->version == 1)
+ start = find_reference_location_v1(snapshot, prefix, 0);
+ else
+ start = find_reference_location_v2(snapshot, prefix, 0,
+ &v2_row);
+ } else {
+ if (snapshot->version == 1)
+ start = snapshot->start;
+ else
+ start = snapshot->refs_chunk;
+ }
if (start == snapshot->eof)
return empty_ref_iterator_begin();
@@ -952,6 +472,10 @@ static struct ref_iterator *packed_ref_iterator_begin(
iter->snapshot = snapshot;
acquire_snapshot(snapshot);
+ iter->version = snapshot->version;
+ iter->row = v2_row;
+
+ init_iterator_prefix_info(prefix, iter);
iter->pos = start;
iter->eof = snapshot->eof;
@@ -969,23 +493,6 @@ static struct ref_iterator *packed_ref_iterator_begin(
return ref_iterator;
}
-/*
- * Write an entry to the packed-refs file for the specified refname.
- * If peeled is non-NULL, write it as the entry's peeled value. On
- * error, return a nonzero value and leave errno set at the value left
- * by the failing call to `fprintf()`.
- */
-static int write_packed_entry(FILE *fh, const char *refname,
- const struct object_id *oid,
- const struct object_id *peeled)
-{
- if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||
- (peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))
- return -1;
-
- return 0;
-}
-
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
struct packed_ref_store *refs =
@@ -1067,17 +574,6 @@ int packed_refs_is_locked(struct ref_store *ref_store)
return is_lock_file_locked(&refs->lock);
}
-/*
- * The packed-refs header line that we write out. Perhaps other traits
- * will be added later.
- *
- * Note that earlier versions of Git used to parse these traits by
- * looking for " trait " in the line. For this reason, the space after
- * the colon and the trailing space are required.
- */
-static const char PACKED_REFS_HEADER[] =
- "# pack-refs with: peeled fully-peeled sorted \n";
-
static int packed_init_db(struct ref_store *ref_store UNUSED,
struct strbuf *err UNUSED)
{
@@ -1085,56 +581,20 @@ static int packed_init_db(struct ref_store *ref_store UNUSED,
return 0;
}
-/*
- * Write the packed refs from the current snapshot to the packed-refs
- * tempfile, incorporating any changes from `updates`. `updates` must
- * be a sorted string list whose keys are the refnames and whose util
- * values are `struct ref_update *`. On error, rollback the tempfile,
- * write an error message to `err`, and return a nonzero value.
- *
- * The packfile must be locked before calling this function and will
- * remain locked when it is done.
- */
-static int write_with_updates(struct packed_ref_store *refs,
- struct string_list *updates,
- struct strbuf *err)
+static void add_write_error(struct packed_ref_store *refs, struct strbuf *err)
{
- struct ref_iterator *iter = NULL;
- size_t i;
- int ok;
- FILE *out;
- struct strbuf sb = STRBUF_INIT;
- char *packed_refs_path;
-
- if (!is_lock_file_locked(&refs->lock))
- BUG("write_with_updates() called while unlocked");
-
- /*
- * If packed-refs is a symlink, we want to overwrite the
- * symlinked-to file, not the symlink itself. Also, put the
- * staging file next to it:
- */
- packed_refs_path = get_locked_file_path(&refs->lock);
- strbuf_addf(&sb, "%s.new", packed_refs_path);
- free(packed_refs_path);
- refs->tempfile = create_tempfile(sb.buf);
- if (!refs->tempfile) {
- strbuf_addf(err, "unable to create file %s: %s",
- sb.buf, strerror(errno));
- strbuf_release(&sb);
- return -1;
- }
- strbuf_release(&sb);
-
- out = fdopen_tempfile(refs->tempfile, "w");
- if (!out) {
- strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
- strerror(errno));
- goto error;
- }
+ strbuf_addf(err, "error writing to %s: %s",
+ get_tempfile_path(refs->tempfile), strerror(errno));
+}
- if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
- goto write_error;
+int merge_iterator_and_updates(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err,
+ write_ref_fn write_fn,
+ void *write_data)
+{
+ struct ref_iterator *iter = NULL;
+ int ok, i;
/*
* We iterate in parallel through the current list of refs and
@@ -1227,10 +687,13 @@ static int write_with_updates(struct packed_ref_store *refs,
struct object_id peeled;
int peel_error = ref_iterator_peel(iter, &peeled);
- if (write_packed_entry(out, iter->refname,
- iter->oid,
- peel_error ? NULL : &peeled))
- goto write_error;
+ if (write_fn(iter->refname,
+ iter->oid,
+ peel_error ? NULL : &peeled,
+ write_data)) {
+ add_write_error(refs, err);
+ goto error;
+ }
if ((ok = ref_iterator_advance(iter)) != ITER_OK)
iter = NULL;
@@ -1248,15 +711,133 @@ static int write_with_updates(struct packed_ref_store *refs,
int peel_error = peel_object(&update->new_oid,
&peeled);
- if (write_packed_entry(out, update->refname,
- &update->new_oid,
- peel_error ? NULL : &peeled))
- goto write_error;
+ if (write_fn(update->refname,
+ &update->new_oid,
+ peel_error ? NULL : &peeled,
+ write_data)) {
+ add_write_error(refs, err);
+ goto error;
+ }
i++;
}
}
+error:
+ if (iter)
+ ref_iterator_abort(iter);
+ return ok;
+}
+
+static int write_with_updates_v1(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err)
+{
+ FILE *out;
+
+ out = fdopen_tempfile(refs->tempfile, "w");
+ if (!out) {
+ strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
+ strerror(errno));
+ goto error;
+ }
+
+ if (write_packed_file_header_v1(out) < 0) {
+ add_write_error(refs, err);
+ goto error;
+ }
+
+ return merge_iterator_and_updates(refs, updates, err,
+ write_packed_entry_v1, out);
+
+error:
+ return -1;
+}
+
+static int write_with_updates_v2(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err)
+{
+ struct write_packed_refs_v2_context *ctx = create_v2_context(refs, updates, err);
+ int ok = -1;
+
+ if ((ok = write_packed_refs_v2(ctx)) < 0)
+ add_write_error(refs, err);
+
+ free_v2_context(ctx);
+ return ok;
+}
+
+/*
+ * Write the packed refs from the current snapshot to the packed-refs
+ * tempfile, incorporating any changes from `updates`. `updates` must
+ * be a sorted string list whose keys are the refnames and whose util
+ * values are `struct ref_update *`. On error, rollback the tempfile,
+ * write an error message to `err`, and return a nonzero value.
+ *
+ * The packfile must be locked before calling this function and will
+ * remain locked when it is done.
+ */
+static int write_with_updates(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err)
+{
+ int ok;
+ struct strbuf sb = STRBUF_INIT;
+ char *packed_refs_path;
+ int version;
+
+ if (!is_lock_file_locked(&refs->lock))
+ BUG("write_with_updates() called while unlocked");
+
+ /*
+ * If packed-refs is a symlink, we want to overwrite the
+ * symlinked-to file, not the symlink itself. Also, put the
+ * staging file next to it:
+ */
+ packed_refs_path = get_locked_file_path(&refs->lock);
+ strbuf_addf(&sb, "%s.new", packed_refs_path);
+ free(packed_refs_path);
+ refs->tempfile = create_tempfile(sb.buf);
+ if (!refs->tempfile) {
+ strbuf_addf(err, "unable to create file %s: %s",
+ sb.buf, strerror(errno));
+ strbuf_release(&sb);
+ return -1;
+ }
+ strbuf_release(&sb);
+
+ if (!(version = git_env_ulong("GIT_TEST_PACKED_REFS_VERSION", 0)) &&
+ git_config_get_int("refs.packedrefsversion", &version)) {
+ /*
+ * Set the default depending on the current extension
+ * list. Default to version 1 if available, but allow a
+ * default of 2 if only "packed-v2" exists.
+ */
+ if (refs->store_flags & REF_STORE_FORMAT_PACKED)
+ version = 1;
+ else if (refs->store_flags & REF_STORE_FORMAT_PACKED_V2)
+ version = 2;
+ else
+ BUG("writing a packed-refs file without an extension");
+ }
+
+ switch (version) {
+ case 1:
+ ok = write_with_updates_v1(refs, updates, err);
+ break;
+
+ case 2:
+ /* Convert the normal error codes to ITER_DONE. */
+ ok = write_with_updates_v2(refs, updates, err) ? -2 : ITER_DONE;
+ break;
+
+ default:
+ strbuf_addf(err, "unknown packed-refs version: %d",
+ version);
+ goto error;
+ }
+
if (ok != ITER_DONE) {
strbuf_addstr(err, "unable to write packed-refs file: "
"error iterating over old contents");
@@ -1275,14 +856,7 @@ static int write_with_updates(struct packed_ref_store *refs,
return 0;
-write_error:
- strbuf_addf(err, "error writing to %s: %s",
- get_tempfile_path(refs->tempfile), strerror(errno));
-
error:
- if (iter)
- ref_iterator_abort(iter);
-
delete_tempfile(&refs->tempfile);
return -1;
}
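
As a rough summary of the new write_with_updates() above: the format version used for writing is taken from the GIT_TEST_PACKED_REFS_VERSION environment variable when it is set to a nonzero value, otherwise from the refs.packedrefsversion config key, and otherwise it defaults based on the repository's extensions (1 when the v1 "packed" flag is present, 2 when only "packed-v2" is). Assuming the config key behaves as this code suggests, `git config refs.packedrefsversion 2` would make later packed-refs rewrites use the chunked v2 writer.
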
diff --git a/refs/packed-backend.h b/refs/packed-backend.h
index 9dd8a344c3..1936bb5c76 100644
--- a/refs/packed-backend.h
+++ b/refs/packed-backend.h
@@ -1,6 +1,10 @@
#ifndef REFS_PACKED_BACKEND_H
#define REFS_PACKED_BACKEND_H
+#include "../cache.h"
+#include "refs-internal.h"
+#include "../lockfile.h"
+
struct repository;
struct ref_transaction;
@@ -36,4 +40,281 @@ int packed_refs_is_locked(struct ref_store *ref_store);
int is_packed_transaction_needed(struct ref_store *ref_store,
struct ref_transaction *transaction);
+struct packed_ref_store;
+
+/*
+ * A `snapshot` represents one snapshot of a `packed-refs` file.
+ *
+ * Normally, this will be a mmapped view of the contents of the
+ * `packed-refs` file at the time the snapshot was created. However,
+ * if the `packed-refs` file was not sorted, this might point at heap
+ * memory holding the contents of the `packed-refs` file with its
+ * records sorted by refname.
+ *
+ * `snapshot` instances are reference counted (via
+ * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
+ * an instance from disappearing while an iterator is still iterating
+ * over it. Instances are garbage collected when their `referrers`
+ * count goes to zero.
+ *
+ * The most recent `snapshot`, if available, is referenced by the
+ * `packed_ref_store`. Its freshness is checked whenever
+ * `get_snapshot()` is called; if the existing snapshot is obsolete, a
+ * new snapshot is taken.
+ */
+struct snapshot {
+ /*
+ * A back-pointer to the packed_ref_store with which this
+ * snapshot is associated:
+ */
+ struct packed_ref_store *refs;
+
+ /* Is the `packed-refs` file currently mmapped? */
+ int mmapped;
+
+	/* Which file format version does this file use? */
+ int version;
+
+ /*
+ * The contents of the `packed-refs` file:
+ *
+ * - buf -- a pointer to the start of the memory
+ * - start -- a pointer to the first byte of actual references
+ * (i.e., after the header line, if one is present)
+ * - eof -- a pointer just past the end of the reference
+ * contents
+ *
+ * If the `packed-refs` file was already sorted, `buf` points
+ * at the mmapped contents of the file. If not, it points at
+ * heap-allocated memory containing the contents, sorted. If
+ * there were no contents (e.g., because the file didn't
+ * exist), `buf`, `start`, and `eof` are all NULL.
+ */
+ char *buf, *start, *eof;
+
+ /*
+ * What is the peeled state of the `packed-refs` file that
+ * this snapshot represents? (This is usually determined from
+ * the file's header.)
+ */
+ enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;
+
+ /*************************
+ * packed-refs v2 values *
+ *************************/
+ size_t nr;
+ size_t prefixes_nr;
+ size_t buflen;
+ const unsigned char *offset_chunk;
+ const char *refs_chunk;
+ const unsigned char *prefix_offsets_chunk;
+ const char *prefix_chunk;
+
+ /*
+ * Count of references to this instance, including the pointer
+ * from `packed_ref_store::snapshot`, if any. The instance
+ * will not be freed as long as the reference count is
+ * nonzero.
+ */
+ unsigned int referrers;
+
+ /*
+ * The metadata of the `packed-refs` file from which this
+ * snapshot was created, used to tell if the file has been
+ * replaced since we read it.
+ */
+ struct stat_validity validity;
+};
+
+int release_snapshot(struct snapshot *snapshot);
+
+/*
+ * If the buffer in `snapshot` is active, then either munmap the
+ * memory and close the file, or free the memory. Then set the buffer
+ * pointers to NULL.
+ */
+void clear_snapshot_buffer(struct snapshot *snapshot);
+
+/*
+ * A `ref_store` representing references stored in a `packed-refs`
+ * file. It implements the `ref_store` interface, though it has some
+ * limitations:
+ *
+ * - It cannot store symbolic references.
+ *
+ * - It cannot store reflogs.
+ *
+ * - It does not support reference renaming (though it could).
+ *
+ * On the other hand, it can be locked outside of a reference
+ * transaction. In that case, it remains locked even after the
+ * transaction is done and the new `packed-refs` file is activated.
+ */
+struct packed_ref_store {
+ struct ref_store base;
+
+ unsigned int store_flags;
+
+ /* The path of the "packed-refs" file: */
+ char *path;
+
+ /*
+ * A snapshot of the values read from the `packed-refs` file,
+ * if it might still be current; otherwise, NULL.
+ */
+ struct snapshot *snapshot;
+
+ /*
+ * Lock used for the "packed-refs" file. Note that this (and
+ * thus the enclosing `packed_ref_store`) must not be freed.
+ */
+ struct lock_file lock;
+
+ /*
+ * Temporary file used when rewriting new contents to the
+ * "packed-refs" file. Note that this (and thus the enclosing
+ * `packed_ref_store`) must not be freed.
+ */
+ struct tempfile *tempfile;
+};
+
+/*
+ * This value is set in `base.flags` if the peeled value of the
+ * current reference is known. In that case, `peeled` contains the
+ * correct peeled value for the reference, which might be `null_oid`
+ * if the reference is not a tag or if it is broken.
+ */
+#define REF_KNOWS_PEELED 0x40
+
+/*
+ * An iterator over a snapshot of a `packed-refs` file.
+ */
+struct packed_ref_iterator {
+ struct ref_iterator base;
+ struct snapshot *snapshot;
+ struct repository *repo;
+ unsigned int flags;
+ int version;
+
+ /* Scratch space for current values: */
+ struct object_id oid, peeled;
+ struct strbuf refname_buf;
+
+ /* The current position in the snapshot's buffer: */
+ const char *pos;
+
+ /***********************************
+ * packed-refs v1 iterator values. *
+ ***********************************/
+
+ /* The end of the part of the buffer that will be iterated over: */
+ const char *eof;
+
+ /***********************************
+ * packed-refs v2 iterator values. *
+ ***********************************/
+ size_t nr;
+ size_t row;
+ size_t prefix_row_end;
+ size_t prefix_i;
+ const char *cur_prefix;
+};
+
+typedef int (*write_ref_fn)(const char *refname,
+ const struct object_id *oid,
+ const struct object_id *peeled,
+ void *write_data);
+
+int merge_iterator_and_updates(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err,
+ write_ref_fn write_fn,
+ void *write_data);
+
+/**
+ * Parse the buffer at the given snapshot to verify that it is a
+ * packed-refs file in version 1 format. Update the snapshot->peeled
+ * value according to the header information. Update the given
+ * 'sorted' value with whether or not the packed-refs file is sorted.
+ */
+int parse_packed_format_v1_header(struct packed_ref_store *refs,
+ struct snapshot *snapshot,
+ int *sorted);
+
+/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+const char *find_reference_location_v1(struct snapshot *snapshot,
+ const char *refname, int mustexist);
+
+int packed_read_raw_ref_v1(struct packed_ref_store *refs, struct snapshot *snapshot,
+ const char *refname, struct object_id *oid,
+ unsigned int *type, int *failure_errno);
+
+void verify_buffer_safe_v1(struct snapshot *snapshot);
+void sort_snapshot_v1(struct snapshot *snapshot);
+int write_packed_file_header_v1(FILE *out);
+int next_record_v1(struct packed_ref_iterator *iter);
+int write_packed_entry_v1(const char *refname,
+ const struct object_id *oid,
+ const struct object_id *peeled,
+ void *write_data);
+
+int detect_packed_format_v2_header(struct packed_ref_store *refs,
+ struct snapshot *snapshot);
+/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+const char *find_reference_location_v2(struct snapshot *snapshot,
+ const char *refname, int mustexist,
+ size_t *pos);
+
+int packed_read_raw_ref_v2(struct packed_ref_store *refs, struct snapshot *snapshot,
+ const char *refname, struct object_id *oid,
+ unsigned int *type, int *failure_errno);
+int next_record_v2(struct packed_ref_iterator *iter);
+void fill_snapshot_v2(struct snapshot *snapshot);
+
+struct write_packed_refs_v2_context;
+struct write_packed_refs_v2_context *create_v2_context(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err);
+int write_packed_refs_v2(struct write_packed_refs_v2_context *ctx);
+void free_v2_context(struct write_packed_refs_v2_context *ctx);
+
+void init_iterator_prefix_info(const char *prefix,
+ struct packed_ref_iterator *iter);
+
#endif /* REFS_PACKED_BACKEND_H */
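
To make the write_ref_fn contract declared above concrete, here is a minimal sketch of a hypothetical callback that only counts the entries it is handed; the function name and the counter are illustrative and not part of this patch:

    /* Hypothetical write_ref_fn: count entries instead of writing them. */
    static int count_packed_entry(const char *refname,
                                  const struct object_id *oid,
                                  const struct object_id *peeled,
                                  void *write_data)
    {
            size_t *count = write_data;
            (*count)++;     /* refname, oid and peeled are ignored here */
            return 0;       /* nonzero makes merge_iterator_and_updates() bail out */
    }

A writer would pass count_packed_entry as write_fn and a pointer to its counter as write_data, just as write_with_updates_v1() passes write_packed_entry_v1 together with its FILE handle.
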
diff --git a/refs/packed-format-v1.c b/refs/packed-format-v1.c
new file mode 100644
index 0000000000..2d071567c0
--- /dev/null
+++ b/refs/packed-format-v1.c
@@ -0,0 +1,456 @@
+#include "../cache.h"
+#include "../config.h"
+#include "../refs.h"
+#include "refs-internal.h"
+#include "packed-backend.h"
+#include "../iterator.h"
+#include "../lockfile.h"
+#include "../chdir-notify.h"
+
+static NORETURN void die_unterminated_line(const char *path,
+ const char *p, size_t len)
+{
+ if (len < 80)
+ die("unterminated line in %s: %.*s", path, (int)len, p);
+ else
+ die("unterminated line in %s: %.75s...", path, p);
+}
+
+static NORETURN void die_invalid_line(const char *path,
+ const char *p, size_t len)
+{
+ const char *eol = memchr(p, '\n', len);
+
+ if (!eol)
+ die_unterminated_line(path, p, len);
+ else if (eol - p < 80)
+ die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
+ else
+ die("unexpected line in %s: %.75s...", path, p);
+}
+
+struct snapshot_record {
+ const char *start;
+ size_t len;
+};
+
+static int cmp_packed_ref_records(const void *v1, const void *v2)
+{
+ const struct snapshot_record *e1 = v1, *e2 = v2;
+ const char *r1 = e1->start + the_hash_algo->hexsz + 1;
+ const char *r2 = e2->start + the_hash_algo->hexsz + 1;
+
+ while (1) {
+ if (*r1 == '\n')
+ return *r2 == '\n' ? 0 : -1;
+ if (*r1 != *r2) {
+ if (*r2 == '\n')
+ return 1;
+ else
+ return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
+ }
+ r1++;
+ r2++;
+ }
+}
+
+/*
+ * Compare a snapshot record at `rec` to the specified NUL-terminated
+ * refname.
+ */
+static int cmp_record_to_refname(const char *rec, const char *refname)
+{
+ const char *r1 = rec + the_hash_algo->hexsz + 1;
+ const char *r2 = refname;
+
+ while (1) {
+ if (*r1 == '\n')
+ return *r2 ? -1 : 0;
+ if (!*r2)
+ return 1;
+ if (*r1 != *r2)
+ return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
+ r1++;
+ r2++;
+ }
+}
+
+/*
+ * `snapshot->buf` is not known to be sorted. Check whether it is, and
+ * if not, sort it into new memory and munmap/free the old storage.
+ */
+void sort_snapshot_v1(struct snapshot *snapshot)
+{
+ struct snapshot_record *records = NULL;
+ size_t alloc = 0, nr = 0;
+ int sorted = 1;
+ const char *pos, *eof, *eol;
+ size_t len, i;
+ char *new_buffer, *dst;
+
+ pos = snapshot->start;
+ eof = snapshot->eof;
+
+ if (pos == eof)
+ return;
+
+ len = eof - pos;
+
+ /*
+ * Initialize records based on a crude estimate of the number
+ * of references in the file (we'll grow it below if needed):
+ */
+ ALLOC_GROW(records, len / 80 + 20, alloc);
+
+ while (pos < eof) {
+ eol = memchr(pos, '\n', eof - pos);
+ if (!eol)
+ /* The safety check should prevent this. */
+ BUG("unterminated line found in packed-refs");
+ if (eol - pos < the_hash_algo->hexsz + 2)
+ die_invalid_line(snapshot->refs->path,
+ pos, eof - pos);
+ eol++;
+ if (eol < eof && *eol == '^') {
+ /*
+ * Keep any peeled line together with its
+ * reference:
+ */
+ const char *peeled_start = eol;
+
+ eol = memchr(peeled_start, '\n', eof - peeled_start);
+ if (!eol)
+ /* The safety check should prevent this. */
+ BUG("unterminated peeled line found in packed-refs");
+ eol++;
+ }
+
+ ALLOC_GROW(records, nr + 1, alloc);
+ records[nr].start = pos;
+ records[nr].len = eol - pos;
+ nr++;
+
+ if (sorted &&
+ nr > 1 &&
+ cmp_packed_ref_records(&records[nr - 2],
+ &records[nr - 1]) >= 0)
+ sorted = 0;
+
+ pos = eol;
+ }
+
+ if (sorted)
+ goto cleanup;
+
+ /* We need to sort the memory. First we sort the records array: */
+ QSORT(records, nr, cmp_packed_ref_records);
+
+ /*
+ * Allocate a new chunk of memory, and copy the old memory to
+ * the new in the order indicated by `records` (not bothering
+ * with the header line):
+ */
+ new_buffer = xmalloc(len);
+ for (dst = new_buffer, i = 0; i < nr; i++) {
+ memcpy(dst, records[i].start, records[i].len);
+ dst += records[i].len;
+ }
+
+ /*
+ * Now munmap the old buffer and use the sorted buffer in its
+ * place:
+ */
+ clear_snapshot_buffer(snapshot);
+ snapshot->buf = snapshot->start = new_buffer;
+ snapshot->eof = new_buffer + len;
+
+cleanup:
+ free(records);
+}
+
+/*
+ * Return a pointer to the start of the record that contains the
+ * character `*p` (which must be within the buffer). If no other
+ * record start is found, return `buf`.
+ */
+static const char *find_start_of_record(const char *buf, const char *p)
+{
+ while (p > buf && (p[-1] != '\n' || p[0] == '^'))
+ p--;
+ return p;
+}
+
+/*
+ * Return a pointer to the start of the record following the record
+ * that contains `*p`. If none is found before `end`, return `end`.
+ */
+static const char *find_end_of_record(const char *p, const char *end)
+{
+ while (++p < end && (p[-1] != '\n' || p[0] == '^'))
+ ;
+ return p;
+}
+
+/*
+ * We want to be able to compare mmapped reference records quickly,
+ * without totally parsing them. We can do so because the records are
+ * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
+ * + 1) bytes past the beginning of the record.
+ *
+ * But what if the `packed-refs` file contains garbage? We're willing
+ * to tolerate not detecting the problem, as long as we don't produce
+ * totally garbled output (we can't afford to check the integrity of
+ * the whole file during every Git invocation). But we do want to be
+ * sure that we never read past the end of the buffer in memory and
+ * perform an illegal memory access.
+ *
+ * Guarantee that minimum level of safety by verifying that the last
+ * record in the file is LF-terminated, and that it has at least
+ * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
+ * these checks fails.
+ */
+void verify_buffer_safe_v1(struct snapshot *snapshot)
+{
+ const char *start = snapshot->start;
+ const char *eof = snapshot->eof;
+ const char *last_line;
+
+ if (start == eof)
+ return;
+
+ last_line = find_start_of_record(start, eof - 1);
+ if (*(eof - 1) != '\n' || eof - last_line < the_hash_algo->hexsz + 2)
+ die_invalid_line(snapshot->refs->path,
+ last_line, eof - last_line);
+}
+
+/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+const char *find_reference_location_v1(struct snapshot *snapshot,
+ const char *refname, int mustexist)
+{
+ /*
+ * This is not *quite* a garden-variety binary search, because
+ * the data we're searching is made up of records, and we
+ * always need to find the beginning of a record to do a
+ * comparison. A "record" here is one line for the reference
+ * itself and zero or one peel lines that start with '^'. Our
+ * loop invariant is described in the next two comments.
+ */
+
+ /*
+ * A pointer to the character at the start of a record whose
+ * preceding records all have reference names that come
+ * *before* `refname`.
+ */
+ const char *lo = snapshot->start;
+
+ /*
+	 * A pointer to the first character of a record whose
+ * reference name comes *after* `refname`.
+ */
+ const char *hi = snapshot->eof;
+
+ while (lo != hi) {
+ const char *mid, *rec;
+ int cmp;
+
+ mid = lo + (hi - lo) / 2;
+ rec = find_start_of_record(lo, mid);
+ cmp = cmp_record_to_refname(rec, refname);
+ if (cmp < 0) {
+ lo = find_end_of_record(mid, hi);
+ } else if (cmp > 0) {
+ hi = rec;
+ } else {
+ return rec;
+ }
+ }
+
+ if (mustexist)
+ return NULL;
+ else
+ return lo;
+}
+
+int parse_packed_format_v1_header(struct packed_ref_store *refs,
+ struct snapshot *snapshot,
+ int *sorted)
+{
+ *sorted = 0;
+ /* If the file has a header line, process it: */
+ if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
+ char *tmp, *p, *eol;
+ struct string_list traits = STRING_LIST_INIT_NODUP;
+
+ eol = memchr(snapshot->buf, '\n',
+ snapshot->eof - snapshot->buf);
+ if (!eol)
+ die_unterminated_line(refs->path,
+ snapshot->buf,
+ snapshot->eof - snapshot->buf);
+
+ tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
+
+ if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
+ die_invalid_line(refs->path,
+ snapshot->buf,
+ snapshot->eof - snapshot->buf);
+
+ string_list_split_in_place(&traits, p, ' ', -1);
+
+ if (unsorted_string_list_has_string(&traits, "fully-peeled"))
+ snapshot->peeled = PEELED_FULLY;
+ else if (unsorted_string_list_has_string(&traits, "peeled"))
+ snapshot->peeled = PEELED_TAGS;
+
+ *sorted = unsorted_string_list_has_string(&traits, "sorted");
+
+ /* perhaps other traits later as well */
+
+ /* The "+ 1" is for the LF character. */
+ snapshot->start = eol + 1;
+
+ string_list_clear(&traits, 0);
+ free(tmp);
+ }
+
+ return 0;
+}
+
+int packed_read_raw_ref_v1(struct packed_ref_store *refs, struct snapshot *snapshot,
+ const char *refname, struct object_id *oid,
+ unsigned int *type, int *failure_errno)
+{
+ const char *rec;
+
+ *type = 0;
+
+ rec = find_reference_location_v1(snapshot, refname, 1);
+
+ if (!rec) {
+ /* refname is not a packed reference. */
+ *failure_errno = ENOENT;
+ return -1;
+ }
+
+ if (get_oid_hex(rec, oid))
+ die_invalid_line(refs->path, rec, snapshot->eof - rec);
+
+ *type = REF_ISPACKED;
+ return 0;
+}
+
+int next_record_v1(struct packed_ref_iterator *iter)
+{
+ const char *p = iter->pos, *eol;
+
+ strbuf_reset(&iter->refname_buf);
+
+ if (iter->pos == iter->eof)
+ return ITER_DONE;
+
+ iter->base.flags = REF_ISPACKED;
+
+ if (iter->eof - p < the_hash_algo->hexsz + 2 ||
+ parse_oid_hex(p, &iter->oid, &p) ||
+ !isspace(*p++))
+ die_invalid_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+
+ eol = memchr(p, '\n', iter->eof - p);
+ if (!eol)
+ die_unterminated_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+
+ strbuf_add(&iter->refname_buf, p, eol - p);
+ iter->base.refname = iter->refname_buf.buf;
+
+ if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!refname_is_safe(iter->base.refname))
+ die("packed refname is dangerous: %s",
+ iter->base.refname);
+ oidclr(&iter->oid);
+ iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
+ }
+ if (iter->snapshot->peeled == PEELED_FULLY ||
+ (iter->snapshot->peeled == PEELED_TAGS &&
+ starts_with(iter->base.refname, "refs/tags/")))
+ iter->base.flags |= REF_KNOWS_PEELED;
+
+ iter->pos = eol + 1;
+
+ if (iter->pos < iter->eof && *iter->pos == '^') {
+ p = iter->pos + 1;
+ if (iter->eof - p < the_hash_algo->hexsz + 1 ||
+ parse_oid_hex(p, &iter->peeled, &p) ||
+ *p++ != '\n')
+ die_invalid_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+ iter->pos = p;
+
+ /*
+ * Regardless of what the file header said, we
+ * definitely know the value of *this* reference. But
+ * we suppress it if the reference is broken:
+ */
+ if ((iter->base.flags & REF_ISBROKEN)) {
+ oidclr(&iter->peeled);
+ iter->base.flags &= ~REF_KNOWS_PEELED;
+ } else {
+ iter->base.flags |= REF_KNOWS_PEELED;
+ }
+ } else {
+ oidclr(&iter->peeled);
+ }
+
+ return ITER_OK;
+}
+
+/*
+ * The packed-refs header line that we write out. Perhaps other traits
+ * will be added later.
+ *
+ * Note that earlier versions of Git used to parse these traits by
+ * looking for " trait " in the line. For this reason, the space after
+ * the colon and the trailing space are required.
+ */
+static const char PACKED_REFS_HEADER[] =
+ "# pack-refs with: peeled fully-peeled sorted \n";
+
+int write_packed_file_header_v1(FILE *out)
+{
+ return fprintf(out, "%s", PACKED_REFS_HEADER);
+}
+
+/*
+ * Write an entry to the packed-refs file for the specified refname.
+ * If peeled is non-NULL, write it as the entry's peeled value. On
+ * error, return a nonzero value and leave errno set at the value left
+ * by the failing call to `fprintf()`.
+ */
+int write_packed_entry_v1(const char *refname,
+ const struct object_id *oid,
+ const struct object_id *peeled,
+ void *write_data)
+{
+ FILE *fh = write_data;
+
+ if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||
+ (peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))
+ return -1;
+
+ return 0;
+}
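
For reference, the version 1 file produced by write_packed_file_header_v1() and write_packed_entry_v1() above looks like the following (hypothetical object IDs; the header line keeps its trailing space, which the comment above notes is required, and the '^' line carries the peeled value of the annotated tag on the previous line):

    # pack-refs with: peeled fully-peeled sorted 
    3f786850e387550fdab836ed7e6dc881de23001b refs/heads/main
    89e6c98d92887913cadf06b2adb97f26cde4849b refs/tags/v1.0
    ^2b26c15c4375d90203783fb4c2a45c2bd4f5aac1
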
diff --git a/refs/packed-format-v2.c b/refs/packed-format-v2.c
new file mode 100644
index 0000000000..ada34bf9bf
--- /dev/null
+++ b/refs/packed-format-v2.c
@@ -0,0 +1,624 @@
+#include "../cache.h"
+#include "../config.h"
+#include "../refs.h"
+#include "refs-internal.h"
+#include "packed-backend.h"
+#include "../iterator.h"
+#include "../lockfile.h"
+#include "../chdir-notify.h"
+#include "../chunk-format.h"
+#include "../csum-file.h"
+
+#define OFFSET_IS_PEELED (((uint64_t)1) << 63)
+
+#define PACKED_REFS_SIGNATURE 0x50524546 /* "PREF" */
+#define CHREFS_CHUNKID_OFFSETS 0x524F4646 /* "ROFF" */
+#define CHREFS_CHUNKID_REFS 0x52454653 /* "REFS" */
+#define CHREFS_CHUNKID_PREFIX_DATA 0x50465844 /* "PFXD" */
+#define CHREFS_CHUNKID_PREFIX_OFFSETS 0x5046584F /* "PFXO" */
+
+static const char *get_nth_prefix(struct snapshot *snapshot,
+ size_t n, size_t *len)
+{
+ uint64_t offset, next_offset;
+
+ if (n >= snapshot->prefixes_nr)
+ BUG("asking for prefix %"PRIu64" outside of bounds (%"PRIu64")",
+ (uint64_t)n, (uint64_t)snapshot->prefixes_nr);
+
+ if (n)
+ offset = get_be32(snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * (n - 1));
+ else
+ offset = 0;
+
+ if (len) {
+ next_offset = get_be32(snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * n);
+
+ /* Prefix includes null terminator. */
+ *len = next_offset - offset - 1;
+ }
+
+ return snapshot->prefix_chunk + offset;
+}
+
+/*
+ * Find the prefix in the prefix chunk that could match the start of
+ * `refname`, using a binary search over the prefix table. On an exact
+ * prefix match, store its index in `*pos` and return a pointer to the
+ * prefix string. Otherwise, store the insertion position in `*pos` and
+ * return the prefix at that position, or NULL if `refname` sorts after
+ * every stored prefix.
+ */
+static const char *find_prefix_location(struct snapshot *snapshot,
+ const char *refname, size_t *pos)
+{
+ size_t lo = 0, hi = snapshot->prefixes_nr;
+
+ while (lo != hi) {
+ const char *rec;
+ int cmp;
+ size_t len;
+ size_t mid = lo + (hi - lo) / 2;
+
+ rec = get_nth_prefix(snapshot, mid, &len);
+ cmp = strncmp(rec, refname, len);
+ if (cmp < 0) {
+ lo = mid + 1;
+ } else if (cmp > 0) {
+ hi = mid;
+ } else {
+ /* we have a prefix match! */
+ *pos = mid;
+ return rec;
+ }
+ }
+
+ *pos = lo;
+ if (lo < snapshot->prefixes_nr)
+ return get_nth_prefix(snapshot, lo, NULL);
+ else
+ return NULL;
+}
+
+int detect_packed_format_v2_header(struct packed_ref_store *refs,
+ struct snapshot *snapshot)
+{
+ /*
+	 * A packed-refs v1 file might not have a header at all, so the two
+	 * formats are distinguished by whether the v2 signature is present.
+ */
+ return get_be32(snapshot->buf) == PACKED_REFS_SIGNATURE;
+}
+
+static const char *get_nth_ref(struct snapshot *snapshot,
+ size_t n)
+{
+ uint64_t offset;
+
+ if (n >= snapshot->nr)
+ BUG("asking for position %"PRIu64" outside of bounds (%"PRIu64")",
+ (uint64_t)n, (uint64_t)snapshot->nr);
+
+ if (n)
+ offset = get_be64(snapshot->offset_chunk + (n-1) * sizeof(uint64_t))
+ & ~OFFSET_IS_PEELED;
+ else
+ offset = 0;
+
+ return snapshot->refs_chunk + offset;
+}
+
+/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+const char *find_reference_location_v2(struct snapshot *snapshot,
+ const char *refname, int mustexist,
+ size_t *pos)
+{
+ size_t lo = 0, hi = snapshot->nr;
+
+ if (snapshot->prefix_chunk) {
+ size_t prefix_row;
+ const char *prefix;
+ int found = 1;
+
+ prefix = find_prefix_location(snapshot, refname, &prefix_row);
+
+ if (!prefix || !starts_with(refname, prefix)) {
+ if (mustexist)
+ return NULL;
+ found = 0;
+ }
+
+ /* The second 4-byte column of the prefix offsets */
+ if (prefix_row) {
+ /* if prefix_row == 0, then lo = 0, which is already true. */
+ lo = get_be32(snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * (prefix_row - 1) + sizeof(uint32_t));
+ }
+
+ if (!found) {
+ const char *ret;
+ /* Terminate early with this lo position as the insertion point. */
+ if (pos)
+ *pos = lo;
+
+ if (lo >= snapshot->nr)
+ return NULL;
+
+ ret = get_nth_ref(snapshot, lo);
+ return ret;
+ }
+
+ hi = get_be32(snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * prefix_row + sizeof(uint32_t));
+
+ if (prefix)
+ refname += strlen(prefix);
+ }
+
+ while (lo != hi) {
+ const char *rec;
+ int cmp;
+ size_t mid = lo + (hi - lo) / 2;
+
+ rec = get_nth_ref(snapshot, mid);
+ cmp = strcmp(rec, refname);
+ if (cmp < 0) {
+ lo = mid + 1;
+ } else if (cmp > 0) {
+ hi = mid;
+ } else {
+ if (pos)
+ *pos = mid;
+ return rec;
+ }
+ }
+
+ if (mustexist) {
+ return NULL;
+ } else {
+ const char *ret;
+ /*
+ * We are likely doing a prefix match, so use the current
+ * 'lo' position as the indicator.
+ */
+ if (pos)
+ *pos = lo;
+ if (lo >= snapshot->nr)
+ return NULL;
+
+ ret = get_nth_ref(snapshot, lo);
+ return ret;
+ }
+}
+
+int packed_read_raw_ref_v2(struct packed_ref_store *refs, struct snapshot *snapshot,
+ const char *refname, struct object_id *oid,
+ unsigned int *type, int *failure_errno)
+{
+ const char *rec;
+
+ *type = 0;
+
+ rec = find_reference_location_v2(snapshot, refname, 1, NULL);
+
+ if (!rec) {
+ /* refname is not a packed reference. */
+ *failure_errno = ENOENT;
+ return -1;
+ }
+
+ hashcpy(oid->hash, (const unsigned char *)rec + strlen(rec) + 1);
+ oid->algo = hash_algo_by_ptr(the_hash_algo);
+
+ *type = REF_ISPACKED;
+ return 0;
+}
+
+static int packed_refs_read_offsets(const unsigned char *chunk_start,
+ size_t chunk_size, void *data)
+{
+ struct snapshot *snapshot = data;
+
+ snapshot->offset_chunk = chunk_start;
+ snapshot->nr = chunk_size / sizeof(uint64_t);
+ return 0;
+}
+
+static int packed_refs_read_prefix_offsets(const unsigned char *chunk_start,
+ size_t chunk_size, void *data)
+{
+ struct snapshot *snapshot = data;
+
+ snapshot->prefix_offsets_chunk = chunk_start;
+ snapshot->prefixes_nr = chunk_size / sizeof(uint64_t);
+ return 0;
+}
+
+void fill_snapshot_v2(struct snapshot *snapshot)
+{
+ uint32_t file_signature, file_version, hash_version;
+ struct chunkfile *cf;
+
+ file_signature = get_be32(snapshot->buf);
+ if (file_signature != PACKED_REFS_SIGNATURE)
+ die(_("%s file signature %X does not match signature %X"),
+ "packed-ref", file_signature, PACKED_REFS_SIGNATURE);
+
+ file_version = get_be32(snapshot->buf + sizeof(uint32_t));
+ if (file_version != 2)
+ die(_("format version %u does not match expected file version %u"),
+ file_version, 2);
+
+ hash_version = get_be32(snapshot->buf + 2 * sizeof(uint32_t));
+ if (hash_version != the_hash_algo->format_id)
+ die(_("hash version %X does not match expected hash version %X"),
+ hash_version, the_hash_algo->format_id);
+
+ cf = init_chunkfile(NULL);
+
+ if (read_trailing_table_of_contents(cf, (const unsigned char *)snapshot->buf, snapshot->buflen)) {
+ release_snapshot(snapshot);
+ snapshot = NULL;
+ goto cleanup;
+ }
+
+ read_chunk(cf, CHREFS_CHUNKID_OFFSETS, packed_refs_read_offsets, snapshot);
+ pair_chunk(cf, CHREFS_CHUNKID_REFS, (const unsigned char**)&snapshot->refs_chunk);
+
+ read_chunk(cf, CHREFS_CHUNKID_PREFIX_OFFSETS, packed_refs_read_prefix_offsets, snapshot);
+ pair_chunk(cf, CHREFS_CHUNKID_PREFIX_DATA, (const unsigned char**)&snapshot->prefix_chunk);
+
+ /* TODO: add error checks for invalid chunk combinations. */
+
+cleanup:
+ free_chunkfile(cf);
+}
+
+/*
+ * Move the iterator to the next record in the snapshot, without
+ * respect for whether the record is actually required by the current
+ * iteration. Adjust the fields in `iter` and return `ITER_OK` or
+ * `ITER_DONE`. This function does not free the iterator in the case
+ * of `ITER_DONE`.
+ */
+int next_record_v2(struct packed_ref_iterator *iter)
+{
+ uint64_t offset;
+ const char *pos = iter->pos;
+ strbuf_reset(&iter->refname_buf);
+
+ if (iter->row == iter->snapshot->nr)
+ return ITER_DONE;
+
+ iter->base.flags = REF_ISPACKED;
+
+ if (iter->cur_prefix)
+ strbuf_addstr(&iter->refname_buf, iter->cur_prefix);
+ strbuf_addstr(&iter->refname_buf, pos);
+ iter->base.refname = iter->refname_buf.buf;
+ pos += strlen(pos) + 1;
+
+ hashcpy(iter->oid.hash, (const unsigned char *)pos);
+ iter->oid.algo = hash_algo_by_ptr(the_hash_algo);
+ pos += the_hash_algo->rawsz;
+
+ if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!refname_is_safe(iter->base.refname))
+ die("packed refname is dangerous: %s",
+ iter->base.refname);
+ oidclr(&iter->oid);
+ iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
+ }
+
+ /* We always know the peeled value! */
+ iter->base.flags |= REF_KNOWS_PEELED;
+
+ offset = get_be64(iter->snapshot->offset_chunk + sizeof(uint64_t) * iter->row);
+ if (offset & OFFSET_IS_PEELED) {
+ hashcpy(iter->peeled.hash, (const unsigned char *)pos);
+ iter->peeled.algo = hash_algo_by_ptr(the_hash_algo);
+ } else {
+ oidclr(&iter->peeled);
+ }
+
+	/*
+	 * TODO: somehow all tags are getting OFFSET_IS_PEELED even though
+	 * some are not annotated tags.
+	 */
+ iter->pos = iter->snapshot->refs_chunk + (offset & (~OFFSET_IS_PEELED));
+
+ iter->row++;
+
+ if (iter->row == iter->prefix_row_end && iter->snapshot->prefix_chunk) {
+ size_t prefix_pos = get_be32(iter->snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * iter->prefix_i);
+ iter->cur_prefix = iter->snapshot->prefix_chunk + prefix_pos;
+ iter->prefix_i++;
+ iter->prefix_row_end = get_be32(iter->snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * iter->prefix_i + sizeof(uint32_t));
+ }
+
+ return ITER_OK;
+}
+
+void init_iterator_prefix_info(const char *prefix,
+ struct packed_ref_iterator *iter)
+{
+ struct snapshot *snapshot = iter->snapshot;
+
+ if (snapshot->version != 2 || !snapshot->prefix_chunk) {
+ iter->prefix_row_end = snapshot->nr;
+ return;
+ }
+
+ if (prefix)
+ iter->cur_prefix = find_prefix_location(snapshot, prefix, &iter->prefix_i);
+ else {
+ iter->cur_prefix = snapshot->prefix_chunk;
+ iter->prefix_i = 0;
+ }
+
+ iter->prefix_row_end = get_be32(snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * iter->prefix_i +
+ sizeof(uint32_t));
+}
+
+struct write_packed_refs_v2_context {
+ struct packed_ref_store *refs;
+ struct string_list *updates;
+ struct strbuf *err;
+
+ struct hashfile *f;
+ struct chunkfile *cf;
+
+ /*
+ * As we stream the ref names to the refs chunk, store these
+ * values in-memory. These arrays are populated one for every ref.
+ */
+ uint64_t *offsets;
+ size_t nr;
+ size_t offsets_alloc;
+
+ int write_prefixes;
+ const char *cur_prefix;
+ size_t cur_prefix_len;
+
+ char **prefixes;
+ uint32_t *prefix_offsets;
+ uint32_t *prefix_rows;
+ size_t prefix_nr;
+ size_t prefixes_alloc;
+ size_t prefix_offsets_alloc;
+ size_t prefix_rows_alloc;
+};
+
+struct write_packed_refs_v2_context *create_v2_context(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err)
+{
+ struct write_packed_refs_v2_context *ctx;
+ int do_skip_hash;
+ CALLOC_ARRAY(ctx, 1);
+
+ ctx->refs = refs;
+ ctx->updates = updates;
+ ctx->err = err;
+
+ if (!fdopen_tempfile(refs->tempfile, "w")) {
+ strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
+ strerror(errno));
+ return ctx;
+ }
+
+ ctx->f = hashfd(refs->tempfile->fd, refs->tempfile->filename.buf);
+
+	/* Default to skipping the hash when refs.hashpackedrefs is not set. */
+ if (git_config_get_maybe_bool("refs.hashpackedrefs", &do_skip_hash) ||
+ do_skip_hash)
+ ctx->f->skip_hash = 1;
+
+ ctx->cf = init_chunkfile(ctx->f);
+
+ return ctx;
+}
+
+static int write_packed_entry_v2(const char *refname,
+ const struct object_id *oid,
+ const struct object_id *peeled,
+ void *write_data)
+{
+ struct write_packed_refs_v2_context *ctx = write_data;
+ size_t reflen = strlen(refname) + 1;
+ size_t i = ctx->nr;
+
+ ALLOC_GROW(ctx->offsets, i + 1, ctx->offsets_alloc);
+
+ if (ctx->write_prefixes) {
+ if (ctx->cur_prefix && starts_with(refname, ctx->cur_prefix)) {
+ /* skip ahead! */
+ refname += ctx->cur_prefix_len;
+ reflen -= ctx->cur_prefix_len;
+ } else {
+ size_t len;
+ const char *slash, *slashslash = NULL;
+ if (ctx->prefix_nr) {
+ /* close out the old prefix. */
+ ctx->prefix_rows[ctx->prefix_nr - 1] = ctx->nr;
+ }
+
+ /* Find the new prefix. */
+ slash = strchr(refname, '/');
+ if (slash)
+ slashslash = strchr(slash + 1, '/');
+			/* If there are two slashes, use the second one. */
+ slash = slashslash ? slashslash : slash;
+ /*
+ * If there is at least one slash, use that,
+ * and include the slash in the string.
+ * Otherwise, use the end of the ref.
+ */
+ slash = slash ? slash + 1 : refname + strlen(refname);
+
+ len = slash - refname;
+ ALLOC_GROW(ctx->prefixes, ctx->prefix_nr + 1, ctx->prefixes_alloc);
+ ALLOC_GROW(ctx->prefix_offsets, ctx->prefix_nr + 1, ctx->prefix_offsets_alloc);
+ ALLOC_GROW(ctx->prefix_rows, ctx->prefix_nr + 1, ctx->prefix_rows_alloc);
+
+ if (ctx->prefix_nr)
+ ctx->prefix_offsets[ctx->prefix_nr] = ctx->prefix_offsets[ctx->prefix_nr - 1] + len + 1;
+ else
+ ctx->prefix_offsets[ctx->prefix_nr] = len + 1;
+
+ ctx->prefixes[ctx->prefix_nr] = xstrndup(refname, len);
+ ctx->cur_prefix = ctx->prefixes[ctx->prefix_nr];
+ ctx->prefix_nr++;
+
+ refname += len;
+ reflen -= len;
+ ctx->cur_prefix_len = len;
+ }
+
+ /* Update the last row continually. */
+ ctx->prefix_rows[ctx->prefix_nr - 1] = i + 1;
+ }
+
+ /* Write entire ref, including null terminator. */
+ hashwrite(ctx->f, refname, reflen);
+ hashwrite(ctx->f, oid->hash, the_hash_algo->rawsz);
+ if (peeled)
+ hashwrite(ctx->f, peeled->hash, the_hash_algo->rawsz);
+
+ if (i)
+ ctx->offsets[i] = (ctx->offsets[i - 1] & (~OFFSET_IS_PEELED));
+ else
+ ctx->offsets[i] = 0;
+ ctx->offsets[i] += reflen + the_hash_algo->rawsz;
+
+ if (peeled) {
+ ctx->offsets[i] += the_hash_algo->rawsz;
+ ctx->offsets[i] |= OFFSET_IS_PEELED;
+ }
+
+ ctx->nr++;
+ return 0;
+}
+
+static int write_refs_chunk_refs(struct hashfile *f,
+ void *data)
+{
+ struct write_packed_refs_v2_context *ctx = data;
+ int ok;
+
+ trace2_region_enter("refs", "refs-chunk", the_repository);
+ ok = merge_iterator_and_updates(ctx->refs, ctx->updates, ctx->err,
+ write_packed_entry_v2, ctx);
+ trace2_region_leave("refs", "refs-chunk", the_repository);
+
+ return ok != ITER_DONE;
+}
+
+static int write_refs_chunk_offsets(struct hashfile *f,
+ void *data)
+{
+ struct write_packed_refs_v2_context *ctx = data;
+ size_t i;
+
+ trace2_region_enter("refs", "offsets", the_repository);
+ for (i = 0; i < ctx->nr; i++)
+ hashwrite_be64(f, ctx->offsets[i]);
+
+ trace2_region_leave("refs", "offsets", the_repository);
+ return 0;
+}
+
+static int write_refs_chunk_prefix_data(struct hashfile *f,
+ void *data)
+{
+ struct write_packed_refs_v2_context *ctx = data;
+ size_t i;
+
+ trace2_region_enter("refs", "prefix-data", the_repository);
+ for (i = 0; i < ctx->prefix_nr; i++) {
+ size_t len = strlen(ctx->prefixes[i]) + 1;
+ hashwrite(f, ctx->prefixes[i], len);
+
+ /* TODO: assert the prefix lengths match the stored offsets? */
+ }
+
+ trace2_region_leave("refs", "prefix-data", the_repository);
+ return 0;
+}
+
+static int write_refs_chunk_prefix_offsets(struct hashfile *f,
+ void *data)
+{
+ struct write_packed_refs_v2_context *ctx = data;
+ size_t i;
+
+ trace2_region_enter("refs", "prefix-offsets", the_repository);
+ for (i = 0; i < ctx->prefix_nr; i++) {
+ hashwrite_be32(f, ctx->prefix_offsets[i]);
+ hashwrite_be32(f, ctx->prefix_rows[i]);
+ }
+
+ trace2_region_leave("refs", "prefix-offsets", the_repository);
+ return 0;
+}
+
+int write_packed_refs_v2(struct write_packed_refs_v2_context *ctx)
+{
+ unsigned char file_hash[GIT_MAX_RAWSZ];
+
+ ctx->write_prefixes = git_env_bool("GIT_TEST_WRITE_PACKED_REFS_PREFIXES", 1);
+
+ add_chunk(ctx->cf, CHREFS_CHUNKID_REFS, 0, write_refs_chunk_refs);
+ add_chunk(ctx->cf, CHREFS_CHUNKID_OFFSETS, 0, write_refs_chunk_offsets);
+
+ if (ctx->write_prefixes) {
+ add_chunk(ctx->cf, CHREFS_CHUNKID_PREFIX_DATA, 0, write_refs_chunk_prefix_data);
+ add_chunk(ctx->cf, CHREFS_CHUNKID_PREFIX_OFFSETS, 0, write_refs_chunk_prefix_offsets);
+ }
+
+ hashwrite_be32(ctx->f, PACKED_REFS_SIGNATURE);
+ hashwrite_be32(ctx->f, 2);
+ hashwrite_be32(ctx->f, the_hash_algo->format_id);
+
+ if (write_chunkfile(ctx->cf, CHUNKFILE_TRAILING_TOC, ctx))
+ goto failure;
+
+ finalize_hashfile(ctx->f, file_hash, FSYNC_COMPONENT_REFERENCE,
+ CSUM_HASH_IN_STREAM | CSUM_FSYNC);
+
+ return 0;
+
+failure:
+ return -1;
+}
+
+void free_v2_context(struct write_packed_refs_v2_context *ctx)
+{
+ if (ctx->cf)
+ free_chunkfile(ctx->cf);
+ free(ctx);
+}
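A minimal sketch, not part of the diff above, of how a reader could interpret the cumulative offsets that write_packed_entry_v2() emits. The helper name decode_row is hypothetical, and it assumes OFFSET_IS_PEELED is the most significant bit of each stored 64-bit value, as the writer implies:

    #include <stdint.h>
    #include <stddef.h>

    #define OFFSET_IS_PEELED ((uint64_t)1 << 63)    /* assumed bit position */

    struct ref_row {
        uint64_t start;    /* first byte of the row within the refs chunk */
        uint64_t end;      /* one past the row's last byte */
        int peeled;        /* row carries a second, peeled object ID */
    };

    /* offsets[] holds the values exactly as hashwrite_be64() stored them. */
    static struct ref_row decode_row(const uint64_t *offsets, size_t i)
    {
        struct ref_row row = { 0 };

        row.start = i ? (offsets[i - 1] & ~OFFSET_IS_PEELED) : 0;
        row.end = offsets[i] & ~OFFSET_IS_PEELED;
        row.peeled = !!(offsets[i] & OFFSET_IS_PEELED);
        return row;
    }

Storing row ends (rather than starts) leaves the high bit free to flag the peeled state without a separate bitmap.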
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
index 69f93b0e2a..39b93fce97 100644
--- a/refs/refs-internal.h
+++ b/refs/refs-internal.h
@@ -521,6 +521,15 @@ struct ref_store;
REF_STORE_ODB | \
REF_STORE_MAIN)
+#define REF_STORE_FORMAT_FILES (1 << 8) /* can use loose ref files */
+#define REF_STORE_FORMAT_PACKED (1 << 9) /* can use v1 packed-refs file */
+#define REF_STORE_FORMAT_PACKED_V2 (1 << 10) /* can use v2 packed-refs file */
+
+static inline int packed_refs_enabled(int flags)
+{
+ return flags & (REF_STORE_FORMAT_PACKED | REF_STORE_FORMAT_PACKED_V2);
+}
+
/*
* Initialize the ref_store for the specified gitdir. These functions
* should call base_ref_store_init() to initialize the shared part of
diff --git a/repo-settings.c b/repo-settings.c
index e8b58151bc..3ec0a53ea6 100644
--- a/repo-settings.c
+++ b/repo-settings.c
@@ -43,6 +43,7 @@ void prepare_repo_settings(struct repository *r)
/* Defaults modified by feature.* */
if (experimental) {
r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_SKIPPING;
+ r->settings.gc_cruft_packs = 1;
}
if (manyfiles) {
r->settings.index_version = 4;
@@ -72,6 +73,19 @@ void prepare_repo_settings(struct repository *r)
r->settings.core_multi_pack_index = 1;
/*
+ * If the environment variable is set, assume that it was set by an
+ * invocation of git running in a superproject with
+ * submodule.propagateBranches set and that is recursing into this repo
+ * as a submodule. Therefore, we should ignore whatever is set in this
+ * repo's config.
+ */
+ r->settings.submodule_propagate_branches =
+ git_env_bool(GIT_SUBMODULE_PROPAGATE_BRANCHES_ENVIRONMENT, -1);
+ if (r->settings.submodule_propagate_branches == -1)
+ repo_cfg_bool(r, "submodule.propagateBranches",
+ &r->settings.submodule_propagate_branches, 0);
+
+ /*
* Non-boolean config
*/
if (!repo_config_get_int(r, "index.version", &value))
diff --git a/repository.c b/repository.c
index 5d166b692c..96533fc76b 100644
--- a/repository.c
+++ b/repository.c
@@ -182,6 +182,8 @@ int repo_init(struct repository *repo,
repo->repository_format_partial_clone = format.partial_clone;
format.partial_clone = NULL;
+ repo->ref_format = format.ref_format;
+
if (worktree)
repo_set_worktree(repo, worktree);
diff --git a/repository.h b/repository.h
index 24316ac944..9f5ce21f70 100644
--- a/repository.h
+++ b/repository.h
@@ -34,9 +34,11 @@ struct repo_settings {
int commit_graph_generation_version;
int commit_graph_read_changed_paths;
int gc_write_commit_graph;
+ int gc_cruft_packs;
int fetch_write_commit_graph;
int command_requires_full_index;
int sparse_index;
+ int submodule_propagate_branches;
struct fsmonitor_settings *fsmonitor; /* lazily loaded */
@@ -61,6 +63,12 @@ struct repo_path_cache {
char *shallow;
};
+enum ref_format_flags {
+ REF_FORMAT_FILES = (1 << 0),
+ REF_FORMAT_PACKED = (1 << 1),
+ REF_FORMAT_PACKED_V2 = (1 << 2),
+};
+
struct repository {
/* Environment */
/*
@@ -95,6 +103,7 @@ struct repository {
* the ref object.
*/
struct ref_store *refs_private;
+ enum ref_format_flags ref_format;
/*
* Contains path to often used file names.
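A minimal sketch, not from the diff, of how the ref_format bits copied into the repository by repo_init() might be consulted; the helper names are hypothetical:

    static int repo_supports_packed_refs_v2(const struct repository *repo)
    {
        return !!(repo->ref_format & REF_FORMAT_PACKED_V2);
    }

    static int repo_uses_loose_refs(const struct repository *repo)
    {
        return !!(repo->ref_format & REF_FORMAT_FILES);
    }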
diff --git a/reset.c b/reset.c
index e3383a9334..5ded23611f 100644
--- a/reset.c
+++ b/reset.c
@@ -128,6 +128,7 @@ int reset_head(struct repository *r, const struct reset_head_opts *opts)
unpack_tree_opts.update = 1;
unpack_tree_opts.merge = 1;
unpack_tree_opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
+ unpack_tree_opts.skip_cache_tree_update = 1;
init_checkout_metadata(&unpack_tree_opts.meta, switch_to_branch, oid, NULL);
if (reset_hard)
unpack_tree_opts.reset = UNPACK_RESET_PROTECT_UNTRACKED;
diff --git a/revision.c b/revision.c
index 36e31942ce..c86e76e471 100644
--- a/revision.c
+++ b/revision.c
@@ -1,4 +1,5 @@
#include "cache.h"
+#include "config.h"
#include "object-store.h"
#include "tag.h"
#include "blob.h"
@@ -47,13 +48,6 @@ static inline int want_ancestry(const struct rev_info *revs);
void show_object_with_name(FILE *out, struct object *obj, const char *name)
{
fprintf(out, "%s ", oid_to_hex(&obj->oid));
- /*
- * This "for (const char *p = ..." is made as a first step towards
- * making use of such declarations elsewhere in our codebase. If
- * it causes compilation problems on your platform, please report
- * it to the Git mailing list at git@vger.kernel.org. In the meantime,
- * adding -std=gnu99 to CFLAGS may help if you are with older GCC.
- */
for (const char *p = name; *p && *p != '\n'; p++)
fputc(*p, out);
fputc('\n', out);
@@ -1524,27 +1518,77 @@ static void add_rev_cmdline_list(struct rev_info *revs,
}
}
-struct all_refs_cb {
- int all_flags;
- int warned_bad_reflog;
- struct rev_info *all_revs;
- const char *name_for_errormsg;
- struct worktree *wt;
-};
-
-int ref_excluded(struct string_list *ref_excludes, const char *path)
+int ref_excluded(const struct ref_exclusions *exclusions, const char *path)
{
+ const char *stripped_path = strip_namespace(path);
struct string_list_item *item;
- if (!ref_excludes)
- return 0;
- for_each_string_list_item(item, ref_excludes) {
+ for_each_string_list_item(item, &exclusions->excluded_refs) {
if (!wildmatch(item->string, path, 0))
return 1;
}
+
+ if (ref_is_hidden(stripped_path, path, &exclusions->hidden_refs))
+ return 1;
+
return 0;
}
+void init_ref_exclusions(struct ref_exclusions *exclusions)
+{
+ struct ref_exclusions blank = REF_EXCLUSIONS_INIT;
+ memcpy(exclusions, &blank, sizeof(*exclusions));
+}
+
+void clear_ref_exclusions(struct ref_exclusions *exclusions)
+{
+ string_list_clear(&exclusions->excluded_refs, 0);
+ string_list_clear(&exclusions->hidden_refs, 0);
+ exclusions->hidden_refs_configured = 0;
+}
+
+void add_ref_exclusion(struct ref_exclusions *exclusions, const char *exclude)
+{
+ string_list_append(&exclusions->excluded_refs, exclude);
+}
+
+struct exclude_hidden_refs_cb {
+ struct ref_exclusions *exclusions;
+ const char *section;
+};
+
+static int hide_refs_config(const char *var, const char *value, void *cb_data)
+{
+ struct exclude_hidden_refs_cb *cb = cb_data;
+ cb->exclusions->hidden_refs_configured = 1;
+ return parse_hide_refs_config(var, value, cb->section,
+ &cb->exclusions->hidden_refs);
+}
+
+void exclude_hidden_refs(struct ref_exclusions *exclusions, const char *section)
+{
+ struct exclude_hidden_refs_cb cb;
+
+ if (strcmp(section, "receive") && strcmp(section, "uploadpack"))
+ die(_("unsupported section for hidden refs: %s"), section);
+
+ if (exclusions->hidden_refs_configured)
+ die(_("--exclude-hidden= passed more than once"));
+
+ cb.exclusions = exclusions;
+ cb.section = section;
+
+ git_config(hide_refs_config, &cb);
+}
+
+struct all_refs_cb {
+ int all_flags;
+ int warned_bad_reflog;
+ struct rev_info *all_revs;
+ const char *name_for_errormsg;
+ struct worktree *wt;
+};
+
static int handle_one_ref(const char *path, const struct object_id *oid,
int flag UNUSED,
void *cb_data)
@@ -1552,7 +1596,7 @@ static int handle_one_ref(const char *path, const struct object_id *oid,
struct all_refs_cb *cb = cb_data;
struct object *object;
- if (ref_excluded(cb->all_revs->ref_excludes, path))
+ if (ref_excluded(&cb->all_revs->ref_excludes, path))
return 0;
object = get_reference(cb->all_revs, path, oid, cb->all_flags);
@@ -1570,24 +1614,6 @@ static void init_all_refs_cb(struct all_refs_cb *cb, struct rev_info *revs,
cb->wt = NULL;
}
-void clear_ref_exclusion(struct string_list **ref_excludes_p)
-{
- if (*ref_excludes_p) {
- string_list_clear(*ref_excludes_p, 0);
- free(*ref_excludes_p);
- }
- *ref_excludes_p = NULL;
-}
-
-void add_ref_exclusion(struct string_list **ref_excludes_p, const char *exclude)
-{
- if (!*ref_excludes_p) {
- CALLOC_ARRAY(*ref_excludes_p, 1);
- (*ref_excludes_p)->strdup_strings = 1;
- }
- string_list_append(*ref_excludes_p, exclude);
-}
-
static void handle_refs(struct ref_store *refs,
struct rev_info *revs, unsigned flags,
int (*for_each)(struct ref_store *, each_ref_fn, void *))
@@ -1872,30 +1898,15 @@ void repo_init_revisions(struct repository *r,
struct rev_info *revs,
const char *prefix)
{
- memset(revs, 0, sizeof(*revs));
+ struct rev_info blank = REV_INFO_INIT;
+ memcpy(revs, &blank, sizeof(*revs));
revs->repo = r;
- revs->abbrev = DEFAULT_ABBREV;
- revs->simplify_history = 1;
revs->pruning.repo = r;
- revs->pruning.flags.recursive = 1;
- revs->pruning.flags.quick = 1;
revs->pruning.add_remove = file_add_remove;
revs->pruning.change = file_change;
revs->pruning.change_fn_data = revs;
- revs->sort_order = REV_SORT_IN_GRAPH_ORDER;
- revs->dense = 1;
revs->prefix = prefix;
- revs->max_age = -1;
- revs->max_age_as_filter = -1;
- revs->min_age = -1;
- revs->skip_count = -1;
- revs->max_count = -1;
- revs->max_parents = -1;
- revs->expand_tabs_in_log = -1;
-
- revs->commit_format = CMIT_FMT_DEFAULT;
- revs->expand_tabs_in_log_default = 8;
grep_init(&revs->grep_filter, revs->repo);
revs->grep_filter.status_only = 1;
@@ -1908,6 +1919,7 @@ void repo_init_revisions(struct repository *r,
init_display_notes(&revs->notes_opt);
list_objects_filter_init(&revs->filter);
+ init_ref_exclusions(&revs->ref_excludes);
}
static void add_pending_commit_list(struct rev_info *revs,
@@ -2120,9 +2132,8 @@ static int handle_revision_arg_1(const char *arg_, struct rev_info *revs, int fl
int exclude_parent = 1;
if (mark[2]) {
- char *end;
- exclude_parent = strtoul(mark + 2, &end, 10);
- if (*end != '\0' || !exclude_parent)
+ if (strtol_i(mark + 2, 10, &exclude_parent) ||
+ exclude_parent < 1)
return -1;
}
@@ -2233,7 +2244,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
!strcmp(arg, "--bisect") || starts_with(arg, "--glob=") ||
!strcmp(arg, "--indexed-objects") ||
!strcmp(arg, "--alternate-refs") ||
- starts_with(arg, "--exclude=") ||
+ starts_with(arg, "--exclude=") || starts_with(arg, "--exclude-hidden=") ||
starts_with(arg, "--branches=") || starts_with(arg, "--tags=") ||
starts_with(arg, "--remotes=") || starts_with(arg, "--no-walk="))
{
@@ -2697,10 +2708,12 @@ static int handle_revision_pseudo_opt(struct rev_info *revs,
init_all_refs_cb(&cb, revs, *flags);
other_head_refs(handle_one_ref, &cb);
}
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (!strcmp(arg, "--branches")) {
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --branches"));
handle_refs(refs, revs, *flags, refs_for_each_branch_ref);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (!strcmp(arg, "--bisect")) {
read_bisect_terms(&term_bad, &term_good);
handle_refs(refs, revs, *flags, for_each_bad_bisect_ref);
@@ -2708,35 +2721,48 @@ static int handle_revision_pseudo_opt(struct rev_info *revs,
for_each_good_bisect_ref);
revs->bisect = 1;
} else if (!strcmp(arg, "--tags")) {
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --tags"));
handle_refs(refs, revs, *flags, refs_for_each_tag_ref);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (!strcmp(arg, "--remotes")) {
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --remotes"));
handle_refs(refs, revs, *flags, refs_for_each_remote_ref);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if ((argcount = parse_long_opt("glob", argv, &optarg))) {
struct all_refs_cb cb;
init_all_refs_cb(&cb, revs, *flags);
for_each_glob_ref(handle_one_ref, optarg, &cb);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
return argcount;
} else if ((argcount = parse_long_opt("exclude", argv, &optarg))) {
add_ref_exclusion(&revs->ref_excludes, optarg);
return argcount;
+ } else if ((argcount = parse_long_opt("exclude-hidden", argv, &optarg))) {
+ exclude_hidden_refs(&revs->ref_excludes, optarg);
+ return argcount;
} else if (skip_prefix(arg, "--branches=", &optarg)) {
struct all_refs_cb cb;
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --branches"));
init_all_refs_cb(&cb, revs, *flags);
for_each_glob_ref_in(handle_one_ref, optarg, "refs/heads/", &cb);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (skip_prefix(arg, "--tags=", &optarg)) {
struct all_refs_cb cb;
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --tags"));
init_all_refs_cb(&cb, revs, *flags);
for_each_glob_ref_in(handle_one_ref, optarg, "refs/tags/", &cb);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (skip_prefix(arg, "--remotes=", &optarg)) {
struct all_refs_cb cb;
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --remotes"));
init_all_refs_cb(&cb, revs, *flags);
for_each_glob_ref_in(handle_one_ref, optarg, "refs/remotes/", &cb);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (!strcmp(arg, "--reflog")) {
add_reflogs_to_pending(revs, *flags);
} else if (!strcmp(arg, "--indexed-objects")) {
@@ -3028,6 +3054,7 @@ void release_revisions(struct rev_info *revs)
date_mode_release(&revs->date_mode);
release_revisions_mailmap(revs->mailmap);
free_grep_patterns(&revs->grep_filter);
+ graph_clear(revs->graph);
/* TODO (need to handle "no_free"): diff_free(&revs->diffopt) */
diff_free(&revs->pruning);
reflog_walk_info_release(revs->reflog_info);
diff --git a/revision.h b/revision.h
index afe1b77985..30febad09a 100644
--- a/revision.h
+++ b/revision.h
@@ -81,6 +81,35 @@ struct rev_cmdline_info {
} *rev;
};
+struct ref_exclusions {
+ /*
+ * Excluded refs is a list of wildmatch patterns. If any of the
+ * patterns matches, the reference will be excluded.
+ */
+ struct string_list excluded_refs;
+
+ /*
+ * Hidden refs is a list of patterns that is to be hidden via
+ * `ref_is_hidden()`.
+ */
+ struct string_list hidden_refs;
+
+ /*
+ * Indicates whether hidden refs have been configured. This is to
+ * distinguish between no hidden refs existing and hidden refs not
+ * being parsed.
+ */
+ char hidden_refs_configured;
+};
+
+/**
+ * Initialize a `struct ref_exclusions` with a macro.
+ */
+#define REF_EXCLUSIONS_INIT { \
+ .excluded_refs = STRING_LIST_INIT_DUP, \
+ .hidden_refs = STRING_LIST_INIT_DUP, \
+}
+
struct oidset;
struct topo_walk_info;
@@ -103,7 +132,7 @@ struct rev_info {
struct list_objects_filter_options filter;
/* excluding from --branches, --refs, etc. expansion */
- struct string_list *ref_excludes;
+ struct ref_exclusions ref_excludes;
/* Basic information */
const char *prefix;
@@ -357,7 +386,23 @@ struct rev_info {
* called before release_revisions() the "struct rev_info" can be left
* uninitialized.
*/
-#define REV_INFO_INIT { 0 }
+#define REV_INFO_INIT { \
+ .abbrev = DEFAULT_ABBREV, \
+ .simplify_history = 1, \
+ .pruning.flags.recursive = 1, \
+ .pruning.flags.quick = 1, \
+ .sort_order = REV_SORT_IN_GRAPH_ORDER, \
+ .dense = 1, \
+ .max_age = -1, \
+ .max_age_as_filter = -1, \
+ .min_age = -1, \
+ .skip_count = -1, \
+ .max_count = -1, \
+ .max_parents = -1, \
+ .expand_tabs_in_log = -1, \
+ .commit_format = CMIT_FMT_DEFAULT, \
+ .expand_tabs_in_log_default = 8, \
+}
/**
* Initialize a rev_info structure with default values. The third parameter may
@@ -439,12 +484,14 @@ void mark_trees_uninteresting_sparse(struct repository *r, struct oidset *trees)
void show_object_with_name(FILE *, struct object *, const char *);
/**
- * Helpers to check if a "struct string_list" item matches with
- * wildmatch().
+ * Helpers to check if a reference should be excluded.
*/
-int ref_excluded(struct string_list *, const char *path);
-void clear_ref_exclusion(struct string_list **);
-void add_ref_exclusion(struct string_list **, const char *exclude);
+
+int ref_excluded(const struct ref_exclusions *exclusions, const char *path);
+void init_ref_exclusions(struct ref_exclusions *);
+void clear_ref_exclusions(struct ref_exclusions *);
+void add_ref_exclusion(struct ref_exclusions *, const char *exclude);
+void exclude_hidden_refs(struct ref_exclusions *, const char *section);
/**
* This function can be used if you want to add commit objects as revision
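A minimal sketch, not part of the diff, of how a caller might drive the ref_exclusions API declared above. The function want_ref and the excluded pattern are illustrative; the "uploadpack" section is one of the two sections exclude_hidden_refs() accepts:

    static int want_ref(const char *refname)
    {
        struct ref_exclusions excl = REF_EXCLUSIONS_INIT;
        int wanted;

        add_ref_exclusion(&excl, "refs/heads/private/*");
        exclude_hidden_refs(&excl, "uploadpack");    /* reads uploadpack.hideRefs */

        wanted = !ref_excluded(&excl, refname);
        clear_ref_exclusions(&excl);
        return wanted;
    }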
diff --git a/run-command.c b/run-command.c
index 5ec3a46dcc..02cdb22d7b 100644
--- a/run-command.c
+++ b/run-command.c
@@ -1004,41 +1004,6 @@ int run_command(struct child_process *cmd)
return finish_command(cmd);
}
-int run_command_v_opt(const char **argv, int opt)
-{
- return run_command_v_opt_cd_env(argv, opt, NULL, NULL);
-}
-
-int run_command_v_opt_tr2(const char **argv, int opt, const char *tr2_class)
-{
- return run_command_v_opt_cd_env_tr2(argv, opt, NULL, NULL, tr2_class);
-}
-
-int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env)
-{
- return run_command_v_opt_cd_env_tr2(argv, opt, dir, env, NULL);
-}
-
-int run_command_v_opt_cd_env_tr2(const char **argv, int opt, const char *dir,
- const char *const *env, const char *tr2_class)
-{
- struct child_process cmd = CHILD_PROCESS_INIT;
- strvec_pushv(&cmd.args, argv);
- cmd.no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0;
- cmd.git_cmd = opt & RUN_GIT_CMD ? 1 : 0;
- cmd.stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0;
- cmd.silent_exec_failure = opt & RUN_SILENT_EXEC_FAILURE ? 1 : 0;
- cmd.use_shell = opt & RUN_USING_SHELL ? 1 : 0;
- cmd.clean_on_exit = opt & RUN_CLEAN_ON_EXIT ? 1 : 0;
- cmd.wait_after_clean = opt & RUN_WAIT_AFTER_CLEAN ? 1 : 0;
- cmd.close_object_store = opt & RUN_CLOSE_OBJECT_STORE ? 1 : 0;
- cmd.dir = dir;
- if (env)
- strvec_pushv(&cmd.env, (const char **)env);
- cmd.trace2_child_class = tr2_class;
- return run_command(&cmd);
-}
-
#ifndef NO_PTHREADS
static pthread_t main_thread;
static int main_thread_set;
@@ -1496,16 +1461,8 @@ enum child_state {
GIT_CP_WAIT_CLEANUP,
};
-int run_processes_parallel_ungroup;
struct parallel_processes {
- void *data;
-
- int max_processes;
- int nr_processes;
-
- get_next_task_fn get_next_task;
- start_failure_fn start_failure;
- task_finished_fn task_finished;
+ size_t nr_processes;
struct {
enum child_state state;
@@ -1520,81 +1477,63 @@ struct parallel_processes {
struct pollfd *pfd;
unsigned shutdown : 1;
- unsigned ungroup : 1;
- int output_owner;
+ size_t output_owner;
struct strbuf buffered_output; /* of finished children */
};
-static int default_start_failure(struct strbuf *out,
- void *pp_cb,
- void *pp_task_cb)
-{
- return 0;
-}
+struct parallel_processes_for_signal {
+ const struct run_process_parallel_opts *opts;
+ const struct parallel_processes *pp;
+};
-static int default_task_finished(int result,
- struct strbuf *out,
- void *pp_cb,
- void *pp_task_cb)
+static void kill_children(const struct parallel_processes *pp,
+ const struct run_process_parallel_opts *opts,
+ int signo)
{
- return 0;
+ for (size_t i = 0; i < opts->processes; i++)
+ if (pp->children[i].state == GIT_CP_WORKING)
+ kill(pp->children[i].process.pid, signo);
}
-static void kill_children(struct parallel_processes *pp, int signo)
+static void kill_children_signal(const struct parallel_processes_for_signal *pp_sig,
+ int signo)
{
- int i, n = pp->max_processes;
-
- for (i = 0; i < n; i++)
- if (pp->children[i].state == GIT_CP_WORKING)
- kill(pp->children[i].process.pid, signo);
+ kill_children(pp_sig->pp, pp_sig->opts, signo);
}
-static struct parallel_processes *pp_for_signal;
+static struct parallel_processes_for_signal *pp_for_signal;
static void handle_children_on_signal(int signo)
{
- kill_children(pp_for_signal, signo);
+ kill_children_signal(pp_for_signal, signo);
sigchain_pop(signo);
raise(signo);
}
static void pp_init(struct parallel_processes *pp,
- int n,
- get_next_task_fn get_next_task,
- start_failure_fn start_failure,
- task_finished_fn task_finished,
- void *data, int ungroup)
+ const struct run_process_parallel_opts *opts,
+ struct parallel_processes_for_signal *pp_sig)
{
- int i;
-
- if (n < 1)
- n = online_cpus();
+ const size_t n = opts->processes;
- pp->max_processes = n;
+ if (!n)
+ BUG("you must provide a non-zero number of processes!");
- trace_printf("run_processes_parallel: preparing to run up to %d tasks", n);
+ trace_printf("run_processes_parallel: preparing to run up to %"PRIuMAX" tasks",
+ (uintmax_t)n);
- pp->data = data;
- if (!get_next_task)
+ if (!opts->get_next_task)
BUG("you need to specify a get_next_task function");
- pp->get_next_task = get_next_task;
+
+ if (opts->duplicate_output && opts->ungroup)
+ BUG("duplicate_output and ungroup are incompatible with each other");
- pp->start_failure = start_failure ? start_failure : default_start_failure;
- pp->task_finished = task_finished ? task_finished : default_task_finished;
-
- pp->nr_processes = 0;
- pp->output_owner = 0;
- pp->shutdown = 0;
- pp->ungroup = ungroup;
CALLOC_ARRAY(pp->children, n);
- if (pp->ungroup)
- pp->pfd = NULL;
- else
+ if (!opts->ungroup)
CALLOC_ARRAY(pp->pfd, n);
- strbuf_init(&pp->buffered_output, 0);
- for (i = 0; i < n; i++) {
+ for (size_t i = 0; i < n; i++) {
strbuf_init(&pp->children[i].err, 0);
child_process_init(&pp->children[i].process);
if (pp->pfd) {
@@ -1603,16 +1542,17 @@ static void pp_init(struct parallel_processes *pp,
}
}
- pp_for_signal = pp;
+ pp_sig->pp = pp;
+ pp_sig->opts = opts;
+ pp_for_signal = pp_sig;
sigchain_push_common(handle_children_on_signal);
}
-static void pp_cleanup(struct parallel_processes *pp)
+static void pp_cleanup(struct parallel_processes *pp,
+ const struct run_process_parallel_opts *opts)
{
- int i;
-
trace_printf("run_processes_parallel: done");
- for (i = 0; i < pp->max_processes; i++) {
+ for (size_t i = 0; i < opts->processes; i++) {
strbuf_release(&pp->children[i].err);
child_process_clear(&pp->children[i].process);
}
@@ -1637,39 +1577,45 @@ static void pp_cleanup(struct parallel_processes *pp)
* <0 no new job was started, user wishes to shutdown early. Use negative code
* to signal the children.
*/
-static int pp_start_one(struct parallel_processes *pp)
+static int pp_start_one(struct parallel_processes *pp,
+ const struct run_process_parallel_opts *opts)
{
- int i, code;
+ size_t i;
+ int code;
- for (i = 0; i < pp->max_processes; i++)
+ for (i = 0; i < opts->processes; i++)
if (pp->children[i].state == GIT_CP_FREE)
break;
- if (i == pp->max_processes)
+ if (i == opts->processes)
BUG("bookkeeping is hard");
- code = pp->get_next_task(&pp->children[i].process,
- pp->ungroup ? NULL : &pp->children[i].err,
- pp->data,
- &pp->children[i].data);
+ code = opts->get_next_task(&pp->children[i].process,
+ opts->ungroup ? NULL : &pp->children[i].err,
+ opts->data,
+ &pp->children[i].data);
if (!code) {
- if (!pp->ungroup) {
+ if (!opts->ungroup) {
strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
strbuf_reset(&pp->children[i].err);
}
return 1;
}
- if (!pp->ungroup) {
+ if (!opts->ungroup) {
pp->children[i].process.err = -1;
pp->children[i].process.stdout_to_stderr = 1;
}
pp->children[i].process.no_stdin = 1;
if (start_command(&pp->children[i].process)) {
- code = pp->start_failure(pp->ungroup ? NULL :
- &pp->children[i].err,
- pp->data,
- pp->children[i].data);
- if (!pp->ungroup) {
+ if (opts->start_failure)
+ code = opts->start_failure(opts->ungroup ? NULL :
+ &pp->children[i].err,
+ opts->data,
+ pp->children[i].data);
+ else
+ code = 0;
+
+ if (!opts->ungroup) {
strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
strbuf_reset(&pp->children[i].err);
}
@@ -1685,23 +1631,31 @@ static int pp_start_one(struct parallel_processes *pp)
return 0;
}
-static void pp_buffer_stderr(struct parallel_processes *pp, int output_timeout)
+static void pp_buffer_stderr(struct parallel_processes *pp,
+ const struct run_process_parallel_opts *opts,
+ int output_timeout)
{
int i;
- while ((i = poll(pp->pfd, pp->max_processes, output_timeout)) < 0) {
+ while ((i = poll(pp->pfd, opts->processes, output_timeout)) < 0) {
if (errno == EINTR)
continue;
- pp_cleanup(pp);
+ pp_cleanup(pp, opts);
die_errno("poll");
}
/* Buffer output from all pipes. */
- for (i = 0; i < pp->max_processes; i++) {
+ for (size_t i = 0; i < opts->processes; i++) {
if (pp->children[i].state == GIT_CP_WORKING &&
pp->pfd[i].revents & (POLLIN | POLLHUP)) {
- int n = strbuf_read_once(&pp->children[i].err,
- pp->children[i].process.err, 0);
+ struct strbuf buf = STRBUF_INIT;
+ int n = strbuf_read_once(&buf, pp->children[i].process.err, 0);
+ strbuf_addbuf(&pp->children[i].err, &buf);
+ if (opts->duplicate_output)
+ opts->duplicate_output(&buf, &pp->children[i].err,
+ opts->data,
+ pp->children[i].data);
+ strbuf_release(&buf);
if (n == 0) {
close(pp->children[i].process.err);
pp->children[i].state = GIT_CP_WAIT_CLEANUP;
@@ -1712,9 +1666,9 @@ static void pp_buffer_stderr(struct parallel_processes *pp, int output_timeout)
}
}
-static void pp_output(struct parallel_processes *pp)
+static void pp_output(const struct parallel_processes *pp)
{
- int i = pp->output_owner;
+ size_t i = pp->output_owner;
if (pp->children[i].state == GIT_CP_WORKING &&
pp->children[i].err.len) {
@@ -1723,24 +1677,28 @@ static void pp_output(struct parallel_processes *pp)
}
}
-static int pp_collect_finished(struct parallel_processes *pp)
+static int pp_collect_finished(struct parallel_processes *pp,
+ const struct run_process_parallel_opts *opts)
{
- int i, code;
- int n = pp->max_processes;
+ int code;
+ size_t i;
int result = 0;
while (pp->nr_processes > 0) {
- for (i = 0; i < pp->max_processes; i++)
+ for (i = 0; i < opts->processes; i++)
if (pp->children[i].state == GIT_CP_WAIT_CLEANUP)
break;
- if (i == pp->max_processes)
+ if (i == opts->processes)
break;
code = finish_command(&pp->children[i].process);
- code = pp->task_finished(code, pp->ungroup ? NULL :
- &pp->children[i].err, pp->data,
- pp->children[i].data);
+ if (opts->task_finished)
+ code = opts->task_finished(code, opts->ungroup ? NULL :
+ &pp->children[i].err, opts->data,
+ pp->children[i].data);
+ else
+ code = 0;
if (code)
result = code;
@@ -1753,12 +1711,14 @@ static int pp_collect_finished(struct parallel_processes *pp)
pp->pfd[i].fd = -1;
child_process_init(&pp->children[i].process);
- if (pp->ungroup) {
+ if (opts->ungroup) {
; /* no strbuf_*() work to do here */
} else if (i != pp->output_owner) {
strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
strbuf_reset(&pp->children[i].err);
} else {
+ const size_t n = opts->processes;
+
strbuf_write(&pp->children[i].err, stderr);
strbuf_reset(&pp->children[i].err);
@@ -1783,76 +1743,60 @@ static int pp_collect_finished(struct parallel_processes *pp)
return result;
}
-int run_processes_parallel(int n,
- get_next_task_fn get_next_task,
- start_failure_fn start_failure,
- task_finished_fn task_finished,
- void *pp_cb)
+void run_processes_parallel(const struct run_process_parallel_opts *opts)
{
int i, code;
int output_timeout = 100;
int spawn_cap = 4;
- int ungroup = run_processes_parallel_ungroup;
- struct parallel_processes pp;
-
- /* unset for the next API user */
- run_processes_parallel_ungroup = 0;
-
- pp_init(&pp, n, get_next_task, start_failure, task_finished, pp_cb,
- ungroup);
+ struct parallel_processes_for_signal pp_sig;
+ struct parallel_processes pp = {
+ .buffered_output = STRBUF_INIT,
+ };
+ /* options */
+ const char *tr2_category = opts->tr2_category;
+ const char *tr2_label = opts->tr2_label;
+ const int do_trace2 = tr2_category && tr2_label;
+
+ if (do_trace2)
+ trace2_region_enter_printf(tr2_category, tr2_label, NULL,
+ "max:%d", opts->processes);
+
+ pp_init(&pp, opts, &pp_sig);
while (1) {
for (i = 0;
i < spawn_cap && !pp.shutdown &&
- pp.nr_processes < pp.max_processes;
+ pp.nr_processes < opts->processes;
i++) {
- code = pp_start_one(&pp);
+ code = pp_start_one(&pp, opts);
if (!code)
continue;
if (code < 0) {
pp.shutdown = 1;
- kill_children(&pp, -code);
+ kill_children(&pp, opts, -code);
}
break;
}
if (!pp.nr_processes)
break;
- if (ungroup) {
- int i;
-
- for (i = 0; i < pp.max_processes; i++)
+ if (opts->ungroup) {
+ for (size_t i = 0; i < opts->processes; i++)
pp.children[i].state = GIT_CP_WAIT_CLEANUP;
} else {
- pp_buffer_stderr(&pp, output_timeout);
+ pp_buffer_stderr(&pp, opts, output_timeout);
pp_output(&pp);
}
- code = pp_collect_finished(&pp);
+ code = pp_collect_finished(&pp, opts);
if (code) {
pp.shutdown = 1;
if (code < 0)
- kill_children(&pp, -code);
+ kill_children(&pp, opts, -code);
}
}
- pp_cleanup(&pp);
- return 0;
-}
-
-int run_processes_parallel_tr2(int n, get_next_task_fn get_next_task,
- start_failure_fn start_failure,
- task_finished_fn task_finished, void *pp_cb,
- const char *tr2_category, const char *tr2_label)
-{
- int result;
-
- trace2_region_enter_printf(tr2_category, tr2_label, NULL, "max:%d",
- ((n < 1) ? online_cpus() : n));
+ pp_cleanup(&pp, opts);
- result = run_processes_parallel(n, get_next_task, start_failure,
- task_finished, pp_cb);
-
- trace2_region_leave(tr2_category, tr2_label, NULL);
-
- return result;
+ if (do_trace2)
+ trace2_region_leave(tr2_category, tr2_label, NULL);
}
int run_auto_maintenance(int quiet)
diff --git a/run-command.h b/run-command.h
index 0e85e5846a..709b36a411 100644
--- a/run-command.h
+++ b/run-command.h
@@ -150,9 +150,7 @@ struct child_process {
}
/**
- * The functions: child_process_init, start_command, finish_command,
- * run_command, run_command_v_opt, run_command_v_opt_cd_env, child_process_clear
- * do the following:
+ * The functions: start_command, finish_command, run_command do the following:
*
* - If a system call failed, errno is set and -1 is returned. A diagnostic
* is printed.
@@ -224,36 +222,6 @@ int run_command(struct child_process *);
*/
int run_auto_maintenance(int quiet);
-#define RUN_COMMAND_NO_STDIN (1<<0)
-#define RUN_GIT_CMD (1<<1)
-#define RUN_COMMAND_STDOUT_TO_STDERR (1<<2)
-#define RUN_SILENT_EXEC_FAILURE (1<<3)
-#define RUN_USING_SHELL (1<<4)
-#define RUN_CLEAN_ON_EXIT (1<<5)
-#define RUN_WAIT_AFTER_CLEAN (1<<6)
-#define RUN_CLOSE_OBJECT_STORE (1<<7)
-
-/**
- * Convenience functions that encapsulate a sequence of
- * start_command() followed by finish_command(). The argument argv
- * specifies the program and its arguments. The argument opt is zero
- * or more of the flags `RUN_COMMAND_NO_STDIN`, `RUN_GIT_CMD`,
- * `RUN_COMMAND_STDOUT_TO_STDERR`, or `RUN_SILENT_EXEC_FAILURE`
- * that correspond to the members .no_stdin, .git_cmd,
- * .stdout_to_stderr, .silent_exec_failure of `struct child_process`.
- * The argument dir corresponds the member .dir. The argument env
- * corresponds to the member .env.
- */
-int run_command_v_opt(const char **argv, int opt);
-int run_command_v_opt_tr2(const char **argv, int opt, const char *tr2_class);
-/*
- * env (the environment) is to be formatted like environ: "VAR=VALUE".
- * To unset an environment variable use just "VAR".
- */
-int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env);
-int run_command_v_opt_cd_env_tr2(const char **argv, int opt, const char *dir,
- const char *const *env, const char *tr2_class);
-
/**
* Execute the given command, sending "in" to its stdin, and capturing its
* stdout and stderr in the "out" and "err" strbufs. Any of the three may
@@ -441,6 +409,24 @@ typedef int (*start_failure_fn)(struct strbuf *out,
void *pp_task_cb);
/**
+ * This callback is called whenever output from a child process is buffered
+ *
+ * "struct strbuf *process_out" contains the output from the child process
+ *
+ * See run_processes_parallel() below for a discussion of the "struct
+ * strbuf *out" parameter.
+ *
+ * pp_cb is the callback cookie as passed into run_processes_parallel,
+ * pp_task_cb is the callback cookie as passed into get_next_task_fn.
+ *
+ * This function is incompatible with "ungroup".
+ */
+typedef void (*duplicate_output_fn)(struct strbuf *process_out,
+ struct strbuf *out,
+ void *pp_cb,
+ void *pp_task_cb);
+
+/**
* This callback is called on every child process that finished processing.
*
* See run_processes_parallel() below for a discussion of the "struct
@@ -459,17 +445,70 @@ typedef int (*task_finished_fn)(int result,
void *pp_task_cb);
/**
- * Runs up to n processes at the same time. Whenever a process can be
- * started, the callback get_next_task_fn is called to obtain the data
+ * Options used by run_processes_parallel(); a { 0 }-initialized struct means
+ * no options.
+ */
+struct run_process_parallel_opts
+{
+ /**
+ * tr2_category & tr2_label: sets the trace2 category and label for
+ * logging. These must either be unset, or both of them must be set.
+ */
+ const char *tr2_category;
+ const char *tr2_label;
+
+ /**
+ * processes: see 'processes' in run_processes_parallel() below.
+ */
+ size_t processes;
+
+ /**
+ * ungroup: see 'ungroup' in run_processes_parallel() below.
+ */
+ unsigned int ungroup:1;
+
+ /**
+ * get_next_task: See get_next_task_fn() above. This must be
+ * specified.
+ */
+ get_next_task_fn get_next_task;
+
+ /**
+ * start_failure: See start_failure_fn() above. This can be
+ * NULL to omit any special handling.
+ */
+ start_failure_fn start_failure;
+
+ /**
+ * duplicate_output: See duplicate_output_fn() above. This should be
+ * NULL unless process specific output is needed
+ */
+ duplicate_output_fn duplicate_output;
+
+ /**
+ * task_finished: See task_finished_fn() above. This can be
+ * NULL to omit any special handling.
+ */
+ task_finished_fn task_finished;
+
+ /**
+ * data: user data, will be passed as "pp_cb" to the callback
+ * parameters.
+ */
+ void *data;
+};
+
+/**
+ * Options are passed via the "struct run_process_parallel_opts" above.
+ *
+ * Runs N 'processes' at the same time. Whenever a process can be
+ * started, the callback opts.get_next_task is called to obtain the data
* required to start another child process.
*
* The children started via this function run in parallel. Their output
* (both stdout and stderr) is routed to stderr in a manner that output
* from different tasks does not interleave (but see "ungroup" below).
*
- * start_failure_fn and task_finished_fn can be NULL to omit any
- * special handling.
- *
* If the "ungroup" option isn't specified, the API will set the
* "stdout_to_stderr" parameter in "struct child_process" and provide
* the callbacks with a "struct strbuf *out" parameter to write output
@@ -479,20 +518,8 @@ typedef int (*task_finished_fn)(int result,
* NULL "struct strbuf *out" parameter, and are responsible for
* emitting their own output, including dealing with any race
* conditions due to writing in parallel to stdout and stderr.
- * The "ungroup" option can be enabled by setting the global
- * "run_processes_parallel_ungroup" to "1" before invoking
- * run_processes_parallel(), it will be set back to "0" as soon as the
- * API reads that setting.
*/
-extern int run_processes_parallel_ungroup;
-int run_processes_parallel(int n,
- get_next_task_fn,
- start_failure_fn,
- task_finished_fn,
- void *pp_cb);
-int run_processes_parallel_tr2(int n, get_next_task_fn, start_failure_fn,
- task_finished_fn, void *pp_cb,
- const char *tr2_category, const char *tr2_label);
+void run_processes_parallel(const struct run_process_parallel_opts *opts);
/**
* Convenience function which prepares env for a command to be run in a
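A minimal sketch, not part of the diff, of a caller using the new options struct; the function names, the trace2 category/label, and the process count are placeholders:

    static int next_task(struct child_process *cp, struct strbuf *out,
                         void *cb, void **task_cb)
    {
        /* fill "cp" and return 1 to start another child, 0 when done */
        return 0;
    }

    static void run_four_wide(void *state)
    {
        const struct run_process_parallel_opts opts = {
            .tr2_category = "example",    /* set both trace2 fields or neither */
            .tr2_label = "parallel",
            .processes = 4,               /* must be non-zero */
            .get_next_task = next_task,   /* the only required callback */
            .data = state,
        };

        run_processes_parallel(&opts);
    }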
diff --git a/scalar.c b/scalar.c
index c5c1ce6891..6c52243cdf 100644
--- a/scalar.c
+++ b/scalar.c
@@ -69,21 +69,18 @@ static void setup_enlistment_directory(int argc, const char **argv,
static int run_git(const char *arg, ...)
{
- struct strvec argv = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
va_list args;
const char *p;
- int res;
va_start(args, arg);
- strvec_push(&argv, arg);
+ strvec_push(&cmd.args, arg);
while ((p = va_arg(args, const char *)))
- strvec_push(&argv, p);
+ strvec_push(&cmd.args, p);
va_end(args);
- res = run_command_v_opt(argv.v, RUN_GIT_CMD);
-
- strvec_clear(&argv);
- return res;
+ cmd.git_cmd = 1;
+ return run_command(&cmd);
}
struct scalar_config {
@@ -207,7 +204,10 @@ static int set_recommended_config(int reconfigure)
static int toggle_maintenance(int enable)
{
- return run_git("maintenance", enable ? "start" : "unregister", NULL);
+ return run_git("maintenance",
+ enable ? "start" : "unregister",
+ enable ? NULL : "--force",
+ NULL);
}
static int add_or_remove_enlistment(int add)
@@ -596,6 +596,24 @@ static int get_scalar_repos(const char *key, const char *value, void *data)
return 0;
}
+static int remove_deleted_enlistment(struct strbuf *path)
+{
+ int res = 0;
+ strbuf_realpath_forgiving(path, path->buf, 1);
+
+ if (run_git("config", "--global",
+ "--unset", "--fixed-value",
+ "scalar.repo", path->buf, NULL) < 0)
+ res = -1;
+
+ if (run_git("config", "--global",
+ "--unset", "--fixed-value",
+ "maintenance.repo", path->buf, NULL) < 0)
+ res = -1;
+
+ return res;
+}
+
static int cmd_reconfigure(int argc, const char **argv)
{
int all = 0;
@@ -635,8 +653,22 @@ static int cmd_reconfigure(int argc, const char **argv)
strbuf_reset(&gitdir);
if (chdir(dir) < 0) {
- warning_errno(_("could not switch to '%s'"), dir);
- res = -1;
+ struct strbuf buf = STRBUF_INIT;
+
+ if (errno != ENOENT) {
+ warning_errno(_("could not switch to '%s'"), dir);
+ res = -1;
+ continue;
+ }
+
+ strbuf_addstr(&buf, dir);
+ if (remove_deleted_enlistment(&buf))
+ res = error(_("could not remove stale "
+ "scalar.repo '%s'"), dir);
+ else
+ warning(_("removing stale scalar.repo '%s'"),
+ dir);
+ strbuf_release(&buf);
} else if (discover_git_directory(&commondir, &gitdir) < 0) {
warning_errno(_("git repository gone in '%s'"), dir);
res = -1;
@@ -722,24 +754,6 @@ static int cmd_run(int argc, const char **argv)
return 0;
}
-static int remove_deleted_enlistment(struct strbuf *path)
-{
- int res = 0;
- strbuf_realpath_forgiving(path, path->buf, 1);
-
- if (run_git("config", "--global",
- "--unset", "--fixed-value",
- "scalar.repo", path->buf, NULL) < 0)
- res = -1;
-
- if (run_git("config", "--global",
- "--unset", "--fixed-value",
- "maintenance.repo", path->buf, NULL) < 0)
- res = -1;
-
- return res;
-}
-
static int cmd_unregister(int argc, const char **argv)
{
struct option options[] = {
diff --git a/sequencer.c b/sequencer.c
index d26ede83c4..d4608d6a08 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -375,6 +375,7 @@ int sequencer_remove_state(struct replay_opts *opts)
}
free(opts->gpg_sign);
+ free(opts->reflog_action);
free(opts->default_strategy);
free(opts->strategy);
for (i = 0; i < opts->xopts_nr; i++)
@@ -915,7 +916,7 @@ int read_author_script(const char *path, char **name, char **email, char **date,
error(_("missing 'GIT_AUTHOR_EMAIL'"));
if (date_i == -2)
error(_("missing 'GIT_AUTHOR_DATE'"));
- if (date_i < 0 || email_i < 0 || date_i < 0 || err)
+ if (name_i < 0 || email_i < 0 || date_i < 0 || err)
goto finish;
*name = kv.items[name_i].util;
*email = kv.items[email_i].util;
@@ -1050,6 +1051,8 @@ static int run_git_commit(const char *defmsg,
gpg_opt, gpg_opt);
}
+ strvec_pushf(&cmd.env, GIT_REFLOG_ACTION "=%s", opts->reflog_message);
+
if (opts->committer_date_is_author_date)
strvec_pushf(&cmd.env, "GIT_COMMITTER_DATE=%s",
opts->ignore_date ?
@@ -1589,8 +1592,8 @@ static int try_to_commit(struct repository *r,
goto out;
}
- if (update_head_with_reflog(current_head, oid,
- getenv("GIT_REFLOG_ACTION"), msg, &err)) {
+ if (update_head_with_reflog(current_head, oid, opts->reflog_message,
+ msg, &err)) {
res = error("%s", err.buf);
goto out;
}
@@ -2894,6 +2897,7 @@ static void read_strategy_opts(struct replay_opts *opts, struct strbuf *buf)
strbuf_reset(buf);
if (!read_oneliner(buf, rebase_path_strategy(), 0))
return;
+ free(opts->strategy);
opts->strategy = strbuf_detach(buf, NULL);
if (!read_oneliner(buf, rebase_path_strategy_opts(), 0))
return;
@@ -3183,18 +3187,15 @@ static int rollback_is_safe(void)
static int reset_merge(const struct object_id *oid)
{
- int ret;
- struct strvec argv = STRVEC_INIT;
+ struct child_process cmd = CHILD_PROCESS_INIT;
- strvec_pushl(&argv, "reset", "--merge", NULL);
+ cmd.git_cmd = 1;
+ strvec_pushl(&cmd.args, "reset", "--merge", NULL);
if (!is_null_oid(oid))
- strvec_push(&argv, oid_to_hex(oid));
+ strvec_push(&cmd.args, oid_to_hex(oid));
- ret = run_command_v_opt(argv.v, RUN_GIT_CMD);
- strvec_clear(&argv);
-
- return ret;
+ return run_command(&cmd);
}
static int rollback_single_pick(struct repository *r)
@@ -3558,12 +3559,13 @@ static int error_failed_squash(struct repository *r,
static int do_exec(struct repository *r, const char *command_line)
{
- const char *child_argv[] = { NULL, NULL };
+ struct child_process cmd = CHILD_PROCESS_INIT;
int dirty, status;
fprintf(stderr, _("Executing: %s\n"), command_line);
- child_argv[0] = command_line;
- status = run_command_v_opt(child_argv, RUN_USING_SHELL);
+ cmd.use_shell = 1;
+ strvec_push(&cmd.args, command_line);
+ status = run_command(&cmd);
/* force re-reading of the cache */
if (discard_index(r->index) < 0 || repo_read_index(r) < 0)
@@ -3674,17 +3676,28 @@ static int do_label(struct repository *r, const char *name, int len)
return ret;
}
+static const char *sequencer_reflog_action(struct replay_opts *opts)
+{
+ if (!opts->reflog_action) {
+ opts->reflog_action = getenv(GIT_REFLOG_ACTION);
+ opts->reflog_action =
+ xstrdup(opts->reflog_action ? opts->reflog_action
+ : action_name(opts));
+ }
+
+ return opts->reflog_action;
+}
+
__attribute__((format (printf, 3, 4)))
static const char *reflog_message(struct replay_opts *opts,
const char *sub_action, const char *fmt, ...)
{
va_list ap;
static struct strbuf buf = STRBUF_INIT;
- char *reflog_action = getenv(GIT_REFLOG_ACTION);
va_start(ap, fmt);
strbuf_reset(&buf);
- strbuf_addstr(&buf, reflog_action ? reflog_action : action_name(opts));
+ strbuf_addstr(&buf, sequencer_reflog_action(opts));
if (sub_action)
strbuf_addf(&buf, " (%s)", sub_action);
if (fmt) {
@@ -3696,6 +3709,28 @@ static const char *reflog_message(struct replay_opts *opts,
return buf.buf;
}
+static struct commit *lookup_label(struct repository *r, const char *label,
+ int len, struct strbuf *buf)
+{
+ struct commit *commit;
+ struct object_id oid;
+
+ strbuf_reset(buf);
+ strbuf_addf(buf, "refs/rewritten/%.*s", len, label);
+ if (!read_ref(buf->buf, &oid)) {
+ commit = lookup_commit_object(r, &oid);
+ } else {
+ /* fall back to non-rewritten ref or commit */
+ strbuf_splice(buf, 0, strlen("refs/rewritten/"), "", 0);
+ commit = lookup_commit_reference_by_name(buf->buf);
+ }
+
+ if (!commit)
+ error(_("could not resolve '%s'"), buf->buf);
+
+ return commit;
+}
+
static int do_reset(struct repository *r,
const char *name, int len,
struct replay_opts *opts)
@@ -3727,6 +3762,7 @@ static int do_reset(struct repository *r,
oidcpy(&oid, &opts->squash_onto);
} else {
int i;
+ struct commit *commit;
/* Determine the length of the label */
for (i = 0; i < len; i++)
@@ -3734,12 +3770,12 @@ static int do_reset(struct repository *r,
break;
len = i;
- strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
- if (get_oid(ref_name.buf, &oid) &&
- get_oid(ref_name.buf + strlen("refs/rewritten/"), &oid)) {
- ret = error(_("could not read '%s'"), ref_name.buf);
+ commit = lookup_label(r, name, len, &ref_name);
+ if (!commit) {
+ ret = -1;
goto cleanup;
}
+ oid = commit->object.oid;
}
setup_unpack_trees_porcelain(&unpack_tree_opts, "reset");
@@ -3750,6 +3786,7 @@ static int do_reset(struct repository *r,
unpack_tree_opts.merge = 1;
unpack_tree_opts.update = 1;
unpack_tree_opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
+ unpack_tree_opts.skip_cache_tree_update = 1;
init_checkout_metadata(&unpack_tree_opts.meta, name, &oid, NULL);
if (repo_read_index_unmerged(r)) {
@@ -3786,26 +3823,6 @@ cleanup:
return ret;
}
-static struct commit *lookup_label(const char *label, int len,
- struct strbuf *buf)
-{
- struct commit *commit;
-
- strbuf_reset(buf);
- strbuf_addf(buf, "refs/rewritten/%.*s", len, label);
- commit = lookup_commit_reference_by_name(buf->buf);
- if (!commit) {
- /* fall back to non-rewritten ref or commit */
- strbuf_splice(buf, 0, strlen("refs/rewritten/"), "", 0);
- commit = lookup_commit_reference_by_name(buf->buf);
- }
-
- if (!commit)
- error(_("could not resolve '%s'"), buf->buf);
-
- return commit;
-}
-
static int do_merge(struct repository *r,
struct commit *commit,
const char *arg, int arg_len,
@@ -3853,7 +3870,7 @@ static int do_merge(struct repository *r,
k = strcspn(p, " \t\n");
if (!k)
continue;
- merge_commit = lookup_label(p, k, &ref_name);
+ merge_commit = lookup_label(r, p, k, &ref_name);
if (!merge_commit) {
ret = error(_("unable to parse '%.*s'"), k, p);
goto leave_merge;
@@ -4130,11 +4147,14 @@ static int write_update_refs_state(struct string_list *refs_to_oids)
struct string_list_item *item;
char *path;
- if (!refs_to_oids->nr)
- return 0;
-
path = rebase_path_update_refs(the_repository->gitdir);
+ if (!refs_to_oids->nr) {
+ if (unlink(path) && errno != ENOENT)
+ result = error_errno(_("could not unlink: %s"), path);
+ goto cleanup;
+ }
+
if (safe_create_leading_directories(path)) {
result = error(_("unable to create leading directories of %s"),
path);
@@ -4497,7 +4517,7 @@ static int checkout_onto(struct repository *r, struct replay_opts *opts,
RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
.head_msg = reflog_message(opts, "start", "checkout %s",
onto_name),
- .default_reflog_action = "rebase"
+ .default_reflog_action = sequencer_reflog_action(opts)
};
if (reset_head(r, &ropts)) {
apply_autostash(rebase_path_autostash());
@@ -4566,11 +4586,8 @@ static int pick_commits(struct repository *r,
struct replay_opts *opts)
{
int res = 0, reschedule = 0;
- char *prev_reflog_action;
- /* Note that 0 for 3rd parameter of setenv means set only if not set */
- setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
- prev_reflog_action = xstrdup(getenv(GIT_REFLOG_ACTION));
+ opts->reflog_message = sequencer_reflog_action(opts);
if (opts->allow_ff)
assert(!(opts->signoff || opts->no_commit ||
opts->record_origin || should_edit(opts) ||
@@ -4618,14 +4635,12 @@ static int pick_commits(struct repository *r,
}
if (item->command <= TODO_SQUASH) {
if (is_rebase_i(opts))
- setenv(GIT_REFLOG_ACTION, reflog_message(opts,
- command_to_string(item->command), NULL),
- 1);
+ opts->reflog_message = reflog_message(opts,
+ command_to_string(item->command), NULL);
+
res = do_pick_commit(r, item, opts,
is_final_fixup(todo_list),
&check_todo);
- if (is_rebase_i(opts))
- setenv(GIT_REFLOG_ACTION, prev_reflog_action, 1);
if (is_rebase_i(opts) && res < 0) {
/* Reschedule */
advise(_(rescheduled_advice),
@@ -4867,14 +4882,14 @@ cleanup_head_ref:
static int continue_single_pick(struct repository *r, struct replay_opts *opts)
{
- struct strvec argv = STRVEC_INIT;
- int ret;
+ struct child_process cmd = CHILD_PROCESS_INIT;
if (!refs_ref_exists(get_main_ref_store(r), "CHERRY_PICK_HEAD") &&
!refs_ref_exists(get_main_ref_store(r), "REVERT_HEAD"))
return error(_("no cherry-pick or revert in progress"));
- strvec_push(&argv, "commit");
+ cmd.git_cmd = 1;
+ strvec_push(&cmd.args, "commit");
/*
* continue_single_pick() handles the case of recovering from a
@@ -4887,11 +4902,9 @@ static int continue_single_pick(struct repository *r, struct replay_opts *opts)
* Include --cleanup=strip as well because we don't want the
* "# Conflicts:" messages.
*/
- strvec_pushl(&argv, "--no-edit", "--cleanup=strip", NULL);
+ strvec_pushl(&cmd.args, "--no-edit", "--cleanup=strip", NULL);
- ret = run_command_v_opt(argv.v, RUN_GIT_CMD);
- strvec_clear(&argv);
- return ret;
+ return run_command(&cmd);
}
static int commit_staged_changes(struct repository *r,
@@ -5060,6 +5073,7 @@ int sequencer_continue(struct repository *r, struct replay_opts *opts)
unlink(rebase_path_dropped());
}
+ opts->reflog_message = reflog_message(opts, "continue", NULL);
if (commit_staged_changes(r, opts, &todo_list)) {
res = -1;
goto release_todo_list;
@@ -5111,7 +5125,7 @@ static int single_pick(struct repository *r,
TODO_PICK : TODO_REVERT;
item.commit = cmit;
- setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
+ opts->reflog_message = sequencer_reflog_action(opts);
return do_pick_commit(r, &item, opts, 0, &check_todo);
}
@@ -6203,8 +6217,6 @@ int todo_list_rearrange_squash(struct todo_list *todo_list)
return error(_("the script was already rearranged."));
}
- *commit_todo_item_at(&commit_todo, item->commit) = item;
-
parse_commit(item->commit);
commit_buffer = logmsg_reencode(item->commit, NULL, "UTF-8");
find_commit_subject(commit_buffer, &subject);
@@ -6271,6 +6283,8 @@ int todo_list_rearrange_squash(struct todo_list *todo_list)
strhash(entry->subject));
hashmap_put(&subject2item, &entry->entry);
}
+
+ *commit_todo_item_at(&commit_todo, item->commit) = item;
}
if (rearranged) {
diff --git a/sequencer.h b/sequencer.h
index 563fe59933..888c18aad7 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -63,6 +63,9 @@ struct replay_opts {
char **xopts;
size_t xopts_nr, xopts_alloc;
+ /* Reflog */
+ char *reflog_action;
+
/* Used by fixup/squash */
struct strbuf current_fixups;
int current_fixup_count;
@@ -73,6 +76,9 @@ struct replay_opts {
/* Only used by REPLAY_NONE */
struct rev_info *revs;
+
+ /* Private use */
+ const char *reflog_message;
};
#define REPLAY_OPTS_INIT { .edit = -1, .action = -1, .current_fixups = STRBUF_INIT }
diff --git a/setup.c b/setup.c
index cefd5f63c4..a4525732fe 100644
--- a/setup.c
+++ b/setup.c
@@ -577,6 +577,18 @@ static enum extension_result handle_extension(const char *var,
"extensions.objectformat", value);
data->hash_algo = format;
return EXTENSION_OK;
+ } else if (!strcmp(ext, "refformat")) {
+ if (!strcmp(value, "files"))
+ data->ref_format |= REF_FORMAT_FILES;
+ else if (!strcmp(value, "packed"))
+ data->ref_format |= REF_FORMAT_PACKED;
+ else if (!strcmp(value, "packed-v2"))
+ data->ref_format |= REF_FORMAT_PACKED_V2;
+ else
+ return error(_("invalid value for '%s': '%s'"),
+ "extensions.refFormat", value);
+ data->ref_format_count++;
+ return EXTENSION_OK;
}
return EXTENSION_UNKNOWN;
}
@@ -718,6 +730,14 @@ int read_repository_format(struct repository_format *format, const char *path)
git_config_from_file(check_repo_format, path, format);
if (format->version == -1)
clear_repository_format(format);
+
+ /* Set default ref_format if no extensions.refFormat exists. */
+ if (!format->ref_format_count) {
+ format->ref_format = REF_FORMAT_FILES | REF_FORMAT_PACKED;
+ if (git_env_ulong("GIT_TEST_PACKED_REFS_VERSION", 0) == 2)
+ format->ref_format |= REF_FORMAT_PACKED_V2;
+ }
+
return format->version;
}
@@ -1420,6 +1440,9 @@ int discover_git_directory(struct strbuf *commondir,
candidate.partial_clone;
candidate.partial_clone = NULL;
+ /* take ownership of candidate.ref_format */
+ the_repository->ref_format = candidate.ref_format;
+
clear_repository_format(&candidate);
return 0;
}
@@ -1556,6 +1579,8 @@ const char *setup_git_directory_gently(int *nongit_ok)
the_repository->repository_format_partial_clone =
repo_fmt.partial_clone;
repo_fmt.partial_clone = NULL;
+
+ the_repository->ref_format = repo_fmt.ref_format;
}
}
/*
@@ -1645,6 +1670,7 @@ void check_repository_format(struct repository_format *fmt)
repo_set_hash_algo(the_repository, fmt->hash_algo);
the_repository->repository_format_partial_clone =
xstrdup_or_null(fmt->partial_clone);
+ the_repository->ref_format = fmt->ref_format;
clear_repository_format(&repo_fmt);
}
diff --git a/sha1dc_git.h b/sha1dc_git.h
index 41e1c3fd3f..60e3ce8439 100644
--- a/sha1dc_git.h
+++ b/sha1dc_git.h
@@ -17,6 +17,7 @@ void git_SHA1DCInit(SHA1_CTX *);
void git_SHA1DCFinal(unsigned char [20], SHA1_CTX *);
void git_SHA1DCUpdate(SHA1_CTX *ctx, const void *data, unsigned long len);
+#define platform_SHA_IS_SHA1DC /* used by "test-tool sha1-is-sha1dc" */
#define platform_SHA_CTX SHA1_CTX
#define platform_SHA1_Init git_SHA1DCInit
#define platform_SHA1_Update git_SHA1DCUpdate
diff --git a/shared.mak b/shared.mak
index 33f43edbf9..be1f30ff20 100644
--- a/shared.mak
+++ b/shared.mak
@@ -60,6 +60,7 @@ ifndef V
QUIET_AR = @echo ' ' AR $@;
QUIET_LINK = @echo ' ' LINK $@;
QUIET_BUILT_IN = @echo ' ' BUILTIN $@;
+ QUIET_CP = @echo ' ' CP $< $@;
QUIET_LNCP = @echo ' ' LN/CP $@;
QUIET_XGETTEXT = @echo ' ' XGETTEXT $@;
QUIET_MSGINIT = @echo ' ' MSGINIT $@;
@@ -69,8 +70,11 @@ ifndef V
QUIET_SP = @echo ' ' SP $<;
QUIET_HDR = @echo ' ' HDR $(<:hcc=h);
QUIET_RC = @echo ' ' RC $@;
- QUIET_SPATCH = @echo ' ' SPATCH $<;
- QUIET_SPATCH_T = @echo ' ' SPATCH TEST $(@:.build/%=%);
+
+## Used in "Makefile": SPATCH
+ QUIET_SPATCH = @echo ' ' SPATCH $< \>$@;
+ QUIET_SPATCH_TEST = @echo ' ' SPATCH TEST $(@:.build/%=%);
+ QUIET_SPATCH_CAT = @echo ' ' SPATCH CAT $(@:%.patch=%.d/)\*\*.patch \>$@;
## Used in "Documentation/Makefile"
QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@;
diff --git a/shell.c b/shell.c
index 7ff4109db7..af0d7c734f 100644
--- a/shell.c
+++ b/shell.c
@@ -52,21 +52,24 @@ static void cd_to_homedir(void)
static void run_shell(void)
{
int done = 0;
- static const char *help_argv[] = { HELP_COMMAND, NULL };
+ struct child_process help_cmd = CHILD_PROCESS_INIT;
if (!access(NOLOGIN_COMMAND, F_OK)) {
/* Interactive login disabled. */
- const char *argv[] = { NOLOGIN_COMMAND, NULL };
+ struct child_process nologin_cmd = CHILD_PROCESS_INIT;
int status;
- status = run_command_v_opt(argv, 0);
+ strvec_push(&nologin_cmd.args, NOLOGIN_COMMAND);
+ status = run_command(&nologin_cmd);
if (status < 0)
exit(127);
exit(status);
}
/* Print help if enabled */
- run_command_v_opt(help_argv, RUN_SILENT_EXEC_FAILURE);
+ help_cmd.silent_exec_failure = 1;
+ strvec_push(&help_cmd.args, HELP_COMMAND);
+ run_command(&help_cmd);
do {
const char *prog;
@@ -125,9 +128,13 @@ static void run_shell(void)
!strcmp(prog, "exit") || !strcmp(prog, "bye")) {
done = 1;
} else if (is_valid_cmd_name(prog)) {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
full_cmd = make_cmd(prog);
argv[0] = full_cmd;
- code = run_command_v_opt(argv, RUN_SILENT_EXEC_FAILURE);
+ cmd.silent_exec_failure = 1;
+ strvec_pushv(&cmd.args, argv);
+ code = run_command(&cmd);
if (code == -1 && errno == ENOENT) {
fprintf(stderr, "unrecognized command '%s'\n", prog);
}
diff --git a/shortlog.h b/shortlog.h
index 3f7e9aabca..28d04f951a 100644
--- a/shortlog.h
+++ b/shortlog.h
@@ -2,6 +2,7 @@
#define SHORTLOG_H
#include "string-list.h"
+#include "date.h"
struct commit;
@@ -15,13 +16,16 @@ struct shortlog {
int in2;
int user_format;
int abbrev;
+ struct date_mode date_mode;
enum {
SHORTLOG_GROUP_AUTHOR = (1 << 0),
SHORTLOG_GROUP_COMMITTER = (1 << 1),
SHORTLOG_GROUP_TRAILER = (1 << 2),
+ SHORTLOG_GROUP_FORMAT = (1 << 3),
} groups;
struct string_list trailers;
+ struct string_list format;
int email;
struct string_list mailmap;
@@ -29,6 +33,7 @@ struct shortlog {
};
void shortlog_init(struct shortlog *log);
+void shortlog_finish_setup(struct shortlog *log);
void shortlog_add_commit(struct shortlog *log, struct commit *commit);
diff --git a/sparse-index.c b/sparse-index.c
index e4a54ce194..8c269dab80 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -493,24 +493,42 @@ void clear_skip_worktree_from_present_files(struct index_state *istate)
int dir_found = 1;
int i;
+ int path_count[2] = {0, 0};
+ int restarted = 0;
if (!core_apply_sparse_checkout ||
sparse_expect_files_outside_of_patterns)
return;
+ trace2_region_enter("index", "clear_skip_worktree_from_present_files",
+ istate->repo);
restart:
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce = istate->cache[i];
- if (ce_skip_worktree(ce) &&
- path_found(ce->name, &last_dirname, &dir_len, &dir_found)) {
- if (S_ISSPARSEDIR(ce->ce_mode)) {
- ensure_full_index(istate);
- goto restart;
+ if (ce_skip_worktree(ce)) {
+ path_count[restarted]++;
+ if (path_found(ce->name, &last_dirname, &dir_len, &dir_found)) {
+ if (S_ISSPARSEDIR(ce->ce_mode)) {
+ if (restarted)
+ BUG("ensure-full-index did not fully flatten?");
+ ensure_full_index(istate);
+ restarted = 1;
+ goto restart;
+ }
+ ce->ce_flags &= ~CE_SKIP_WORKTREE;
}
- ce->ce_flags &= ~CE_SKIP_WORKTREE;
}
}
+
+ if (path_count[0])
+ trace2_data_intmax("index", istate->repo,
+ "sparse_path_count", path_count[0]);
+ if (restarted)
+ trace2_data_intmax("index", istate->repo,
+ "sparse_path_count_full", path_count[1]);
+ trace2_region_leave("index", "clear_skip_worktree_from_present_files",
+ istate->repo);
}
/*
diff --git a/string-list.c b/string-list.c
index 549fc416d6..42bacaec55 100644
--- a/string-list.c
+++ b/string-list.c
@@ -156,7 +156,7 @@ void filter_string_list(struct string_list *list, int free_util,
list->nr = dst;
}
-static int item_is_not_empty(struct string_list_item *item, void *unused)
+static int item_is_not_empty(struct string_list_item *item, void *data UNUSED)
{
return *item->string != '\0';
}
diff --git a/string-list.h b/string-list.h
index d5a744e143..c7b0d5d000 100644
--- a/string-list.h
+++ b/string-list.h
@@ -141,7 +141,12 @@ void string_list_clear_func(struct string_list *list, string_list_clear_func_t c
int for_each_string_list(struct string_list *list,
string_list_each_func_t func, void *cb_data);
-/** Iterate over each item, as a macro. */
+/**
+ * Iterate over each item, as a macro.
+ *
+ * Be sure that 'list' is non-NULL. The macro cannot perform NULL
+ * checks due to -Werror=address errors.
+ */
#define for_each_string_list_item(item,list) \
for (item = (list)->items; \
item && item < (list)->items + (list)->nr; \
diff --git a/submodule-config.c b/submodule-config.c
index cd7ee236a1..4dc61b3a78 100644
--- a/submodule-config.c
+++ b/submodule-config.c
@@ -303,6 +303,8 @@ int parse_submodule_fetchjobs(const char *var, const char *value)
int fetchjobs = git_config_int(var, value);
if (fetchjobs < 0)
die(_("negative values not allowed for submodule.fetchJobs"));
+ if (!fetchjobs)
+ fetchjobs = online_cpus();
return fetchjobs;
}
diff --git a/submodule.c b/submodule.c
index bf7a2c7918..f350932ef1 100644
--- a/submodule.c
+++ b/submodule.c
@@ -503,6 +503,8 @@ static void print_submodule_diff_summary(struct repository *r, struct rev_info *
void prepare_submodule_repo_env(struct strvec *out)
{
+ if (the_repository->settings.submodule_propagate_branches)
+ strvec_pushf(out, "%s=1", GIT_SUBMODULE_PROPAGATE_BRANCHES_ENVIRONMENT);
prepare_other_repo_env(out, DEFAULT_GIT_DIR_ENVIRONMENT);
}
@@ -1130,6 +1132,12 @@ static int push_submodule(const char *path,
if (for_each_remote_ref_submodule(path, has_remote, NULL) > 0) {
struct child_process cp = CHILD_PROCESS_INIT;
strvec_push(&cp.args, "push");
+ /*
+ * When recursing into a submodule, treat any "only" configurations as "on-
+ * demand", since "only" would not work (we need all submodules to be pushed
+ * in order to be able to push the superproject).
+ */
+ strvec_push(&cp.args, "--recurse-submodules=only-is-on-demand");
if (dry_run)
strvec_push(&cp.args, "--dry-run");
@@ -1363,6 +1371,18 @@ int submodule_touches_in_range(struct repository *r,
return ret;
}
+struct submodule_parallel_status {
+ size_t index_count;
+ int result;
+
+ struct string_list *submodule_names;
+ struct repository *r;
+
+ /* Pending statuses by OIDs */
+ struct status_task **oid_status_tasks;
+ int oid_status_tasks_nr, oid_status_tasks_alloc;
+};
+
struct submodule_parallel_fetch {
/*
* The index of the last index entry processed by
@@ -1445,6 +1465,12 @@ struct fetch_task {
struct oid_array *commits; /* Ensure these commits are fetched */
};
+struct status_task {
+ const char *path;
+ unsigned dirty_submodule;
+ int ignore_untracked;
+};
+
/**
* When a submodule is not defined in .gitmodules, we cannot access it
* via the regular submodule-config. Create a fake submodule, which we can
@@ -1819,6 +1845,17 @@ int fetch_submodules(struct repository *r,
{
int i;
struct submodule_parallel_fetch spf = SPF_INIT;
+ const struct run_process_parallel_opts opts = {
+ .tr2_category = "submodule",
+ .tr2_label = "parallel/fetch",
+
+ .processes = max_parallel_jobs,
+
+ .get_next_task = get_next_submodule,
+ .start_failure = fetch_start_failure,
+ .task_finished = fetch_finish,
+ .data = &spf,
+ };
spf.r = r;
spf.command_line_option = command_line_option;
@@ -1840,12 +1877,7 @@ int fetch_submodules(struct repository *r,
calculate_changed_submodule_paths(r, &spf.changed_submodule_names);
string_list_sort(&spf.changed_submodule_names);
- run_processes_parallel_tr2(max_parallel_jobs,
- get_next_submodule,
- fetch_start_failure,
- fetch_finish,
- &spf,
- "submodule", "parallel/fetch");
+ run_processes_parallel(&opts);
if (spf.submodules_with_errors.len > 0)
fprintf(stderr, _("Errors during submodule fetch:\n%s"),
@@ -1858,6 +1890,45 @@ out:
return spf.result;
}
+static int parse_status_porcelain(char *str, size_t len,
+ unsigned *dirty_submodule,
+ int ignore_untracked)
+{
+ /* regular untracked files */
+ if (str[0] == '?')
+ *dirty_submodule |= DIRTY_SUBMODULE_UNTRACKED;
+
+ if (str[0] == 'u' ||
+ str[0] == '1' ||
+ str[0] == '2') {
+ /* T = line type, XY = status, SSSS = submodule state */
+ if (len < strlen("T XY SSSS"))
+ BUG("invalid status --porcelain=2 line %s",
+ str);
+
+ if (str[5] == 'S' && str[8] == 'U')
+ /* nested untracked file */
+ *dirty_submodule |= DIRTY_SUBMODULE_UNTRACKED;
+
+ if (str[0] == 'u' ||
+ str[0] == '2' ||
+ memcmp(str + 5, "S..U", 4))
+ /* other change */
+ *dirty_submodule |= DIRTY_SUBMODULE_MODIFIED;
+ }
+
+ if ((*dirty_submodule & DIRTY_SUBMODULE_MODIFIED) &&
+ ((*dirty_submodule & DIRTY_SUBMODULE_UNTRACKED) ||
+ ignore_untracked)) {
+ /*
+ * We're not interested in any further information from
+ * the child any more, neither output nor its exit code.
+ */
+ return 1;
+ }
+ return 0;
+}
+
unsigned is_submodule_modified(const char *path, int ignore_untracked)
{
struct child_process cp = CHILD_PROCESS_INIT;
@@ -1894,39 +1965,13 @@ unsigned is_submodule_modified(const char *path, int ignore_untracked)
fp = xfdopen(cp.out, "r");
while (strbuf_getwholeline(&buf, fp, '\n') != EOF) {
- /* regular untracked files */
- if (buf.buf[0] == '?')
- dirty_submodule |= DIRTY_SUBMODULE_UNTRACKED;
-
- if (buf.buf[0] == 'u' ||
- buf.buf[0] == '1' ||
- buf.buf[0] == '2') {
- /* T = line type, XY = status, SSSS = submodule state */
- if (buf.len < strlen("T XY SSSS"))
- BUG("invalid status --porcelain=2 line %s",
- buf.buf);
-
- if (buf.buf[5] == 'S' && buf.buf[8] == 'U')
- /* nested untracked file */
- dirty_submodule |= DIRTY_SUBMODULE_UNTRACKED;
-
- if (buf.buf[0] == 'u' ||
- buf.buf[0] == '2' ||
- memcmp(buf.buf + 5, "S..U", 4))
- /* other change */
- dirty_submodule |= DIRTY_SUBMODULE_MODIFIED;
- }
+ char *str = buf.buf;
+ const size_t len = buf.len;
- if ((dirty_submodule & DIRTY_SUBMODULE_MODIFIED) &&
- ((dirty_submodule & DIRTY_SUBMODULE_UNTRACKED) ||
- ignore_untracked)) {
- /*
- * We're not interested in any further information from
- * the child any more, neither output nor its exit code.
- */
- ignore_cp_exit_code = 1;
+ ignore_cp_exit_code = parse_status_porcelain(str, len, &dirty_submodule,
+ ignore_untracked);
+ if (ignore_cp_exit_code)
break;
- }
}
fclose(fp);
@@ -1937,6 +1982,142 @@ unsigned is_submodule_modified(const char *path, int ignore_untracked)
return dirty_submodule;
}
+static struct status_task *
+get_status_task_from_index(struct submodule_parallel_status *sps,
+ struct strbuf *err)
+{
+ for (; sps->index_count < sps->submodule_names->nr; sps->index_count++) {
+ struct submodule_status_util *util = sps->submodule_names->items[sps->index_count].util;
+ const struct cache_entry *ce = util->ce;
+ struct status_task *task;
+ struct status_task tmp = {
+ .path = ce->name,
+ .dirty_submodule = util->dirty_submodule,
+ .ignore_untracked = util->ignore_untracked,
+ };
+ struct strbuf buf = STRBUF_INIT;
+ const char *git_dir;
+
+ strbuf_addf(&buf, "%s/.git", ce->name);
+ git_dir = read_gitfile(buf.buf);
+ if (!git_dir)
+ git_dir = buf.buf;
+ if (!is_git_directory(git_dir)) {
+ if (is_directory(git_dir))
+ die(_("'%s' not recognized as a git repository"), git_dir);
+ strbuf_release(&buf);
+ /* The submodule is not checked out, so it is not modified */
+ util->dirty_submodule = 0;
+ continue;
+ }
+ strbuf_release(&buf);
+
+ task = xmalloc(sizeof(*task));
+ memcpy(task, &tmp, sizeof(*task));
+ sps->index_count++;
+ return task;
+ }
+ return NULL;
+}
+
+
+static int get_next_submodule_status(struct child_process *cp,
+ struct strbuf *err, void *data,
+ void **task_cb)
+{
+ struct submodule_parallel_status *sps = data;
+ struct status_task *task = get_status_task_from_index(sps, err);
+
+ if (!task)
+ return 0;
+
+ child_process_init(cp);
+ prepare_submodule_repo_env_in_gitdir(&cp->env);
+
+ strvec_init(&cp->args);
+ strvec_pushl(&cp->args, "status", "--porcelain=2", NULL);
+ if (task->ignore_untracked)
+ strvec_push(&cp->args, "-uno");
+
+ prepare_submodule_repo_env(&cp->env);
+ cp->git_cmd = 1;
+ cp->dir = task->path;
+ *task_cb = task;
+ return 1;
+}
+
+static int status_start_failure(struct strbuf *err,
+ void *cb, void *task_cb)
+{
+ struct submodule_parallel_status *sps = cb;
+
+ sps->result = 1;
+ return 0;
+}
+
+static void status_duplicate_output(struct strbuf *process_out,
+ struct strbuf *out,
+ void *cb, void *task_cb)
+{
+ struct status_task *task = task_cb;
+ struct string_list list = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
+
+ string_list_split(&list, process_out->buf, '\n', -1);
+
+ for_each_string_list_item(item, &list) {
+ if (parse_status_porcelain(item->string,
+ strlen(item->string),
+ &task->dirty_submodule,
+ task->ignore_untracked))
+ break;
+ }
+ string_list_clear(&list, 0);
+ strbuf_reset(out);
+}
+
+static int status_finish(int retvalue, struct strbuf *err,
+ void *cb, void *task_cb)
+{
+ struct submodule_parallel_status *sps = cb;
+ struct status_task *task = task_cb;
+ struct string_list_item *it =
+ string_list_lookup(sps->submodule_names, task->path);
+ struct submodule_status_util *util = it->util;
+
+ util->dirty_submodule = task->dirty_submodule;
+ free(task);
+
+ return 0;
+}
+
+int get_submodules_status(struct repository *r,
+ struct string_list *submodules,
+ int max_parallel_jobs)
+{
+ struct submodule_parallel_status sps = {
+ .r = r,
+ .submodule_names = submodules,
+ };
+ const struct run_process_parallel_opts opts = {
+ .tr2_category = "submodule",
+ .tr2_label = "parallel/status",
+
+ .processes = max_parallel_jobs,
+
+ .get_next_task = get_next_submodule_status,
+ .start_failure = status_start_failure,
+ .duplicate_output = status_duplicate_output,
+ .task_finished = status_finish,
+ .data = &sps,
+ };
+
+ string_list_sort(sps.submodule_names);
+ run_processes_parallel(&opts);
+
+ return sps.result;
+}
+
int submodule_uses_gitfile(const char *path)
{
struct child_process cp = CHILD_PROCESS_INIT;
@@ -2133,8 +2314,7 @@ int submodule_move_head(const char *path,
if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) {
if (old_head) {
if (!submodule_uses_gitfile(path))
- absorb_git_dir_into_superproject(path,
- ABSORB_GITDIR_RECURSE_SUBMODULES);
+ absorb_git_dir_into_superproject(path);
} else {
struct strbuf gitdir = STRBUF_INIT;
submodule_name_to_gitdir(&gitdir, the_repository,
@@ -2268,6 +2448,7 @@ static void relocate_single_git_dir_into_superproject(const char *path)
char *old_git_dir = NULL, *real_old_git_dir = NULL, *real_new_git_dir = NULL;
struct strbuf new_gitdir = STRBUF_INIT;
const struct submodule *sub;
+ size_t off = 0;
if (submodule_uses_worktrees(path))
die(_("relocate_gitdir for submodule '%s' with "
@@ -2292,9 +2473,12 @@ static void relocate_single_git_dir_into_superproject(const char *path)
die(_("could not create directory '%s'"), new_gitdir.buf);
real_new_git_dir = real_pathdup(new_gitdir.buf, 1);
- fprintf(stderr, _("Migrating git directory of '%s%s' from\n'%s' to\n'%s'\n"),
+ while (real_old_git_dir[off] && real_new_git_dir[off] &&
+ real_old_git_dir[off] == real_new_git_dir[off])
+ off++;
+ fprintf(stderr, _("Migrating git directory of '%s%s' from '%s' to '%s'\n"),
get_super_prefix_or_empty(), path,
- real_old_git_dir, real_new_git_dir);
+ real_old_git_dir + off, real_new_git_dir + off);
relocate_gitdir(path, real_old_git_dir, real_new_git_dir);
@@ -2304,13 +2488,29 @@ static void relocate_single_git_dir_into_superproject(const char *path)
strbuf_release(&new_gitdir);
}
+static void absorb_git_dir_into_superproject_recurse(const char *path)
+{
+
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.dir = path;
+ cp.git_cmd = 1;
+ cp.no_stdin = 1;
+ strvec_pushf(&cp.args, "--super-prefix=%s%s/",
+ get_super_prefix_or_empty(), path);
+ strvec_pushl(&cp.args, "submodule--helper",
+ "absorbgitdirs", NULL);
+ prepare_submodule_repo_env(&cp.env);
+ if (run_command(&cp))
+ die(_("could not recurse into submodule '%s'"), path);
+}
+
/*
* Migrate the git directory of the submodule given by path from
* having its git directory within the working tree to the git dir nested
 * in its superproject's git dir under modules/.
*/
-void absorb_git_dir_into_superproject(const char *path,
- unsigned flags)
+void absorb_git_dir_into_superproject(const char *path)
{
int err_code;
const char *sub_git_dir;
@@ -2359,29 +2559,7 @@ void absorb_git_dir_into_superproject(const char *path,
}
strbuf_release(&gitdir);
- if (flags & ABSORB_GITDIR_RECURSE_SUBMODULES) {
- struct child_process cp = CHILD_PROCESS_INIT;
- struct strbuf sb = STRBUF_INIT;
-
- if (flags & ~ABSORB_GITDIR_RECURSE_SUBMODULES)
- BUG("we don't know how to pass the flags down?");
-
- strbuf_addstr(&sb, get_super_prefix_or_empty());
- strbuf_addstr(&sb, path);
- strbuf_addch(&sb, '/');
-
- cp.dir = path;
- cp.git_cmd = 1;
- cp.no_stdin = 1;
- strvec_pushl(&cp.args, "--super-prefix", sb.buf,
- "submodule--helper",
- "absorbgitdirs", NULL);
- prepare_submodule_repo_env(&cp.env);
- if (run_command(&cp))
- die(_("could not recurse into submodule '%s'"), path);
-
- strbuf_release(&sb);
- }
+ absorb_git_dir_into_superproject_recurse(path);
}
int get_superproject_working_tree(struct strbuf *buf)
diff --git a/submodule.h b/submodule.h
index 6a9fec6de1..bdcc597d0d 100644
--- a/submodule.h
+++ b/submodule.h
@@ -41,6 +41,12 @@ struct submodule_update_strategy {
.type = SM_UPDATE_UNSPECIFIED, \
}
+struct submodule_status_util {
+ int changed, ignore_untracked;
+ unsigned dirty_submodule, newmode;
+ struct cache_entry *ce;
+};
+
int is_gitmodules_unmerged(struct index_state *istate);
int is_writing_gitmodules_ok(void);
int is_staging_gitmodules_ok(struct index_state *istate);
@@ -94,6 +100,9 @@ int fetch_submodules(struct repository *r,
int command_line_option,
int default_option,
int quiet, int max_parallel_jobs);
+int get_submodules_status(struct repository *r,
+ struct string_list *submodules,
+ int max_parallel_jobs);
unsigned is_submodule_modified(const char *path, int ignore_untracked);
int submodule_uses_gitfile(const char *path);
@@ -164,9 +173,7 @@ void submodule_unset_core_worktree(const struct submodule *sub);
*/
void prepare_submodule_repo_env(struct strvec *env);
-#define ABSORB_GITDIR_RECURSE_SUBMODULES (1<<0)
-void absorb_git_dir_into_superproject(const char *path,
- unsigned flags);
+void absorb_git_dir_into_superproject(const char *path);
/*
* Return the absolute path of the working tree of the superproject, which this
diff --git a/t/Makefile b/t/Makefile
index 882782a519..2c2b252240 100644
--- a/t/Makefile
+++ b/t/Makefile
@@ -94,7 +94,7 @@ check-chainlint:
done \
} >'$(CHAINLINTTMP_SQ)'/expect && \
$(CHAINLINT) --emit-all '$(CHAINLINTTMP_SQ)'/tests | \
- grep -v '^[ ]*$$' >'$(CHAINLINTTMP_SQ)'/actual && \
+ sed -e 's/^[1-9][0-9]* //;/^[ ]*$$/d' >'$(CHAINLINTTMP_SQ)'/actual && \
if test -f ../GIT-BUILD-OPTIONS; then \
. ../GIT-BUILD-OPTIONS; \
fi && \
diff --git a/t/README b/t/README
index 979b2d4833..fc0daef2e4 100644
--- a/t/README
+++ b/t/README
@@ -231,6 +231,9 @@ override the location of the dashed-form subcommands (what
GIT_EXEC_PATH would be used for during normal operation).
GIT_TEST_EXEC_PATH defaults to `$GIT_TEST_INSTALLED/git --exec-path`.
+Similar to GIT_TEST_INSTALLED, GIT_TEST_BUILD_DIR can be pointed to
+another git.git checkout's build directory, to test its built binaries
+against the tests in this checkout.
Skipping Tests
--------------
diff --git a/t/chainlint.pl b/t/chainlint.pl
index 976db4b8a0..4e47e808d0 100755
--- a/t/chainlint.pl
+++ b/t/chainlint.pl
@@ -67,6 +67,7 @@ sub new {
bless {
parser => $parser,
buff => $s,
+ lineno => 1,
heretags => []
} => $class;
}
@@ -75,7 +76,9 @@ sub scan_heredoc_tag {
my $self = shift @_;
${$self->{buff}} =~ /\G(-?)/gc;
my $indented = $1;
- my $tag = $self->scan_token();
+ my $token = $self->scan_token();
+ return "<<$indented" unless $token;
+ my $tag = $token->[0];
$tag =~ s/['"\\]//g;
push(@{$self->{heretags}}, $indented ? "\t$tag" : "$tag");
return "<<$indented$tag";
@@ -95,7 +98,9 @@ sub scan_op {
sub scan_sqstring {
my $self = shift @_;
${$self->{buff}} =~ /\G([^']*'|.*\z)/sgc;
- return "'" . $1;
+ my $s = $1;
+ $self->{lineno} += () = $s =~ /\n/sg;
+ return "'" . $s;
}
sub scan_dqstring {
@@ -113,7 +118,7 @@ sub scan_dqstring {
if ($c eq '\\') {
$s .= '\\', last unless $$b =~ /\G(.)/sgc;
$c = $1;
- next if $c eq "\n"; # line splice
+ $self->{lineno}++, next if $c eq "\n"; # line splice
# backslash escapes only $, `, ", \ in dq-string
$s .= '\\' unless $c =~ /^[\$`"\\]$/;
$s .= $c;
@@ -121,6 +126,7 @@ sub scan_dqstring {
}
die("internal error scanning dq-string '$c'\n");
}
+ $self->{lineno} += () = $s =~ /\n/sg;
return $s;
}
@@ -135,6 +141,7 @@ sub scan_balanced {
$depth--;
last if $depth == 0;
}
+ $self->{lineno} += () = $s =~ /\n/sg;
return $s;
}
@@ -149,7 +156,7 @@ sub scan_dollar {
my $self = shift @_;
my $b = $self->{buff};
return $self->scan_balanced('(', ')') if $$b =~ /\G\((?=\()/gc; # $((...))
- return '(' . join(' ', $self->scan_subst()) . ')' if $$b =~ /\G\(/gc; # $(...)
+ return '(' . join(' ', map {$_->[0]} $self->scan_subst()) . ')' if $$b =~ /\G\(/gc; # $(...)
return $self->scan_balanced('{', '}') if $$b =~ /\G\{/gc; # ${...}
return $1 if $$b =~ /\G(\w+)/gc; # $var
return $1 if $$b =~ /\G([@*#?$!0-9-])/gc; # $*, $1, $$, etc.
@@ -161,8 +168,11 @@ sub swallow_heredocs {
my $b = $self->{buff};
my $tags = $self->{heretags};
while (my $tag = shift @$tags) {
+ my $start = pos($$b);
my $indent = $tag =~ s/^\t// ? '\\s*' : '';
$$b =~ /(?:\G|\n)$indent\Q$tag\E(?:\n|\z)/gc;
+ my $body = substr($$b, $start, pos($$b) - $start);
+ $self->{lineno} += () = $body =~ /\n/sg;
}
}
@@ -170,34 +180,37 @@ sub scan_token {
my $self = shift @_;
my $b = $self->{buff};
my $token = '';
+ my ($start, $startln);
RESTART:
+ $startln = $self->{lineno};
$$b =~ /\G[ \t]+/gc; # skip whitespace (but not newline)
- return "\n" if $$b =~ /\G#[^\n]*(?:\n|\z)/gc; # comment
+ $start = pos($$b) || 0;
+ $self->{lineno}++, return ["\n", $start, pos($$b), $startln, $startln] if $$b =~ /\G#[^\n]*(?:\n|\z)/gc; # comment
while (1) {
# slurp up non-special characters
$token .= $1 if $$b =~ /\G([^\\;&|<>(){}'"\$\s]+)/gc;
# handle special characters
last unless $$b =~ /\G(.)/sgc;
my $c = $1;
- last if $c =~ /^[ \t]$/; # whitespace ends token
+ pos($$b)--, last if $c =~ /^[ \t]$/; # whitespace ends token
pos($$b)--, last if length($token) && $c =~ /^[;&|<>(){}\n]$/;
$token .= $self->scan_sqstring(), next if $c eq "'";
$token .= $self->scan_dqstring(), next if $c eq '"';
$token .= $c . $self->scan_dollar(), next if $c eq '$';
- $self->swallow_heredocs(), $token = $c, last if $c eq "\n";
+ $self->{lineno}++, $self->swallow_heredocs(), $token = $c, last if $c eq "\n";
$token = $self->scan_op($c), last if $c =~ /^[;&|<>]$/;
$token = $c, last if $c =~ /^[(){}]$/;
if ($c eq '\\') {
$token .= '\\', last unless $$b =~ /\G(.)/sgc;
$c = $1;
- next if $c eq "\n" && length($token); # line splice
- goto RESTART if $c eq "\n"; # line splice
+ $self->{lineno}++, next if $c eq "\n" && length($token); # line splice
+ $self->{lineno}++, goto RESTART if $c eq "\n"; # line splice
$token .= '\\' . $c;
next;
}
die("internal error scanning character '$c'\n");
}
- return length($token) ? $token : undef;
+ return length($token) ? [$token, $start, pos($$b), $startln, $self->{lineno}] : undef;
}
# ShellParser parses POSIX shell scripts (with minor extensions for Bash). It
@@ -239,14 +252,14 @@ sub stop_at {
my ($self, $token) = @_;
return 1 unless defined($token);
my $stop = ${$self->{stop}}[-1] if @{$self->{stop}};
- return defined($stop) && $token =~ $stop;
+ return defined($stop) && $token->[0] =~ $stop;
}
sub expect {
my ($self, $expect) = @_;
my $token = $self->next_token();
- return $token if defined($token) && $token eq $expect;
- push(@{$self->{output}}, "?!ERR?! expected '$expect' but found '" . (defined($token) ? $token : "<end-of-input>") . "'\n");
+ return $token if defined($token) && $token->[0] eq $expect;
+ push(@{$self->{output}}, "?!ERR?! expected '$expect' but found '" . (defined($token) ? $token->[0] : "<end-of-input>") . "'\n");
$self->untoken($token) if defined($token);
return ();
}
@@ -255,7 +268,7 @@ sub optional_newlines {
my $self = shift @_;
my @tokens;
while (my $token = $self->peek()) {
- last unless $token eq "\n";
+ last unless $token->[0] eq "\n";
push(@tokens, $self->next_token());
}
return @tokens;
@@ -278,7 +291,7 @@ sub parse_case_pattern {
my @tokens;
while (defined(my $token = $self->next_token())) {
push(@tokens, $token);
- last if $token eq ')';
+ last if $token->[0] eq ')';
}
return @tokens;
}
@@ -293,13 +306,13 @@ sub parse_case {
$self->optional_newlines());
while (1) {
my $token = $self->peek();
- last unless defined($token) && $token ne 'esac';
+ last unless defined($token) && $token->[0] ne 'esac';
push(@tokens,
$self->parse_case_pattern(),
$self->optional_newlines(),
$self->parse(qr/^(?:;;|esac)$/)); # item body
$token = $self->peek();
- last unless defined($token) && $token ne 'esac';
+ last unless defined($token) && $token->[0] ne 'esac';
push(@tokens,
$self->expect(';;'),
$self->optional_newlines());
@@ -315,7 +328,7 @@ sub parse_for {
$self->next_token(), # variable
$self->optional_newlines());
my $token = $self->peek();
- if (defined($token) && $token eq 'in') {
+ if (defined($token) && $token->[0] eq 'in') {
push(@tokens,
$self->expect('in'),
$self->optional_newlines());
@@ -339,11 +352,11 @@ sub parse_if {
$self->optional_newlines(),
$self->parse(qr/^(?:elif|else|fi)$/)); # if/elif body
my $token = $self->peek();
- last unless defined($token) && $token eq 'elif';
+ last unless defined($token) && $token->[0] eq 'elif';
push(@tokens, $self->expect('elif'));
}
my $token = $self->peek();
- if (defined($token) && $token eq 'else') {
+ if (defined($token) && $token->[0] eq 'else') {
push(@tokens,
$self->expect('else'),
$self->optional_newlines(),
@@ -380,7 +393,7 @@ sub parse_bash_array_assignment {
my @tokens = $self->expect('(');
while (defined(my $token = $self->next_token())) {
push(@tokens, $token);
- last if $token eq ')';
+ last if $token->[0] eq ')';
}
return @tokens;
}
@@ -398,29 +411,31 @@ sub parse_cmd {
my $self = shift @_;
my $cmd = $self->next_token();
return () unless defined($cmd);
- return $cmd if $cmd eq "\n";
+ return $cmd if $cmd->[0] eq "\n";
my $token;
my @tokens = $cmd;
- if ($cmd eq '!') {
+ if ($cmd->[0] eq '!') {
push(@tokens, $self->parse_cmd());
return @tokens;
- } elsif (my $f = $compound{$cmd}) {
+ } elsif (my $f = $compound{$cmd->[0]}) {
push(@tokens, $self->$f());
- } elsif (defined($token = $self->peek()) && $token eq '(') {
- if ($cmd !~ /\w=$/) {
+ } elsif (defined($token = $self->peek()) && $token->[0] eq '(') {
+ if ($cmd->[0] !~ /\w=$/) {
push(@tokens, $self->parse_func());
return @tokens;
}
- $tokens[-1] .= join(' ', $self->parse_bash_array_assignment());
+ my @array = $self->parse_bash_array_assignment();
+ $tokens[-1]->[0] .= join(' ', map {$_->[0]} @array);
+ $tokens[-1]->[2] = $array[$#array][2] if @array;
}
while (defined(my $token = $self->next_token())) {
$self->untoken($token), last if $self->stop_at($token);
push(@tokens, $token);
- last if $token =~ /^(?:[;&\n|]|&&|\|\|)$/;
+ last if $token->[0] =~ /^(?:[;&\n|]|&&|\|\|)$/;
}
- push(@tokens, $self->next_token()) if $tokens[-1] ne "\n" && defined($token = $self->peek()) && $token eq "\n";
+ push(@tokens, $self->next_token()) if $tokens[-1]->[0] ne "\n" && defined($token = $self->peek()) && $token->[0] eq "\n";
return @tokens;
}
@@ -453,11 +468,18 @@ package TestParser;
use base 'ShellParser';
+sub new {
+ my $class = shift @_;
+ my $self = $class->SUPER::new(@_);
+ $self->{problems} = [];
+ return $self;
+}
+
sub find_non_nl {
my $tokens = shift @_;
my $n = shift @_;
$n = $#$tokens if !defined($n);
- $n-- while $n >= 0 && $$tokens[$n] eq "\n";
+ $n-- while $n >= 0 && $$tokens[$n]->[0] eq "\n";
return $n;
}
@@ -467,7 +489,7 @@ sub ends_with {
for my $needle (reverse(@$needles)) {
return undef if $n < 0;
$n = find_non_nl($tokens, $n), next if $needle eq "\n";
- return undef if $$tokens[$n] !~ $needle;
+ return undef if $$tokens[$n]->[0] !~ $needle;
$n--;
}
return 1;
@@ -486,13 +508,13 @@ sub parse_loop_body {
my $self = shift @_;
my @tokens = $self->SUPER::parse_loop_body(@_);
# did loop signal failure via "|| return" or "|| exit"?
- return @tokens if !@tokens || grep(/^(?:return|exit|\$\?)$/, @tokens);
+ return @tokens if !@tokens || grep {$_->[0] =~ /^(?:return|exit|\$\?)$/} @tokens;
# did loop upstream of a pipe signal failure via "|| echo 'impossible
# text'" as the final command in the loop body?
return @tokens if ends_with(\@tokens, [qr/^\|\|$/, "\n", qr/^echo$/, qr/^.+$/]);
# flag missing "return/exit" handling explicit failure in loop body
my $n = find_non_nl(\@tokens);
- splice(@tokens, $n + 1, 0, '?!LOOP?!');
+ push(@{$self->{problems}}, ['LOOP', $tokens[$n]]);
return @tokens;
}
@@ -505,8 +527,13 @@ my @safe_endings = (
sub accumulate {
my ($self, $tokens, $cmd) = @_;
+ my $problems = $self->{problems};
+
+ # no previous command to check for missing "&&"
goto DONE unless @$tokens;
- goto DONE if @$cmd == 1 && $$cmd[0] eq "\n";
+
+ # new command is empty line; can't yet check if previous is missing "&&"
+ goto DONE if @$cmd == 1 && $$cmd[0]->[0] eq "\n";
# did previous command end with "&&", "|", "|| return" or similar?
goto DONE if match_ending($tokens, \@safe_endings);
@@ -514,20 +541,20 @@ sub accumulate {
# if this command handles "$?" specially, then okay for previous
# command to be missing "&&"
for my $token (@$cmd) {
- goto DONE if $token =~ /\$\?/;
+ goto DONE if $token->[0] =~ /\$\?/;
}
# if this command is "false", "return 1", or "exit 1" (which signal
# failure explicitly), then okay for all preceding commands to be
# missing "&&"
- if ($$cmd[0] =~ /^(?:false|return|exit)$/) {
- @$tokens = grep(!/^\?!AMP\?!$/, @$tokens);
+ if ($$cmd[0]->[0] =~ /^(?:false|return|exit)$/) {
+ @$problems = grep {$_->[0] ne 'AMP'} @$problems;
goto DONE;
}
# flag missing "&&" at end of previous command
my $n = find_non_nl($tokens);
- splice(@$tokens, $n + 1, 0, '?!AMP?!') unless $n < 0;
+ push(@$problems, ['AMP', $tokens->[$n]]) unless $n < 0;
DONE:
$self->SUPER::accumulate($tokens, $cmd);
@@ -553,7 +580,7 @@ sub new {
# composition of multiple strings and non-string character runs; for instance,
# `"test body"` unwraps to `test body`; `word"a b"42'c d'` to `worda b42c d`
sub unwrap {
- my $token = @_ ? shift @_ : $_;
+ my $token = (@_ ? shift @_ : $_)->[0];
# simple case: 'sqstring' or "dqstring"
return $token if $token =~ s/^'([^']*)'$/$1/;
return $token if $token =~ s/^"([^"]*)"$/$1/;
@@ -584,13 +611,25 @@ sub check_test {
$self->{ntests}++;
my $parser = TestParser->new(\$body);
my @tokens = $parser->parse();
- return unless $emit_all || grep(/\?![^?]+\?!/, @tokens);
+ my $problems = $parser->{problems};
+ return unless $emit_all || @$problems;
my $c = main::fd_colors(1);
- my $checked = join(' ', @tokens);
- $checked =~ s/^\n//;
- $checked =~ s/^ //mg;
- $checked =~ s/ $//mg;
+ my $lineno = $_[1]->[3];
+ my $start = 0;
+ my $checked = '';
+ for (sort {$a->[1]->[2] <=> $b->[1]->[2]} @$problems) {
+ my ($label, $token) = @$_;
+ my $pos = $token->[2];
+ $checked .= substr($body, $start, $pos - $start) . " ?!$label?! ";
+ $start = $pos;
+ }
+ $checked .= substr($body, $start);
+ $checked =~ s/^/$lineno++ . ' '/mge;
+ $checked =~ s/^\d+ \n//;
+ $checked =~ s/(\s) \?!/$1?!/mg;
+ $checked =~ s/\?! (\s)/?!$1/mg;
$checked =~ s/(\?![^?]+\?!)/$c->{rev}$c->{red}$1$c->{reset}/mg;
+ $checked =~ s/^\d+/$c->{dim}$&$c->{reset}/mg;
$checked .= "\n" unless $checked =~ /\n$/;
push(@{$self->{output}}, "$c->{blue}# chainlint: $title$c->{reset}\n$checked");
}
@@ -598,9 +637,9 @@ sub check_test {
sub parse_cmd {
my $self = shift @_;
my @tokens = $self->SUPER::parse_cmd();
- return @tokens unless @tokens && $tokens[0] =~ /^test_expect_(?:success|failure)$/;
+ return @tokens unless @tokens && $tokens[0]->[0] =~ /^test_expect_(?:success|failure)$/;
my $n = $#tokens;
- $n-- while $n >= 0 && $tokens[$n] =~ /^(?:[;&\n|]|&&|\|\|)$/;
+ $n-- while $n >= 0 && $tokens[$n]->[0] =~ /^(?:[;&\n|]|&&|\|\|)$/;
$self->check_test($tokens[1], $tokens[2]) if $n == 2; # title body
$self->check_test($tokens[2], $tokens[3]) if $n > 2; # prereq title body
return @tokens;
@@ -622,25 +661,39 @@ if (eval {require Time::HiRes; Time::HiRes->import(); 1;}) {
# thread and ignore %ENV changes in subthreads.
$ENV{TERM} = $ENV{USER_TERM} if $ENV{USER_TERM};
-my @NOCOLORS = (bold => '', rev => '', reset => '', blue => '', green => '', red => '');
+my @NOCOLORS = (bold => '', rev => '', dim => '', reset => '', blue => '', green => '', red => '');
my %COLORS = ();
sub get_colors {
return \%COLORS if %COLORS;
- if (exists($ENV{NO_COLOR}) ||
- system("tput sgr0 >/dev/null 2>&1") != 0 ||
- system("tput bold >/dev/null 2>&1") != 0 ||
- system("tput rev >/dev/null 2>&1") != 0 ||
- system("tput setaf 1 >/dev/null 2>&1") != 0) {
+ if (exists($ENV{NO_COLOR})) {
%COLORS = @NOCOLORS;
return \%COLORS;
}
- %COLORS = (bold => `tput bold`,
- rev => `tput rev`,
- reset => `tput sgr0`,
- blue => `tput setaf 4`,
- green => `tput setaf 2`,
- red => `tput setaf 1`);
- chomp(%COLORS);
+ if ($ENV{TERM} =~ /xterm|xterm-\d+color|xterm-new|xterm-direct|nsterm|nsterm-\d+color|nsterm-direct/) {
+ %COLORS = (bold => "\e[1m",
+ rev => "\e[7m",
+ dim => "\e[2m",
+ reset => "\e[0m",
+ blue => "\e[34m",
+ green => "\e[32m",
+ red => "\e[31m");
+ return \%COLORS;
+ }
+ if (system("tput sgr0 >/dev/null 2>&1") == 0 &&
+ system("tput bold >/dev/null 2>&1") == 0 &&
+ system("tput rev >/dev/null 2>&1") == 0 &&
+ system("tput dim >/dev/null 2>&1") == 0 &&
+ system("tput setaf 1 >/dev/null 2>&1") == 0) {
+ %COLORS = (bold => `tput bold`,
+ rev => `tput rev`,
+ dim => `tput dim`,
+ reset => `tput sgr0`,
+ blue => `tput setaf 4`,
+ green => `tput setaf 2`,
+ red => `tput setaf 1`);
+ return \%COLORS;
+ }
+ %COLORS = @NOCOLORS;
return \%COLORS;
}
diff --git a/t/chainlint/block-comment.expect b/t/chainlint/block-comment.expect
index d10b2eeaf2..df2beea888 100644
--- a/t/chainlint/block-comment.expect
+++ b/t/chainlint/block-comment.expect
@@ -1,6 +1,8 @@
(
{
+ # show a
echo a &&
+ # show b
echo b
}
)
diff --git a/t/chainlint/case-comment.expect b/t/chainlint/case-comment.expect
index 1e4b054bda..641c157b98 100644
--- a/t/chainlint/case-comment.expect
+++ b/t/chainlint/case-comment.expect
@@ -1,7 +1,10 @@
(
case "$x" in
+ # found foo
x) foo ;;
+ # found other
*)
+ # treat it as bar
bar
;;
esac
diff --git a/t/chainlint/close-subshell.expect b/t/chainlint/close-subshell.expect
index 0f87db9ae6..2192a2870a 100644
--- a/t/chainlint/close-subshell.expect
+++ b/t/chainlint/close-subshell.expect
@@ -15,7 +15,8 @@
) | wuzzle &&
(
bop
-) | fazz fozz &&
+) | fazz \
+ fozz &&
(
bup
) |
diff --git a/t/chainlint/comment.expect b/t/chainlint/comment.expect
index f76fde1ffb..a68f1f9d7c 100644
--- a/t/chainlint/comment.expect
+++ b/t/chainlint/comment.expect
@@ -1,4 +1,8 @@
(
+ # comment 1
nothing &&
+ # comment 2
something
+ # comment 3
+ # comment 4
)
diff --git a/t/chainlint/double-here-doc.expect b/t/chainlint/double-here-doc.expect
index 75477bb1ad..cd584a4357 100644
--- a/t/chainlint/double-here-doc.expect
+++ b/t/chainlint/double-here-doc.expect
@@ -1,2 +1,12 @@
-run_sub_test_lib_test_err run-inv-range-start "--run invalid range start" --run="a-5" <<-EOF &&
-check_sub_test_lib_test_err run-inv-range-start <<-EOF_OUT 3 <<-EOF_ERR
+run_sub_test_lib_test_err run-inv-range-start \
+ "--run invalid range start" \
+ --run="a-5" <<-\EOF &&
+test_expect_success "passing test #1" "true"
+test_done
+EOF
+check_sub_test_lib_test_err run-inv-range-start \
+ <<-\EOF_OUT 3<<-EOF_ERR
+> FATAL: Unexpected exit with code 1
+EOF_OUT
+> error: --run: invalid non-numeric in range start: ${SQ}a-5${SQ}
+EOF_ERR
diff --git a/t/chainlint/empty-here-doc.expect b/t/chainlint/empty-here-doc.expect
index f42f2d41ba..e8733c97c6 100644
--- a/t/chainlint/empty-here-doc.expect
+++ b/t/chainlint/empty-here-doc.expect
@@ -1,3 +1,4 @@
git ls-tree $tree path > current &&
-cat > expected <<EOF &&
+cat > expected <<\EOF &&
+EOF
test_output
diff --git a/t/chainlint/for-loop.expect b/t/chainlint/for-loop.expect
index a5810c9bdd..d65c82129a 100644
--- a/t/chainlint/for-loop.expect
+++ b/t/chainlint/for-loop.expect
@@ -2,7 +2,9 @@
for i in a b c
do
echo $i ?!AMP?!
- cat <<-EOF ?!LOOP?!
+ cat <<-\EOF ?!LOOP?!
+ bar
+ EOF
done ?!AMP?!
for i in a b c; do
echo $i &&
diff --git a/t/chainlint/here-doc-close-subshell.expect b/t/chainlint/here-doc-close-subshell.expect
index 2af9ced71c..7d9c2b5607 100644
--- a/t/chainlint/here-doc-close-subshell.expect
+++ b/t/chainlint/here-doc-close-subshell.expect
@@ -1,2 +1,4 @@
(
- cat <<-INPUT)
+ cat <<-\INPUT)
+ fizz
+ INPUT
diff --git a/t/chainlint/here-doc-indent-operator.expect b/t/chainlint/here-doc-indent-operator.expect
index fb6cf7285d..f92a7ce999 100644
--- a/t/chainlint/here-doc-indent-operator.expect
+++ b/t/chainlint/here-doc-indent-operator.expect
@@ -1,5 +1,11 @@
-cat > expect <<-EOF &&
+cat >expect <<- EOF &&
+header: 43475048 1 $(test_oid oid_version) $NUM_CHUNKS 0
+num_commits: $1
+chunks: oid_fanout oid_lookup commit_metadata generation_data bloom_indexes bloom_data
+EOF
-cat > expect <<-EOF ?!AMP?!
+cat >expect << -EOF ?!AMP?!
+this is not indented
+-EOF
cleanup
diff --git a/t/chainlint/here-doc-multi-line-command-subst.expect b/t/chainlint/here-doc-multi-line-command-subst.expect
index f8b3aa73c4..b7364c82c8 100644
--- a/t/chainlint/here-doc-multi-line-command-subst.expect
+++ b/t/chainlint/here-doc-multi-line-command-subst.expect
@@ -1,5 +1,8 @@
(
- x=$(bobble <<-END &&
+ x=$(bobble <<-\END &&
+ fossil
+ vegetable
+ END
wiffle) ?!AMP?!
echo $x
)
diff --git a/t/chainlint/here-doc-multi-line-string.expect b/t/chainlint/here-doc-multi-line-string.expect
index be64b26869..6c13bdcbfb 100644
--- a/t/chainlint/here-doc-multi-line-string.expect
+++ b/t/chainlint/here-doc-multi-line-string.expect
@@ -1,5 +1,7 @@
(
- cat <<-TXT && echo "multi-line
+ cat <<-\TXT && echo "multi-line
string" ?!AMP?!
+ fizzle
+ TXT
bap
)
diff --git a/t/chainlint/here-doc.expect b/t/chainlint/here-doc.expect
index 110059ba58..1df3f78282 100644
--- a/t/chainlint/here-doc.expect
+++ b/t/chainlint/here-doc.expect
@@ -1,7 +1,25 @@
-boodle wobba gorgo snoot wafta snurb <<EOF &&
+boodle wobba \
+ gorgo snoot \
+ wafta snurb <<EOF &&
+quoth the raven,
+nevermore...
+EOF
cat <<-Arbitrary_Tag_42 >foo &&
+snoz
+boz
+woz
+Arbitrary_Tag_42
-cat <<zump >boo &&
+cat <<"zump" >boo &&
+snoz
+boz
+woz
+zump
-horticulture <<EOF
+horticulture <<\EOF
+gomez
+morticia
+wednesday
+pugsly
+EOF
diff --git a/t/chainlint/if-then-else.expect b/t/chainlint/if-then-else.expect
index 44d86c3597..cbaaf857d4 100644
--- a/t/chainlint/if-then-else.expect
+++ b/t/chainlint/if-then-else.expect
@@ -8,7 +8,9 @@
echo foo
else
echo foo &&
- cat <<-EOF
+ cat <<-\EOF
+ bar
+ EOF
fi ?!AMP?!
echo poodle
) &&
diff --git a/t/chainlint/incomplete-line.expect b/t/chainlint/incomplete-line.expect
index ffac8f9018..134d3a14f5 100644
--- a/t/chainlint/incomplete-line.expect
+++ b/t/chainlint/incomplete-line.expect
@@ -1,4 +1,10 @@
-line 1 line 2 line 3 line 4 &&
+line 1 \
+line 2 \
+line 3 \
+line 4 &&
(
- line 5 line 6 line 7 line 8
+ line 5 \
+ line 6 \
+ line 7 \
+ line 8
)
diff --git a/t/chainlint/inline-comment.expect b/t/chainlint/inline-comment.expect
index dd0dace077..6bad218530 100644
--- a/t/chainlint/inline-comment.expect
+++ b/t/chainlint/inline-comment.expect
@@ -1,6 +1,6 @@
(
- foobar &&
- barfoo ?!AMP?!
+ foobar && # comment 1
+ barfoo ?!AMP?! # wrong position for &&
flibble "not a # comment"
) &&
diff --git a/t/chainlint/loop-detect-status.expect b/t/chainlint/loop-detect-status.expect
index 0ad23bb35e..24da9e86d5 100644
--- a/t/chainlint/loop-detect-status.expect
+++ b/t/chainlint/loop-detect-status.expect
@@ -2,7 +2,7 @@
do
printf "Generating blob $i/$blobcount\r" >& 2 &&
printf "blob\nmark :$i\ndata $blobsize\n" &&
-
+ #test-tool genrandom $i $blobsize &&
printf "%-${blobsize}s" $i &&
echo "M 100644 :$i $i" >> commit &&
i=$(($i+1)) ||
diff --git a/t/chainlint/nested-here-doc.expect b/t/chainlint/nested-here-doc.expect
index e3bef63f75..29b3832a98 100644
--- a/t/chainlint/nested-here-doc.expect
+++ b/t/chainlint/nested-here-doc.expect
@@ -1,7 +1,30 @@
cat <<ARBITRARY >foop &&
+naddle
+fub <<EOF
+ nozzle
+ noodle
+EOF
+formp
+ARBITRARY
(
- cat <<-INPUT_END &&
- cat <<-EOT ?!AMP?!
+ cat <<-\INPUT_END &&
+ fish are mice
+ but geese go slow
+ data <<EOF
+ perl is lerp
+ and nothing else
+ EOF
+ toink
+ INPUT_END
+
+ cat <<-\EOT ?!AMP?!
+ text goes here
+ data <<EOF
+ data goes here
+ EOF
+ more test here
+ EOT
+
foobar
)
diff --git a/t/chainlint/nested-subshell-comment.expect b/t/chainlint/nested-subshell-comment.expect
index be4b27a305..9138cf386d 100644
--- a/t/chainlint/nested-subshell-comment.expect
+++ b/t/chainlint/nested-subshell-comment.expect
@@ -2,6 +2,8 @@
foo &&
(
bar &&
+ # bottles wobble while fiddles gobble
+ # minor numbers of cows (or do they?)
baz &&
snaff
) ?!AMP?!
diff --git a/t/chainlint/subshell-here-doc.expect b/t/chainlint/subshell-here-doc.expect
index 029d129299..52789278d1 100644
--- a/t/chainlint/subshell-here-doc.expect
+++ b/t/chainlint/subshell-here-doc.expect
@@ -1,10 +1,30 @@
(
- echo wobba gorgo snoot wafta snurb <<-EOF &&
+ echo wobba \
+ gorgo snoot \
+ wafta snurb <<-EOF &&
+ quoth the raven,
+ nevermore...
+ EOF
+
cat <<EOF >bip ?!AMP?!
- echo <<-EOF >bop
+ fish fly high
+EOF
+
+ echo <<-\EOF >bop
+ gomez
+ morticia
+ wednesday
+ pugsly
+ EOF
) &&
(
- cat <<-ARBITRARY >bup &&
- cat <<-ARBITRARY3 >bup3 &&
+ cat <<-\ARBITRARY >bup &&
+ glink
+ FIZZ
+ ARBITRARY
+ cat <<-"ARBITRARY3" >bup3 &&
+ glink
+ FIZZ
+ ARBITRARY3
meep
)
diff --git a/t/chainlint/t7900-subtree.expect b/t/chainlint/t7900-subtree.expect
index 69167da2f2..71b3b3bc20 100644
--- a/t/chainlint/t7900-subtree.expect
+++ b/t/chainlint/t7900-subtree.expect
@@ -4,12 +4,16 @@ sub2
sub3
sub4" &&
chks_sub=$(cat <<TXT | sed "s,^,sub dir/,"
+$chks
+TXT
) &&
chkms="main-sub1
main-sub2
main-sub3
main-sub4" &&
chkms_sub=$(cat <<TXT | sed "s,^,sub dir/,"
+$chkms
+TXT
) &&
subfiles=$(git ls-files) &&
check_equal "$subfiles" "$chkms
diff --git a/t/chainlint/while-loop.expect b/t/chainlint/while-loop.expect
index f272aa21fe..1f5eaea0fd 100644
--- a/t/chainlint/while-loop.expect
+++ b/t/chainlint/while-loop.expect
@@ -2,7 +2,9 @@
while true
do
echo foo ?!AMP?!
- cat <<-EOF ?!LOOP?!
+ cat <<-\EOF ?!LOOP?!
+ bar
+ EOF
done ?!AMP?!
while true; do
echo foo &&
diff --git a/t/check-non-portable-shell.pl b/t/check-non-portable-shell.pl
index fd3303552b..dd8107cd7d 100755
--- a/t/check-non-portable-shell.pl
+++ b/t/check-non-portable-shell.pl
@@ -45,6 +45,7 @@ while (<>) {
/\bhead\s+-c\b/ and err 'head -c is not portable (use test_copy_bytes BYTES <file >out)';
/(?:\$\(seq|^\s*seq\b)/ and err 'seq is not portable (use test_seq)';
/\bgrep\b.*--file\b/ and err 'grep --file FILE is not portable (use grep -f FILE)';
+ /\b[ef]grep\b/ and err 'egrep/fgrep obsolescent (use grep -E/-F)';
/\bexport\s+[A-Za-z0-9_]*=/ and err '"export FOO=bar" is not portable (use FOO=bar && export FOO)';
/^\s*([A-Z0-9_]+=(\w*|(["']).*?\3)\s+)+(\w+)/ and exists($func{$4}) and
err '"FOO=bar shell_func" assignment extends beyond "shell_func"';
diff --git a/t/helper/test-bundle-uri.c b/t/helper/test-bundle-uri.c
new file mode 100644
index 0000000000..25afd39342
--- /dev/null
+++ b/t/helper/test-bundle-uri.c
@@ -0,0 +1,95 @@
+#include "test-tool.h"
+#include "parse-options.h"
+#include "bundle-uri.h"
+#include "strbuf.h"
+#include "string-list.h"
+
+enum input_mode {
+ KEY_VALUE_PAIRS,
+ CONFIG_FILE,
+};
+
+static int cmd__bundle_uri_parse(int argc, const char **argv, enum input_mode mode)
+{
+ const char *key_value_usage[] = {
+ "test-tool bundle-uri parse-key-values <input>",
+ NULL
+ };
+ const char *config_usage[] = {
+ "test-tool bundle-uri parse-config <input>",
+ NULL
+ };
+ const char **usage = key_value_usage;
+ struct option options[] = {
+ OPT_END(),
+ };
+ struct strbuf sb = STRBUF_INIT;
+ struct bundle_list list;
+ int err = 0;
+ FILE *fp;
+
+ if (mode == CONFIG_FILE)
+ usage = config_usage;
+
+ argc = parse_options(argc, argv, NULL, options, usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ init_bundle_list(&list);
+
+ switch (mode) {
+ case KEY_VALUE_PAIRS:
+ if (argc != 1)
+ goto usage;
+ fp = fopen(argv[0], "r");
+ if (!fp)
+ die("failed to open '%s'", argv[0]);
+ while (strbuf_getline(&sb, fp) != EOF) {
+ if (bundle_uri_parse_line(&list, sb.buf))
+ err = error("bad line: '%s'", sb.buf);
+ }
+ fclose(fp);
+ break;
+
+ case CONFIG_FILE:
+ if (argc != 1)
+ goto usage;
+ err = bundle_uri_parse_config_format("<uri>", argv[0], &list);
+ break;
+ }
+ strbuf_release(&sb);
+
+ print_bundle_list(stdout, &list);
+
+ clear_bundle_list(&list);
+
+ return !!err;
+
+usage:
+ usage_with_options(usage, options);
+}
+
+int cmd__bundle_uri(int argc, const char **argv)
+{
+ const char *usage[] = {
+ "test-tool bundle-uri <subcommand> [<options>]",
+ NULL
+ };
+ struct option options[] = {
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, NULL, options, usage,
+ PARSE_OPT_STOP_AT_NON_OPTION |
+ PARSE_OPT_KEEP_ARGV0);
+ if (argc == 1)
+ goto usage;
+
+ if (!strcmp(argv[1], "parse-key-values"))
+ return cmd__bundle_uri_parse(argc - 1, argv + 1, KEY_VALUE_PAIRS);
+ if (!strcmp(argv[1], "parse-config"))
+ return cmd__bundle_uri_parse(argc - 1, argv + 1, CONFIG_FILE);
+ error("there is no test-tool bundle-uri tool '%s'", argv[1]);
+
+usage:
+ usage_with_options(usage, options);
+}
diff --git a/t/helper/test-cache-tree.c b/t/helper/test-cache-tree.c
new file mode 100644
index 0000000000..93051b25f5
--- /dev/null
+++ b/t/helper/test-cache-tree.c
@@ -0,0 +1,64 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "tree.h"
+#include "cache-tree.h"
+#include "parse-options.h"
+
+static char const * const test_cache_tree_usage[] = {
+ N_("test-tool cache-tree <options> (control|prime|update)"),
+ NULL
+};
+
+int cmd__cache_tree(int argc, const char **argv)
+{
+ struct object_id oid;
+ struct tree *tree;
+ int empty = 0;
+ int invalidate_qty = 0;
+ int i;
+
+ struct option options[] = {
+ OPT_BOOL(0, "empty", &empty,
+ N_("clear the cache tree before each iteration")),
+ OPT_INTEGER_F(0, "invalidate", &invalidate_qty,
+ N_("number of entries in the cache tree to invalidate (default 0)"),
+ PARSE_OPT_NONEG),
+ OPT_END()
+ };
+
+ setup_git_directory();
+
+ argc = parse_options(argc, argv, NULL, options, test_cache_tree_usage, 0);
+
+ if (read_cache() < 0)
+ die(_("unable to read index file"));
+
+ oidcpy(&oid, &the_index.cache_tree->oid);
+ tree = parse_tree_indirect(&oid);
+ if (!tree)
+ die(_("not a tree object: %s"), oid_to_hex(&oid));
+
+ if (empty) {
+ /* clear the cache tree & allocate a new one */
+ cache_tree_free(&the_index.cache_tree);
+ the_index.cache_tree = cache_tree();
+ } else if (invalidate_qty) {
+ /* invalidate the specified number of unique paths */
+ float f_interval = (float)the_index.cache_nr / invalidate_qty;
+ int interval = f_interval < 1.0 ? 1 : (int)f_interval;
+ for (i = 0; i < invalidate_qty && i * interval < the_index.cache_nr; i++)
+ cache_tree_invalidate_path(&the_index, the_index.cache[i * interval]->name);
+ }
+
+ if (argc != 1)
+ usage_with_options(test_cache_tree_usage, options);
+ else if (!strcmp(argv[0], "prime"))
+ prime_cache_tree(the_repository, &the_index, tree);
+ else if (!strcmp(argv[0], "update"))
+ cache_tree_update(&the_index, WRITE_TREE_SILENT | WRITE_TREE_REPAIR);
+ /* use "control" subcommand to specify no-op */
+ else if (!!strcmp(argv[0], "control"))
+ die(_("Unhandled subcommand '%s'"), argv[0]);
+
+ return 0;
+}
diff --git a/t/helper/test-fake-ssh.c b/t/helper/test-fake-ssh.c
index 12beee99ad..27323cb367 100644
--- a/t/helper/test-fake-ssh.c
+++ b/t/helper/test-fake-ssh.c
@@ -8,7 +8,7 @@ int cmd_main(int argc, const char **argv)
struct strbuf buf = STRBUF_INIT;
FILE *f;
int i;
- const char *child_argv[] = { NULL, NULL };
+ struct child_process cmd = CHILD_PROCESS_INIT;
/* First, print all parameters into $TRASH_DIRECTORY/ssh-output */
if (!trash_directory)
@@ -17,6 +17,7 @@ int cmd_main(int argc, const char **argv)
f = fopen(buf.buf, "w");
if (!f)
die("Could not write to %s", buf.buf);
+ strbuf_release(&buf);
for (i = 0; i < argc; i++)
fprintf(f, "%s%s", i > 0 ? " " : "", i > 0 ? argv[i] : "ssh:");
fprintf(f, "\n");
@@ -25,6 +26,7 @@ int cmd_main(int argc, const char **argv)
/* Now, evaluate the *last* parameter */
if (argc < 2)
return 0;
- child_argv[0] = argv[argc - 1];
- return run_command_v_opt(child_argv, RUN_USING_SHELL);
+ cmd.use_shell = 1;
+ strvec_push(&cmd.args, argv[argc - 1]);
+ return run_command(&cmd);
}
diff --git a/t/helper/test-path-utils.c b/t/helper/test-path-utils.c
index d20e1b7a18..f69709d674 100644
--- a/t/helper/test-path-utils.c
+++ b/t/helper/test-path-utils.c
@@ -8,7 +8,8 @@
* GIT_CEILING_DIRECTORIES. If the path is unusable for some reason,
* die with an explanation.
*/
-static int normalize_ceiling_entry(struct string_list_item *item, void *unused)
+static int normalize_ceiling_entry(struct string_list_item *item,
+ void *data UNUSED)
{
char *ceil = item->string;
diff --git a/t/helper/test-proc-receive.c b/t/helper/test-proc-receive.c
index cc08506cf0..a4b305f494 100644
--- a/t/helper/test-proc-receive.c
+++ b/t/helper/test-proc-receive.c
@@ -6,7 +6,7 @@
#include "test-tool.h"
static const char *proc_receive_usage[] = {
- "test-tool proc-receive [<options>...]",
+ "test-tool proc-receive [<options>]",
NULL
};
diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c
index c9283b47af..40dd329e02 100644
--- a/t/helper/test-run-command.c
+++ b/t/helper/test-run-command.c
@@ -52,6 +52,21 @@ static int no_job(struct child_process *cp,
return 0;
}
+static void duplicate_output(struct strbuf *process_out,
+ struct strbuf *out,
+ void *pp_cb,
+ void *pp_task_cb)
+{
+ struct string_list list = STRING_LIST_INIT_DUP;
+
+ string_list_split(&list, process_out->buf, '\n', -1);
+ for (size_t i = 0; i < list.nr; i++) {
+ if (strlen(list.items[i].string) > 0)
+ fprintf(stderr, "duplicate_output: %s\n", list.items[i].string);
+ }
+ string_list_clear(&list, 0);
+}
+
static int task_finished(int result,
struct strbuf *err,
void *pp_cb,
@@ -136,7 +151,7 @@ static const char * const testsuite_usage[] = {
static int testsuite(int argc, const char **argv)
{
struct testsuite suite = TESTSUITE_INIT;
- int max_jobs = 1, i, ret;
+ int max_jobs = 1, i, ret = 0;
DIR *dir;
struct dirent *d;
struct option options[] = {
@@ -152,6 +167,12 @@ static int testsuite(int argc, const char **argv)
"write JUnit-style XML files"),
OPT_END()
};
+ struct run_process_parallel_opts opts = {
+ .get_next_task = next_test,
+ .start_failure = test_failed,
+ .task_finished = test_finished,
+ .data = &suite,
+ };
argc = parse_options(argc, argv, NULL, options,
testsuite_usage, PARSE_OPT_STOP_AT_NON_OPTION);
@@ -192,8 +213,8 @@ static int testsuite(int argc, const char **argv)
fprintf(stderr, "Running %"PRIuMAX" tests (%d at a time)\n",
(uintmax_t)suite.tests.nr, max_jobs);
- ret = run_processes_parallel(max_jobs, next_test, test_failed,
- test_finished, &suite);
+ opts.processes = max_jobs;
+ run_processes_parallel(&opts);
if (suite.failed.nr > 0) {
ret = 1;
@@ -206,7 +227,7 @@ static int testsuite(int argc, const char **argv)
string_list_clear(&suite.tests, 0);
string_list_clear(&suite.failed, 0);
- return !!ret;
+ return ret;
}
static uint64_t my_random_next = 1234;
@@ -381,13 +402,17 @@ int cmd__run_command(int argc, const char **argv)
{
struct child_process proc = CHILD_PROCESS_INIT;
int jobs;
+ int ret;
+ struct run_process_parallel_opts opts = {
+ .data = &proc,
+ };
if (argc > 1 && !strcmp(argv[1], "testsuite"))
- exit(testsuite(argc - 1, argv + 1));
+ return testsuite(argc - 1, argv + 1);
if (!strcmp(argv[1], "inherited-handle"))
- exit(inherit_handle(argv[0]));
+ return inherit_handle(argv[0]);
if (!strcmp(argv[1], "inherited-handle-child"))
- exit(inherit_handle_child());
+ return inherit_handle_child();
if (argc >= 2 && !strcmp(argv[1], "quote-stress-test"))
return !!quote_stress_test(argc - 1, argv + 1);
@@ -404,41 +429,58 @@ int cmd__run_command(int argc, const char **argv)
argv += 2;
argc -= 2;
}
- if (argc < 3)
- return 1;
+ if (argc < 3) {
+ ret = 1;
+ goto cleanup;
+ }
strvec_pushv(&proc.args, (const char **)argv + 2);
if (!strcmp(argv[1], "start-command-ENOENT")) {
- if (start_command(&proc) < 0 && errno == ENOENT)
- return 0;
+ if (start_command(&proc) < 0 && errno == ENOENT) {
+ ret = 0;
+ goto cleanup;
+ }
fprintf(stderr, "FAIL %s\n", argv[1]);
return 1;
}
- if (!strcmp(argv[1], "run-command"))
- exit(run_command(&proc));
+ if (!strcmp(argv[1], "run-command")) {
+ ret = run_command(&proc);
+ goto cleanup;
+ }
if (!strcmp(argv[1], "--ungroup")) {
argv += 1;
argc -= 1;
- run_processes_parallel_ungroup = 1;
+ opts.ungroup = 1;
+ }
+
+ if (!strcmp(argv[1], "--duplicate-output")) {
+ argv += 1;
+ argc -= 1;
+ opts.duplicate_output = duplicate_output;
}
jobs = atoi(argv[2]);
strvec_clear(&proc.args);
strvec_pushv(&proc.args, (const char **)argv + 3);
- if (!strcmp(argv[1], "run-command-parallel"))
- exit(run_processes_parallel(jobs, parallel_next,
- NULL, NULL, &proc));
-
- if (!strcmp(argv[1], "run-command-abort"))
- exit(run_processes_parallel(jobs, parallel_next,
- NULL, task_finished, &proc));
-
- if (!strcmp(argv[1], "run-command-no-jobs"))
- exit(run_processes_parallel(jobs, no_job,
- NULL, task_finished, &proc));
-
- fprintf(stderr, "check usage\n");
- return 1;
+ if (!strcmp(argv[1], "run-command-parallel")) {
+ opts.get_next_task = parallel_next;
+ } else if (!strcmp(argv[1], "run-command-abort")) {
+ opts.get_next_task = parallel_next;
+ opts.task_finished = task_finished;
+ } else if (!strcmp(argv[1], "run-command-no-jobs")) {
+ opts.get_next_task = no_job;
+ opts.task_finished = task_finished;
+ } else {
+ ret = 1;
+ fprintf(stderr, "check usage\n");
+ goto cleanup;
+ }
+ opts.processes = jobs;
+ run_processes_parallel(&opts);
+ ret = 0;
+cleanup:
+ child_process_clear(&proc);
+ return ret;
}
diff --git a/t/helper/test-sha1.c b/t/helper/test-sha1.c
index d860c387c3..71fe5c6145 100644
--- a/t/helper/test-sha1.c
+++ b/t/helper/test-sha1.c
@@ -5,3 +5,11 @@ int cmd__sha1(int ac, const char **av)
{
return cmd_hash_impl(ac, av, GIT_HASH_SHA1);
}
+
+int cmd__sha1_is_sha1dc(int argc UNUSED, const char **argv UNUSED)
+{
+#ifdef platform_SHA_IS_SHA1DC
+ return 0;
+#endif
+ return 1;
+}
diff --git a/t/helper/test-submodule.c b/t/helper/test-submodule.c
index e0e0c53d38..e060cc6226 100644
--- a/t/helper/test-submodule.c
+++ b/t/helper/test-submodule.c
@@ -85,10 +85,17 @@ static int cmd__submodule_is_active(int argc, const char **argv)
return !is_submodule_active(the_repository, argv[0]);
}
-static int resolve_relative_url(int argc, const char **argv)
+static int cmd__submodule_resolve_relative_url(int argc, const char **argv)
{
char *remoteurl, *res;
const char *up_path, *url;
+ struct option options[] = {
+ OPT_END()
+ };
+ argc = parse_options(argc, argv, "test-tools", options,
+ submodule_resolve_relative_url_usage, 0);
+ if (argc != 3)
+ usage_with_options(submodule_resolve_relative_url_usage, options);
up_path = argv[0];
remoteurl = xstrdup(argv[1]);
@@ -104,23 +111,94 @@ static int resolve_relative_url(int argc, const char **argv)
return 0;
}
-static int cmd__submodule_resolve_relative_url(int argc, const char **argv)
+static int cmd__submodule_config_list(int argc, const char **argv)
{
struct option options[] = {
OPT_END()
};
- argc = parse_options(argc, argv, "test-tools", options,
- submodule_resolve_relative_url_usage, 0);
- if (argc != 3)
- usage_with_options(submodule_resolve_relative_url_usage, options);
+ const char *const usage[] = {
+ "test-tool submodule config-list <key>",
+ NULL
+ };
+ argc = parse_options(argc, argv, "test-tools", options, usage,
+ PARSE_OPT_KEEP_ARGV0);
+
+ setup_git_directory();
+
+ if (argc == 2)
+ return print_config_from_gitmodules(the_repository, argv[1]);
+ usage_with_options(usage, options);
+}
+
+static int cmd__submodule_config_set(int argc, const char **argv)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *const usage[] = {
+ "test-tool submodule config-set <key> <value>",
+ NULL
+ };
+ argc = parse_options(argc, argv, "test-tools", options, usage,
+ PARSE_OPT_KEEP_ARGV0);
+
+ setup_git_directory();
+
+ /* Equivalent to ACTION_SET in builtin/config.c */
+ if (argc == 3) {
+ if (!is_writing_gitmodules_ok())
+ die("please make sure that the .gitmodules file is in the working tree");
+
+ return config_set_in_gitmodules_file_gently(argv[1], argv[2]);
+ }
+ usage_with_options(usage, options);
+}
+
+static int cmd__submodule_config_unset(int argc, const char **argv)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *const usage[] = {
+ "test-tool submodule config-unset <key>",
+ NULL
+ };
+
+ setup_git_directory();
+
+ if (argc == 2) {
+ if (!is_writing_gitmodules_ok())
+ die("please make sure that the .gitmodules file is in the working tree");
+ return config_set_in_gitmodules_file_gently(argv[1], NULL);
+ }
+ usage_with_options(usage, options);
+}
+
+static int cmd__submodule_config_writeable(int argc, const char **argv)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *const usage[] = {
+ "test-tool submodule config-writeable",
+ NULL
+ };
+ setup_git_directory();
+
+ if (argc == 1)
+ return is_writing_gitmodules_ok() ? 0 : -1;
- return resolve_relative_url(argc, argv);
+ usage_with_options(usage, options);
}
static struct test_cmd cmds[] = {
{ "check-name", cmd__submodule_check_name },
{ "is-active", cmd__submodule_is_active },
{ "resolve-relative-url", cmd__submodule_resolve_relative_url},
+ { "config-list", cmd__submodule_config_list },
+ { "config-set", cmd__submodule_config_set },
+ { "config-unset", cmd__submodule_config_unset },
+ { "config-writeable", cmd__submodule_config_writeable },
};
int cmd__submodule(int argc, const char **argv)
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
index d1d013bcd9..7eb1a26a30 100644
--- a/t/helper/test-tool.c
+++ b/t/helper/test-tool.c
@@ -13,6 +13,8 @@ static struct test_cmd cmds[] = {
{ "advise", cmd__advise_if_enabled },
{ "bitmap", cmd__bitmap },
{ "bloom", cmd__bloom },
+ { "bundle-uri", cmd__bundle_uri },
+ { "cache-tree", cmd__cache_tree },
{ "chmtime", cmd__chmtime },
{ "config", cmd__config },
{ "crontab", cmd__crontab },
@@ -72,6 +74,7 @@ static struct test_cmd cmds[] = {
{ "scrap-cache-tree", cmd__scrap_cache_tree },
{ "serve-v2", cmd__serve_v2 },
{ "sha1", cmd__sha1 },
+ { "sha1-is-sha1dc", cmd__sha1_is_sha1dc },
{ "sha256", cmd__sha256 },
{ "sigchain", cmd__sigchain },
{ "simple-ipc", cmd__simple_ipc },
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
index 6b46b6444b..da7cd6351a 100644
--- a/t/helper/test-tool.h
+++ b/t/helper/test-tool.h
@@ -7,6 +7,8 @@
int cmd__advise_if_enabled(int argc, const char **argv);
int cmd__bitmap(int argc, const char **argv);
int cmd__bloom(int argc, const char **argv);
+int cmd__bundle_uri(int argc, const char **argv);
+int cmd__cache_tree(int argc, const char **argv);
int cmd__chmtime(int argc, const char **argv);
int cmd__config(int argc, const char **argv);
int cmd__crontab(int argc, const char **argv);
@@ -65,6 +67,7 @@ int cmd__run_command(int argc, const char **argv);
int cmd__scrap_cache_tree(int argc, const char **argv);
int cmd__serve_v2(int argc, const char **argv);
int cmd__sha1(int argc, const char **argv);
+int cmd__sha1_is_sha1dc(int argc, const char **argv);
int cmd__oid_array(int argc, const char **argv);
int cmd__sha256(int argc, const char **argv);
int cmd__sigchain(int argc, const char **argv);
diff --git a/t/helper/test-trace2.c b/t/helper/test-trace2.c
index a714130ece..f374c21ec3 100644
--- a/t/helper/test-trace2.c
+++ b/t/helper/test-trace2.c
@@ -132,6 +132,7 @@ static int ut_003error(int argc, const char **argv)
*/
static int ut_004child(int argc, const char **argv)
{
+ struct child_process cmd = CHILD_PROCESS_INIT;
int result;
/*
@@ -141,7 +142,8 @@ static int ut_004child(int argc, const char **argv)
if (!argc)
return 0;
- result = run_command_v_opt(argv, 0);
+ strvec_pushv(&cmd.args, argv);
+ result = run_command(&cmd);
exit(result);
}
@@ -229,6 +231,187 @@ static int ut_010bug_BUG(int argc, const char **argv)
}
/*
+ * Single-threaded timer test. Create several intervals using the
+ * TEST1 timer. The test script can verify that an aggregate Trace2
+ * "timer" event is emitted indicating that we started+stopped the
+ * timer the requested number of times.
+ */
+static int ut_100timer(int argc, const char **argv)
+{
+ const char *usage_error =
+ "expect <count> <ms_delay>";
+
+ int count = 0;
+ int delay = 0;
+ int k;
+
+ if (argc != 2)
+ die("%s", usage_error);
+ if (get_i(&count, argv[0]))
+ die("%s", usage_error);
+ if (get_i(&delay, argv[1]))
+ die("%s", usage_error);
+
+ for (k = 0; k < count; k++) {
+ trace2_timer_start(TRACE2_TIMER_ID_TEST1);
+ sleep_millisec(delay);
+ trace2_timer_stop(TRACE2_TIMER_ID_TEST1);
+ }
+
+ return 0;
+}
+
+struct ut_101_data {
+ int count;
+ int delay;
+};
+
+static void *ut_101timer_thread_proc(void *_ut_101_data)
+{
+ struct ut_101_data *data = _ut_101_data;
+ int k;
+
+ trace2_thread_start("ut_101");
+
+ for (k = 0; k < data->count; k++) {
+ trace2_timer_start(TRACE2_TIMER_ID_TEST2);
+ sleep_millisec(data->delay);
+ trace2_timer_stop(TRACE2_TIMER_ID_TEST2);
+ }
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+/*
+ * Multi-threaded timer test. Create several threads that each create
+ * several intervals using the TEST2 timer. The test script can verify
+ * that individual Trace2 "th_timer" events for each thread and an
+ * aggregate "timer" event are generated.
+ */
+static int ut_101timer(int argc, const char **argv)
+{
+ const char *usage_error =
+ "expect <count> <ms_delay> <threads>";
+
+ struct ut_101_data data = { 0, 0 };
+ int nr_threads = 0;
+ int k;
+ pthread_t *pids = NULL;
+
+ if (argc != 3)
+ die("%s", usage_error);
+ if (get_i(&data.count, argv[0]))
+ die("%s", usage_error);
+ if (get_i(&data.delay, argv[1]))
+ die("%s", usage_error);
+ if (get_i(&nr_threads, argv[2]))
+ die("%s", usage_error);
+
+ CALLOC_ARRAY(pids, nr_threads);
+
+ for (k = 0; k < nr_threads; k++) {
+ if (pthread_create(&pids[k], NULL, ut_101timer_thread_proc, &data))
+ die("failed to create thread[%d]", k);
+ }
+
+ for (k = 0; k < nr_threads; k++) {
+ if (pthread_join(pids[k], NULL))
+ die("failed to join thread[%d]", k);
+ }
+
+ free(pids);
+
+ return 0;
+}
+
+/*
+ * Single-threaded counter test. Add several values to the TEST1 counter.
+ * The test script can verify that the final sum is reported in the "counter"
+ * event.
+ */
+static int ut_200counter(int argc, const char **argv)
+{
+ const char *usage_error =
+ "expect <v1> [<v2> [...]]";
+ int value;
+ int k;
+
+ if (argc < 1)
+ die("%s", usage_error);
+
+ for (k = 0; k < argc; k++) {
+ if (get_i(&value, argv[k]))
+ die("invalid value[%s] -- %s",
+ argv[k], usage_error);
+ trace2_counter_add(TRACE2_COUNTER_ID_TEST1, value);
+ }
+
+ return 0;
+}
+
+/*
+ * Multi-threaded counter test. Create several threads that each increment
+ * the TEST2 global counter. The test script can verify that an individual
+ * "th_counter" event is generated with a partial sum for each thread and
+ * that a final aggregate "counter" event is generated.
+ */
+
+struct ut_201_data {
+ int v1;
+ int v2;
+};
+
+static void *ut_201counter_thread_proc(void *_ut_201_data)
+{
+ struct ut_201_data *data = _ut_201_data;
+
+ trace2_thread_start("ut_201");
+
+ trace2_counter_add(TRACE2_COUNTER_ID_TEST2, data->v1);
+ trace2_counter_add(TRACE2_COUNTER_ID_TEST2, data->v2);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+static int ut_201counter(int argc, const char **argv)
+{
+ const char *usage_error =
+ "expect <v1> <v2> <threads>";
+
+ struct ut_201_data data = { 0, 0 };
+ int nr_threads = 0;
+ int k;
+ pthread_t *pids = NULL;
+
+ if (argc != 3)
+ die("%s", usage_error);
+ if (get_i(&data.v1, argv[0]))
+ die("%s", usage_error);
+ if (get_i(&data.v2, argv[1]))
+ die("%s", usage_error);
+ if (get_i(&nr_threads, argv[2]))
+ die("%s", usage_error);
+
+ CALLOC_ARRAY(pids, nr_threads);
+
+ for (k = 0; k < nr_threads; k++) {
+ if (pthread_create(&pids[k], NULL, ut_201counter_thread_proc, &data))
+ die("failed to create thread[%d]", k);
+ }
+
+ for (k = 0; k < nr_threads; k++) {
+ if (pthread_join(pids[k], NULL))
+ die("failed to join thread[%d]", k);
+ }
+
+ free(pids);
+
+ return 0;
+}
+
+/*
* Usage:
* test-tool trace2 <ut_name_1> <ut_usage_1>
* test-tool trace2 <ut_name_2> <ut_usage_2>
@@ -248,6 +431,12 @@ static struct unit_test ut_table[] = {
{ ut_008bug, "008bug", "" },
{ ut_009bug_BUG, "009bug_BUG","" },
{ ut_010bug_BUG, "010bug_BUG","" },
+
+ { ut_100timer, "100timer", "<count> <ms_delay>" },
+ { ut_101timer, "101timer", "<count> <ms_delay> <threads>" },
+
+ { ut_200counter, "200counter", "<v1> [<v2> [<v3> [...]]]" },
+ { ut_201counter, "201counter", "<v1> <v2> <threads>" },
};
/* clang-format on */
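The new 1xx/2xx unit tests can also be exercised by hand with the trace2 perf target pointed at a file; a rough sketch (the target path is an illustrative choice):

    # 5 start/stop intervals of ~10ms each on the TEST1 stopwatch timer
    GIT_TRACE2_PERF=/tmp/trace.perf test-tool trace2 100timer 5 10
    # add 7 and 13 to the TEST2 counter in each of 3 threads
    GIT_TRACE2_PERF=/tmp/trace.perf test-tool trace2 201counter 7 13 3
    grep -e "|timer|" -e "|counter|" /tmp/trace.perf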
diff --git a/t/lib-gettext.sh b/t/lib-gettext.sh
index cc6bb2cdea..dcd6e9c3f7 100644
--- a/t/lib-gettext.sh
+++ b/t/lib-gettext.sh
@@ -7,7 +7,7 @@
. ./test-lib.sh
GIT_TEXTDOMAINDIR="$GIT_BUILD_DIR/po/build/locale"
-GIT_PO_PATH="$GIT_BUILD_DIR/po"
+GIT_PO_PATH="$GIT_SOURCE_DIR/po"
export GIT_TEXTDOMAINDIR GIT_PO_PATH
if test -n "$GIT_TEST_INSTALLED"
diff --git a/t/lib-gitweb.sh b/t/lib-gitweb.sh
index 1f32ca66ea..6f68df247a 100644
--- a/t/lib-gitweb.sh
+++ b/t/lib-gitweb.sh
@@ -49,7 +49,7 @@ EOF
error "Cannot find gitweb at $GITWEB_TEST_INSTALLED."
say "# Testing $SCRIPT_NAME"
else # normal case, use source version of gitweb
- SCRIPT_NAME="$GIT_BUILD_DIR/gitweb/gitweb.perl"
+ SCRIPT_NAME="$GIT_SOURCE_DIR/gitweb/gitweb.perl"
fi
export SCRIPT_NAME
}
diff --git a/t/lib-httpd.sh b/t/lib-httpd.sh
index 1f6b9b08d1..ba9fe36772 100644
--- a/t/lib-httpd.sh
+++ b/t/lib-httpd.sh
@@ -174,6 +174,11 @@ prepare_httpd() {
fi
}
+enable_http2 () {
+ HTTPD_PARA="$HTTPD_PARA -DHTTP2"
+ test_set_prereq HTTP2
+}
+
start_httpd() {
prepare_httpd >&3 2>&4
diff --git a/t/lib-httpd/apache.conf b/t/lib-httpd/apache.conf
index 497b9b9d92..0294739a77 100644
--- a/t/lib-httpd/apache.conf
+++ b/t/lib-httpd/apache.conf
@@ -29,6 +29,11 @@ ErrorLog error.log
LoadModule setenvif_module modules/mod_setenvif.so
</IfModule>
+<IfDefine HTTP2>
+LoadModule http2_module modules/mod_http2.so
+Protocols h2c
+</IfDefine>
+
<IfVersion < 2.4>
LockFile accept.lock
</IfVersion>
@@ -64,12 +69,20 @@ LockFile accept.lock
<IfModule !mod_access_compat.c>
LoadModule access_compat_module modules/mod_access_compat.so
</IfModule>
-<IfModule !mod_mpm_prefork.c>
- LoadModule mpm_prefork_module modules/mod_mpm_prefork.so
-</IfModule>
<IfModule !mod_unixd.c>
LoadModule unixd_module modules/mod_unixd.so
</IfModule>
+
+<IfDefine HTTP2>
+<IfModule !mod_mpm_event.c>
+ LoadModule mpm_event_module modules/mod_mpm_event.so
+</IfModule>
+</IfDefine>
+<IfDefine !HTTP2>
+<IfModule !mod_mpm_prefork.c>
+ LoadModule mpm_prefork_module modules/mod_mpm_prefork.so
+</IfModule>
+</IfDefine>
</IfVersion>
PassEnv GIT_VALGRIND
@@ -80,6 +93,8 @@ PassEnv LSAN_OPTIONS
PassEnv GIT_TRACE
PassEnv GIT_CONFIG_NOSYSTEM
PassEnv GIT_TEST_SIDEBAND_ALL
+PassEnv LANG
+PassEnv LC_ALL
Alias /dumb/ www/
Alias /auth/dumb/ www/auth/dumb/
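A test script opts into this HTTP/2 configuration by calling enable_http2 before start_httpd and guarding its tests with the HTTP2 prereq; a sketch of the intended usage (repository setup under the httpd document root is elided and assumed):

    . ./test-lib.sh
    . "$TEST_DIRECTORY"/lib-httpd.sh
    enable_http2	# passes -DHTTP2 so apache loads mod_http2/mpm_event and offers h2c
    start_httpd
    test_expect_success HTTP2 'clone over HTTP/2 cleartext' '
    	git clone "$HTTPD_URL/smart/server.git" client
    '
    test_done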
diff --git a/t/perf/p0006-read-tree-checkout.sh b/t/perf/p0006-read-tree-checkout.sh
index c481c012d2..325566e18e 100755
--- a/t/perf/p0006-read-tree-checkout.sh
+++ b/t/perf/p0006-read-tree-checkout.sh
@@ -49,6 +49,14 @@ test_perf "read-tree br_base br_ballast ($nr_files)" '
git read-tree -n -m br_base br_ballast
'
+test_perf "read-tree br_ballast_plus_1 ($nr_files)" '
+ # Run read-tree 100 times for clearer performance results & comparisons
+ for i in $(test_seq 100)
+ do
+ git read-tree -n -m br_ballast_plus_1 || return 1
+ done
+'
+
test_perf "switch between br_base br_ballast ($nr_files)" '
git checkout -q br_base &&
git checkout -q br_ballast
diff --git a/t/perf/p0090-cache-tree.sh b/t/perf/p0090-cache-tree.sh
new file mode 100755
index 0000000000..a8eabca2c4
--- /dev/null
+++ b/t/perf/p0090-cache-tree.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+test_description="Tests performance of cache tree update operations"
+
+. ./perf-lib.sh
+
+test_perf_large_repo
+test_checkout_worktree
+
+count=100
+
+test_expect_success 'setup cache tree' '
+ git write-tree
+'
+
+test_cache_tree () {
+ test_perf "$1, $3" "
+ for i in \$(test_seq $count)
+ do
+ test-tool cache-tree $4 $2
+ done
+ "
+}
+
+test_cache_tree_update_functions () {
+ test_cache_tree 'no-op' 'control' "$1" "$2"
+ test_cache_tree 'prime_cache_tree' 'prime' "$1" "$2"
+ test_cache_tree 'cache_tree_update' 'update' "$1" "$2"
+}
+
+test_cache_tree_update_functions "clean" ""
+test_cache_tree_update_functions "invalidate 2" "--invalidate 2"
+test_cache_tree_update_functions "invalidate 50" "--invalidate 50"
+test_cache_tree_update_functions "empty" "--empty"
+
+test_done
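Since the two layers of parameters above can be hard to follow at a glance: each test_cache_tree_update_functions call fans out into three test_perf blocks, one per cache-tree operation. For example, the "invalidate 2" row effectively generates:

    test_perf "prime_cache_tree, invalidate 2" "
    	for i in \$(test_seq 100)
    	do
    		test-tool cache-tree --invalidate 2 prime
    	done
    "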
diff --git a/t/perf/p1401-ref-operations.sh b/t/perf/p1401-ref-operations.sh
new file mode 100755
index 0000000000..0b88a2f531
--- /dev/null
+++ b/t/perf/p1401-ref-operations.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+
+test_description="Tests performance of ref operations"
+
+. ./perf-lib.sh
+
+test_perf_large_repo
+
+test_perf 'git pack-refs (v1)' '
+ git commit --allow-empty -m "change one ref" &&
+ git pack-refs --all
+'
+
+test_perf 'git for-each-ref (v1)' '
+ git for-each-ref --format="%(refname)" >/dev/null
+'
+
+test_perf 'git for-each-ref prefix (v1)' '
+ git for-each-ref --format="%(refname)" refs/tags/ >/dev/null
+'
+
+test_expect_success 'configure packed-refs v2' '
+ git config core.repositoryFormatVersion 1 &&
+ git config --add extensions.refFormat files &&
+ git config --add extensions.refFormat packed &&
+ git config --add extensions.refFormat packed-v2 &&
+ git config refs.packedRefsVersion 2 &&
+ git commit --allow-empty -m "change one ref" &&
+ git pack-refs --all &&
+ test_copy_bytes 16 .git/packed-refs | xxd >actual &&
+ grep PREF actual
+'
+
+test_perf 'git pack-refs (v2)' '
+ git commit --allow-empty -m "change one ref" &&
+ git pack-refs --all
+'
+
+test_perf 'git pack-refs (v2;hashing)' '
+ git commit --allow-empty -m "change one ref" &&
+ git -c refs.hashPackedRefs=true pack-refs --all
+'
+
+test_perf 'git for-each-ref (v2)' '
+ git for-each-ref --format="%(refname)" >/dev/null
+'
+
+test_perf 'git for-each-ref prefix (v2)' '
+ git for-each-ref --format="%(refname)" refs/tags/ >/dev/null
+'
+
+test_done
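Like the other perf scripts, this one is most meaningful against a repository with a large ref store; a sketch of running it standalone (the repository path is an illustrative assumption):

    cd t/perf &&
    GIT_PERF_LARGE_REPO=/path/to/repo-with-many-refs ./p1401-ref-operations.sh -v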
diff --git a/t/perf/p2000-sparse-operations.sh b/t/perf/p2000-sparse-operations.sh
index fce8151d41..3242cfe91a 100755
--- a/t/perf/p2000-sparse-operations.sh
+++ b/t/perf/p2000-sparse-operations.sh
@@ -124,5 +124,6 @@ test_perf_on_all git read-tree -mu HEAD
test_perf_on_all git checkout-index -f --all
test_perf_on_all git update-index --add --remove $SPARSE_CONE/a
test_perf_on_all "git rm -f $SPARSE_CONE/a && git checkout HEAD -- $SPARSE_CONE/a"
+test_perf_on_all git grep --cached --sparse bogus -- "f2/f1/f1/*"
test_done
diff --git a/t/perf/p7102-reset.sh b/t/perf/p7102-reset.sh
new file mode 100755
index 0000000000..9b039e8691
--- /dev/null
+++ b/t/perf/p7102-reset.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+test_description='performance of reset'
+. ./perf-lib.sh
+
+test_perf_default_repo
+test_checkout_worktree
+
+test_perf 'reset --hard with change in tree' '
+ base=$(git rev-parse HEAD) &&
+ test_commit --no-tag A &&
+ new=$(git rev-parse HEAD) &&
+
+ for i in $(test_seq 10)
+ do
+ git reset --hard $new &&
+ git reset --hard $base || return $?
+ done
+'
+
+test_done
diff --git a/t/perf/run b/t/perf/run
index 33da4d2aba..34115edec3 100755
--- a/t/perf/run
+++ b/t/perf/run
@@ -232,10 +232,10 @@ then
)
elif test -n "$GIT_PERF_SUBSECTION"
then
- egrep "^$GIT_PERF_SUBSECTION\$" "$TEST_RESULTS_DIR"/run_subsections.names >/dev/null ||
+ grep -E "^$GIT_PERF_SUBSECTION\$" "$TEST_RESULTS_DIR"/run_subsections.names >/dev/null ||
die "subsection '$GIT_PERF_SUBSECTION' not found in '$GIT_PERF_CONFIG_FILE'"
- egrep "^$GIT_PERF_SUBSECTION\$" "$TEST_RESULTS_DIR"/run_subsections.names | while read -r subsec
+ grep -E "^$GIT_PERF_SUBSECTION\$" "$TEST_RESULTS_DIR"/run_subsections.names | while read -r subsec
do
(
GIT_PERF_SUBSECTION="$subsec"
diff --git a/t/t0013-sha1dc.sh b/t/t0013-sha1dc.sh
index 9ad76080aa..5324047689 100755
--- a/t/t0013-sha1dc.sh
+++ b/t/t0013-sha1dc.sh
@@ -6,9 +6,11 @@ TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
TEST_DATA="$TEST_DIRECTORY/t0013"
-if test -z "$DC_SHA1"
+test_lazy_prereq SHA1_IS_SHA1DC 'test-tool sha1-is-sha1dc'
+
+if ! test_have_prereq SHA1_IS_SHA1DC
then
- skip_all='skipping sha1 collision tests, DC_SHA1 not set'
+ skip_all='skipping sha1 collision tests, not using sha1collisiondetection'
test_done
fi
diff --git a/t/t0033-safe-directory.sh b/t/t0033-safe-directory.sh
index aecb308cf6..dc3496897a 100755
--- a/t/t0033-safe-directory.sh
+++ b/t/t0033-safe-directory.sh
@@ -71,4 +71,13 @@ test_expect_success 'safe.directory=*, but is reset' '
expect_rejected_dir
'
+test_expect_success 'safe.directory in included file' '
+ cat >gitconfig-include <<-EOF &&
+ [safe]
+ directory = "$(pwd)"
+ EOF
+ git config --global --add include.path "$(pwd)/gitconfig-include" &&
+ git status
+'
+
test_done
diff --git a/t/t0035-safe-bare-repository.sh b/t/t0035-safe-bare-repository.sh
index ecbdc8238d..11c15a48aa 100755
--- a/t/t0035-safe-bare-repository.sh
+++ b/t/t0035-safe-bare-repository.sh
@@ -51,4 +51,13 @@ test_expect_success 'safe.bareRepository on the command line' '
-c safe.bareRepository=all
'
+test_expect_success 'safe.bareRepository in included file' '
+ cat >gitconfig-include <<-\EOF &&
+ [safe]
+ bareRepository = explicit
+ EOF
+ git config --global --add include.path "$(pwd)/gitconfig-include" &&
+ expect_rejected -C outer-repo/bare-repo
+'
+
test_done
diff --git a/t/t0061-run-command.sh b/t/t0061-run-command.sh
index 7b5423eebd..879e536638 100755
--- a/t/t0061-run-command.sh
+++ b/t/t0061-run-command.sh
@@ -130,10 +130,20 @@ World
EOF
test_expect_success 'run_command runs in parallel with more jobs available than tasks' '
- test-tool run-command run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test-tool run-command run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>actual &&
+ test_must_be_empty out &&
test_cmp expect actual
'
+test_expect_success 'run_command runs in parallel with more jobs available than tasks --duplicate-output' '
+ test-tool run-command --duplicate-output run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
+ test_must_be_empty out &&
+ test 4 = $(grep -c "duplicate_output: Hello" err) &&
+ test 4 = $(grep -c "duplicate_output: World" err) &&
+ sed "/duplicate_output/d" err > err1 &&
+ test_cmp expect err1
+'
+
test_expect_success 'run_command runs ungrouped in parallel with more jobs available than tasks' '
test-tool run-command --ungroup run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
test_line_count = 8 out &&
@@ -141,10 +151,20 @@ test_expect_success 'run_command runs ungrouped in parallel with more jobs avail
'
test_expect_success 'run_command runs in parallel with as many jobs as tasks' '
- test-tool run-command run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test-tool run-command run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>actual &&
+ test_must_be_empty out &&
test_cmp expect actual
'
+test_expect_success 'run_command runs in parallel with as many jobs as tasks --duplicate-output' '
+ test-tool run-command --duplicate-output run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
+ test_must_be_empty out &&
+ test 4 = $(grep -c "duplicate_output: Hello" err) &&
+ test 4 = $(grep -c "duplicate_output: World" err) &&
+ sed "/duplicate_output/d" err > err1 &&
+ test_cmp expect err1
+'
+
test_expect_success 'run_command runs ungrouped in parallel with as many jobs as tasks' '
test-tool run-command --ungroup run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
test_line_count = 8 out &&
@@ -152,10 +172,20 @@ test_expect_success 'run_command runs ungrouped in parallel with as many jobs as
'
test_expect_success 'run_command runs in parallel with more tasks than jobs available' '
- test-tool run-command run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test-tool run-command run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>actual &&
+ test_must_be_empty out &&
test_cmp expect actual
'
+test_expect_success 'run_command runs in parallel with more tasks than jobs available --duplicate-output' '
+ test-tool run-command --duplicate-output run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
+ test_must_be_empty out &&
+ test 4 = $(grep -c "duplicate_output: Hello" err) &&
+ test 4 = $(grep -c "duplicate_output: World" err) &&
+ sed "/duplicate_output/d" err > err1 &&
+ test_cmp expect err1
+'
+
test_expect_success 'run_command runs ungrouped in parallel with more tasks than jobs available' '
test-tool run-command --ungroup run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
test_line_count = 8 out &&
@@ -172,10 +202,17 @@ asking for a quick stop
EOF
test_expect_success 'run_command is asked to abort gracefully' '
- test-tool run-command run-command-abort 3 false 2>actual &&
+ test-tool run-command run-command-abort 3 false >out 2>actual &&
+ test_must_be_empty out &&
test_cmp expect actual
'
+test_expect_success 'run_command is asked to abort gracefully --duplicate-output' '
+ test-tool run-command --duplicate-output run-command-abort 3 false >out 2>err &&
+ test_must_be_empty out &&
+ test_cmp expect err
+'
+
test_expect_success 'run_command is asked to abort gracefully (ungroup)' '
test-tool run-command --ungroup run-command-abort 3 false >out 2>err &&
test_must_be_empty out &&
@@ -187,10 +224,17 @@ no further jobs available
EOF
test_expect_success 'run_command outputs ' '
- test-tool run-command run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test-tool run-command run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>actual &&
+ test_must_be_empty out &&
test_cmp expect actual
'
+test_expect_success 'run_command outputs --duplicate-output' '
+ test-tool run-command --duplicate-output run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
+ test_must_be_empty out &&
+ test_cmp expect err
+'
+
test_expect_success 'run_command outputs (ungroup) ' '
test-tool run-command --ungroup run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
test_must_be_empty out &&
diff --git a/t/t0068-for-each-repo.sh b/t/t0068-for-each-repo.sh
index 4675e85251..3648d439a8 100755
--- a/t/t0068-for-each-repo.sh
+++ b/t/t0068-for-each-repo.sh
@@ -2,15 +2,18 @@
test_description='git for-each-repo builtin'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'run based on configured value' '
git init one &&
git init two &&
git init three &&
+ git init ~/four &&
git -C two commit --allow-empty -m "DID NOT RUN" &&
git config run.key "$TRASH_DIRECTORY/one" &&
git config --add run.key "$TRASH_DIRECTORY/three" &&
+ git config --add run.key "~/four" &&
git for-each-repo --config=run.key commit --allow-empty -m "ran" &&
git -C one log -1 --pretty=format:%s >message &&
grep ran message &&
@@ -18,12 +21,16 @@ test_expect_success 'run based on configured value' '
! grep ran message &&
git -C three log -1 --pretty=format:%s >message &&
grep ran message &&
+ git -C ~/four log -1 --pretty=format:%s >message &&
+ grep ran message &&
git for-each-repo --config=run.key -- commit --allow-empty -m "ran again" &&
git -C one log -1 --pretty=format:%s >message &&
grep again message &&
git -C two log -1 --pretty=format:%s >message &&
! grep again message &&
git -C three log -1 --pretty=format:%s >message &&
+ grep again message &&
+ git -C ~/four log -1 --pretty=format:%s >message &&
grep again message
'
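For context, the main consumer of this tilde expansion is `git maintenance`, which records registered repositories in a multi-valued config key and replays them through for-each-repo; a sketch (the path is illustrative):

    git config --global --add maintenance.repo "~/projects/widget" &&
    git for-each-repo --config=maintenance.repo maintenance run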
diff --git a/t/t0070-fundamental.sh b/t/t0070-fundamental.sh
index 8d59905ef0..574de34198 100755
--- a/t/t0070-fundamental.sh
+++ b/t/t0070-fundamental.sh
@@ -6,6 +6,7 @@ test_description='check that the most basic functions work
Verify wrappers and compatibility functions.
'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'character classes (isspace, isalpha etc.)' '
diff --git a/t/t0211-trace2-perf.sh b/t/t0211-trace2-perf.sh
index 22d0845544..0b3436e8ca 100755
--- a/t/t0211-trace2-perf.sh
+++ b/t/t0211-trace2-perf.sh
@@ -173,4 +173,99 @@ test_expect_success 'using global config, perf stream, return code 0' '
test_cmp expect actual
'
+# Exercise the stopwatch timers in a loop and confirm that we have
+# as many start/stop intervals as expected. We cannot really test the
+# actual (total, min, max) timer values, so we have to assume that they
+# are good, but we can verify the interval count.
+#
+# The timer "test/test1" should only emit a global summary "timer" event.
+# The timer "test/test2" should emit per-thread "th_timer" events and a
+# global summary "timer" event.
+
+have_timer_event () {
+ thread=$1 event=$2 category=$3 name=$4 intervals=$5 file=$6 &&
+
+ pattern="d0|${thread}|${event}||||${category}|name:${name} intervals:${intervals}" &&
+
+ grep "${pattern}" ${file}
+}
+
+test_expect_success 'stopwatch timer test/test1' '
+ test_when_finished "rm trace.perf actual" &&
+ test_config_global trace2.perfBrief 1 &&
+ test_config_global trace2.perfTarget "$(pwd)/trace.perf" &&
+
+ # Use the timer "test1" 5 times from "main".
+ test-tool trace2 100timer 5 10 &&
+
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+
+ have_timer_event "main" "timer" "test" "test1" 5 actual
+'
+
+test_expect_success 'stopwatch timer test/test2' '
+ test_when_finished "rm trace.perf actual" &&
+ test_config_global trace2.perfBrief 1 &&
+ test_config_global trace2.perfTarget "$(pwd)/trace.perf" &&
+
+ # Use the timer "test2" 5 times each in 3 threads.
+ test-tool trace2 101timer 5 10 3 &&
+
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+
+ # So we should have 3 per-thread events of 5 each.
+ have_timer_event "th01:ut_101" "th_timer" "test" "test2" 5 actual &&
+ have_timer_event "th02:ut_101" "th_timer" "test" "test2" 5 actual &&
+ have_timer_event "th03:ut_101" "th_timer" "test" "test2" 5 actual &&
+
+ # And we should have 15 total uses.
+ have_timer_event "main" "timer" "test" "test2" 15 actual
+'
+
+# Exercise the global counters and confirm that we get the expected values.
+#
+# The counter "test/test1" should only emit a global summary "counter" event.
+# The counter "test/test2" could emit per-thread "th_counter" events and a
+# global summary "counter" event.
+
+have_counter_event () {
+ thread=$1 event=$2 category=$3 name=$4 value=$5 file=$6 &&
+
+ pattern="d0|${thread}|${event}||||${category}|name:${name} value:${value}" &&
+
+ grep "${patern}" ${file}
+}
+
+test_expect_success 'global counter test/test1' '
+ test_when_finished "rm trace.perf actual" &&
+ test_config_global trace2.perfBrief 1 &&
+ test_config_global trace2.perfTarget "$(pwd)/trace.perf" &&
+
+ # Use the counter "test1" and add n integers.
+ test-tool trace2 200counter 1 2 3 4 5 &&
+
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+
+ have_counter_event "main" "counter" "test" "test1" 15 actual
+'
+
+test_expect_success 'global counter test/test2' '
+ test_when_finished "rm trace.perf actual" &&
+ test_config_global trace2.perfBrief 1 &&
+ test_config_global trace2.perfTarget "$(pwd)/trace.perf" &&
+
+ # Add 2 integers to the counter "test2" in each of 3 threads.
+ test-tool trace2 201counter 7 13 3 &&
+
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+
+ # So we should have 3 per-thread events, each with a partial sum of 20 (7+13).
+ have_counter_event "th01:ut_201" "th_counter" "test" "test2" 20 actual &&
+ have_counter_event "th02:ut_201" "th_counter" "test" "test2" 20 actual &&
+ have_counter_event "th03:ut_201" "th_counter" "test" "test2" 20 actual &&
+
+ # And we should have a single event with the total across all threads.
+ have_counter_event "main" "counter" "test" "test2" 60 actual
+'
+
test_done
diff --git a/t/t0211/scrub_perf.perl b/t/t0211/scrub_perf.perl
index 299999f0f8..7a50bae646 100644
--- a/t/t0211/scrub_perf.perl
+++ b/t/t0211/scrub_perf.perl
@@ -64,6 +64,12 @@ while (<>) {
goto SKIP_LINE;
}
}
+ elsif ($tokens[$col_event] =~ m/timer/) {
+ # This also captures "th_timer" events
+ $tokens[$col_rest] =~ s/ total:\d+\.\d*/ total:_T_TOTAL_/;
+ $tokens[$col_rest] =~ s/ min:\d+\.\d*/ min:_T_MIN_/;
+ $tokens[$col_rest] =~ s/ max:\d+\.\d*/ max:_T_MAX_/;
+ }
# t_abs and t_rel are either blank or a float. Replace the float
# with a constant for matching the HEREDOC in the test script.
diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh
index 1e864cf317..5b7bee888d 100755
--- a/t/t0410-partial-clone.sh
+++ b/t/t0410-partial-clone.sh
@@ -215,6 +215,20 @@ test_expect_success 'fetching of missing objects' '
grep "$HASH" out
'
+test_expect_success 'fetching of a promised object that promisor remote no longer has' '
+ rm -f err &&
+ test_create_repo unreliable-server &&
+ git -C unreliable-server config uploadpack.allowanysha1inwant 1 &&
+ git -C unreliable-server config uploadpack.allowfilter 1 &&
+ test_commit -C unreliable-server foo &&
+
+ git clone --filter=blob:none --no-checkout "file://$(pwd)/unreliable-server" unreliable-client &&
+
+ rm -rf unreliable-server/.git/objects/* &&
+ test_must_fail git -C unreliable-client checkout HEAD 2>err &&
+ grep "could not fetch.*from promisor remote" err
+'
+
test_expect_success 'fetching of missing objects works with ref-in-want enabled' '
# ref-in-want requires protocol version 2
git -C server config protocol.version 2 &&
diff --git a/t/t0450-txt-doc-vs-help.sh b/t/t0450-txt-doc-vs-help.sh
new file mode 100755
index 0000000000..cd3969e852
--- /dev/null
+++ b/t/t0450-txt-doc-vs-help.sh
@@ -0,0 +1,172 @@
+#!/bin/sh
+
+test_description='assert (unbuilt) Documentation/*.txt and -h output
+
+Run this with --debug to see a summary of where we still fail to make
+the two versions consistent with one another.'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+test_expect_success 'setup: list of builtins' '
+ git --list-cmds=builtins >builtins
+'
+
+test_expect_success 'list of txt and help mismatches is sorted' '
+ sort -u "$TEST_DIRECTORY"/t0450/txt-help-mismatches >expect &&
+ if ! test_cmp expect "$TEST_DIRECTORY"/t0450/txt-help-mismatches
+ then
+ BUG "please keep the list of txt and help mismatches sorted"
+ fi
+'
+
+help_to_synopsis () {
+ builtin="$1" &&
+ out_dir="out/$builtin" &&
+ out="$out_dir/help.synopsis" &&
+ if test -f "$out"
+ then
+ echo "$out" &&
+ return 0
+ fi &&
+ mkdir -p "$out_dir" &&
+ test_expect_code 129 git $builtin -h >"$out.raw" 2>&1 &&
+ sed -n \
+ -e '1,/^$/ {
+ /^$/d;
+ s/^usage: //;
+ s/^ *or: //;
+ p;
+ }' <"$out.raw" >"$out" &&
+ echo "$out"
+}
+
+builtin_to_txt () {
+ echo "$GIT_BUILD_DIR/Documentation/git-$1.txt"
+}
+
+txt_to_synopsis () {
+ builtin="$1" &&
+ out_dir="out/$builtin" &&
+ out="$out_dir/txt.synopsis" &&
+ if test -f "$out"
+ then
+ echo "$out" &&
+ return 0
+ fi &&
+ b2t="$(builtin_to_txt "$builtin")" &&
+ sed -n \
+ -e '/^\[verse\]$/,/^$/ {
+ /^$/d;
+ /^\[verse\]$/d;
+
+ s/{litdd}/--/g;
+ s/'\''\(git[ a-z-]*\)'\''/\1/g;
+
+ p;
+ }' \
+ <"$b2t" >"$out" &&
+ echo "$out"
+}
+
+check_dashed_labels () {
+ ! grep -E "<[^>_-]+_" "$1"
+}
+
+HT=" "
+
+align_after_nl () {
+ builtin="$1" &&
+ len=$(printf "git %s " "$builtin" | wc -c) &&
+ pad=$(printf "%${len}s" "") &&
+
+ sed "s/^[ $HT][ $HT]*/$pad/"
+}
+
+test_debug '>failing'
+while read builtin
+do
+ # -h output assertions
+ test_expect_success "$builtin -h output has no \t" '
+ h2s="$(help_to_synopsis "$builtin")" &&
+ ! grep "$HT" "$h2s"
+ '
+
+ test_expect_success "$builtin -h output has dashed labels" '
+ check_dashed_labels "$(help_to_synopsis "$builtin")"
+ '
+
+ test_expect_success "$builtin -h output has consistent spacing" '
+ h2s="$(help_to_synopsis "$builtin")" &&
+ sed -n \
+ -e "/^ / {
+ s/[^ ].*//;
+ p;
+ }" \
+ <"$h2s" >help &&
+ sort -u help >help.ws &&
+ if test -s help.ws
+ then
+ test_line_count = 1 help.ws
+ fi
+ '
+
+ txt="$(builtin_to_txt "$builtin")" &&
+ preq="$(echo BUILTIN_TXT_$builtin | tr '[:lower:]-' '[:upper:]_')" &&
+
+ if test -f "$txt"
+ then
+ test_set_prereq "$preq"
+ fi &&
+
+ # *.txt output assertions
+ test_expect_success "$preq" "$builtin *.txt SYNOPSIS has dashed labels" '
+ check_dashed_labels "$(txt_to_synopsis "$builtin")"
+ '
+
+ # *.txt output consistency assertions
+ result=
+ if grep -q "^$builtin$" "$TEST_DIRECTORY"/t0450/txt-help-mismatches
+ then
+ result=failure
+ else
+ result=success
+ fi &&
+ test_expect_$result "$preq" "$builtin -h output and SYNOPSIS agree" '
+ t2s="$(txt_to_synopsis "$builtin")" &&
+ if test "$builtin" = "merge-tree"
+ then
+ test_when_finished "rm -f t2s.new" &&
+ sed -e '\''s/ (deprecated)$//g'\'' <"$t2s" >t2s.new &&
+ t2s=t2s.new
+ fi &&
+ h2s="$(help_to_synopsis "$builtin")" &&
+
+ # The *.txt and -h use different spacing for the
+ # alignment of continued usage output, normalize it.
+ align_after_nl "$builtin" <"$t2s" >txt &&
+ align_after_nl "$builtin" <"$h2s" >help &&
+ test_cmp txt help
+ '
+
+ if test_have_prereq "$preq" && test -e txt && test -e help
+ then
+ test_debug '
+ if test_cmp txt help >cmp 2>/dev/null
+ then
+ echo "=== DONE: $builtin ==="
+ else
+ echo "=== TODO: $builtin ===" &&
+ cat cmp
+ fi >>failing
+ '
+
+ # Not in test_expect_success in case --run is being
+ # used with --debug
+ rm -f txt help tmp 2>/dev/null
+ fi
+done <builtins
+
+test_debug 'say "$(cat failing)"'
+
+test_done
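To eyeball what the two helpers compare for a single builtin outside the harness, the same extraction can be run by hand from the top of a source checkout; a simplified sketch (it omits the quote-stripping substitution used above):

    # the "-h" side for one builtin, e.g. git-mv
    git mv -h 2>&1 | sed -n \
    	-e '1,/^$/ {
    		/^$/d;
    		s/^usage: //;
    		s/^ *or: //;
    		p;
    	}'
    # the documentation side, from the unbuilt manpage source
    sed -n \
    	-e '/^\[verse\]$/,/^$/ {
    		/^$/d;
    		/^\[verse\]$/d;
    		s/{litdd}/--/g;
    		p;
    	}' Documentation/git-mv.txt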
diff --git a/t/t0450/txt-help-mismatches b/t/t0450/txt-help-mismatches
new file mode 100644
index 0000000000..a0777acd66
--- /dev/null
+++ b/t/t0450/txt-help-mismatches
@@ -0,0 +1,58 @@
+add
+am
+apply
+archive
+bisect
+blame
+branch
+check-ref-format
+checkout
+checkout-index
+clone
+column
+config
+credential
+credential-cache
+credential-store
+fast-export
+fast-import
+fetch-pack
+fmt-merge-msg
+for-each-ref
+format-patch
+fsck-objects
+fsmonitor--daemon
+gc
+grep
+index-pack
+init-db
+log
+ls-files
+ls-tree
+mailinfo
+mailsplit
+maintenance
+merge
+merge-file
+merge-index
+merge-one-file
+multi-pack-index
+name-rev
+notes
+pack-objects
+push
+range-diff
+rebase
+remote
+remote-ext
+remote-fd
+repack
+reset
+restore
+rev-parse
+show
+stage
+switch
+update-index
+update-ref
+whatchanged
diff --git a/t/t1002-read-tree-m-u-2way.sh b/t/t1002-read-tree-m-u-2way.sh
index bd5313caec..cdc077ce12 100755
--- a/t/t1002-read-tree-m-u-2way.sh
+++ b/t/t1002-read-tree-m-u-2way.sh
@@ -154,7 +154,7 @@ test_expect_success \
read_tree_u_must_succeed --reset -u $treeH &&
echo frotz frotz >frotz &&
git update-index --add frotz &&
- if read_tree_u_must_succeed -m -u $treeH $treeM; then false; else :; fi'
+ ! read_tree_u_must_succeed -m -u $treeH $treeM'
test_expect_success \
'9 - conflicting addition.' \
@@ -163,7 +163,7 @@ test_expect_success \
echo frotz frotz >frotz &&
git update-index --add frotz &&
echo frotz >frotz &&
- if read_tree_u_must_succeed -m -u $treeH $treeM; then false; else :; fi'
+ ! read_tree_u_must_succeed -m -u $treeH $treeM'
test_expect_success \
'10 - path removed.' \
@@ -186,7 +186,7 @@ test_expect_success \
echo rezrov >rezrov &&
git update-index --add rezrov &&
echo rezrov rezrov >rezrov &&
- if read_tree_u_must_succeed -m -u $treeH $treeM; then false; else :; fi'
+ ! read_tree_u_must_succeed -m -u $treeH $treeM'
test_expect_success \
'12 - unmatching local changes being removed.' \
@@ -194,7 +194,7 @@ test_expect_success \
read_tree_u_must_succeed --reset -u $treeH &&
echo rezrov rezrov >rezrov &&
git update-index --add rezrov &&
- if read_tree_u_must_succeed -m -u $treeH $treeM; then false; else :; fi'
+ ! read_tree_u_must_succeed -m -u $treeH $treeM'
test_expect_success \
'13 - unmatching local changes being removed.' \
@@ -203,7 +203,7 @@ test_expect_success \
echo rezrov rezrov >rezrov &&
git update-index --add rezrov &&
echo rezrov >rezrov &&
- if read_tree_u_must_succeed -m -u $treeH $treeM; then false; else :; fi'
+ ! read_tree_u_must_succeed -m -u $treeH $treeM'
cat >expected <<EOF
-100644 X 0 nitfol
@@ -251,7 +251,7 @@ test_expect_success \
read_tree_u_must_succeed --reset -u $treeH &&
echo bozbar bozbar >bozbar &&
git update-index --add bozbar &&
- if read_tree_u_must_succeed -m -u $treeH $treeM; then false; else :; fi'
+ ! read_tree_u_must_succeed -m -u $treeH $treeM'
test_expect_success \
'17 - conflicting local change.' \
@@ -260,7 +260,7 @@ test_expect_success \
echo bozbar bozbar >bozbar &&
git update-index --add bozbar &&
echo bozbar bozbar bozbar >bozbar &&
- if read_tree_u_must_succeed -m -u $treeH $treeM; then false; else :; fi'
+ ! read_tree_u_must_succeed -m -u $treeH $treeM'
test_expect_success \
'18 - local change already having a good result.' \
@@ -316,7 +316,7 @@ test_expect_success \
echo bozbar >bozbar &&
git update-index --add bozbar &&
echo gnusto gnusto >bozbar &&
- if read_tree_u_must_succeed -m -u $treeH $treeM; then false; else :; fi'
+ ! read_tree_u_must_succeed -m -u $treeH $treeM'
# Also make sure we did not break DF vs DF/DF case.
test_expect_success \
diff --git a/t/t1011-read-tree-sparse-checkout.sh b/t/t1011-read-tree-sparse-checkout.sh
index 742f0fa909..595b24c0ad 100755
--- a/t/t1011-read-tree-sparse-checkout.sh
+++ b/t/t1011-read-tree-sparse-checkout.sh
@@ -12,6 +12,7 @@ test_description='sparse checkout tests
'
TEST_CREATE_REPO_NO_TEMPLATE=1
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-read-tree.sh
diff --git a/t/t1022-read-tree-partial-clone.sh b/t/t1022-read-tree-partial-clone.sh
index a9953b6a71..cca4380e43 100755
--- a/t/t1022-read-tree-partial-clone.sh
+++ b/t/t1022-read-tree-partial-clone.sh
@@ -3,7 +3,7 @@
test_description='git read-tree in partial clones'
TEST_NO_CREATE_REPO=1
-
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'read-tree in partial clone prefetches in one batch' '
@@ -19,7 +19,7 @@ test_expect_success 'read-tree in partial clone prefetches in one batch' '
git -C server config uploadpack.allowfilter 1 &&
git -C server config uploadpack.allowanysha1inwant 1 &&
git clone --bare --filter=blob:none "file://$(pwd)/server" client &&
- GIT_TRACE_PACKET="$(pwd)/trace" git -C client read-tree $TREE &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client read-tree $TREE $TREE &&
# "done" marks the end of negotiation (once per fetch). Expect that
# only one fetch occurs.
diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index 4844922e57..801919009e 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -162,6 +162,19 @@ init_repos () {
git -C sparse-index sparse-checkout set deep
}
+init_repos_as_submodules () {
+ git reset --hard &&
+ init_repos &&
+ git submodule add ./full-checkout &&
+ git submodule add ./sparse-checkout &&
+ git submodule add ./sparse-index &&
+
+ git submodule status >actual &&
+ grep full-checkout actual &&
+ grep sparse-checkout actual &&
+ grep sparse-index actual
+}
+
run_on_sparse () {
(
cd sparse-checkout &&
@@ -1983,4 +1996,63 @@ test_expect_success 'sparse index is not expanded: rm' '
ensure_not_expanded rm -r deep
'
+test_expect_success 'grep with and --cached' '
+ init_repos &&
+
+ test_all_match git grep --cached a &&
+ test_all_match git grep --cached a -- "folder1/*"
+'
+
+test_expect_success 'grep is not expanded' '
+ init_repos &&
+
+ ensure_not_expanded grep a &&
+ ensure_not_expanded grep a -- deep/* &&
+
+ # All files within the folder1/* pathspec are sparse,
+ # so this command does not find any matches
+ ensure_not_expanded ! grep a -- folder1/* &&
+
+ # test out-of-cone pathspec with or without wildcard
+ ensure_not_expanded grep --cached a -- "folder1/a" &&
+ ensure_not_expanded grep --cached a -- "folder1/*" &&
+
+ # test in-cone pathspec with or without wildcard
+ ensure_not_expanded grep --cached a -- "deep/a" &&
+ ensure_not_expanded grep --cached a -- "deep/*"
+'
+
+# NEEDSWORK: when running `grep` in the superproject with --recurse-submodules,
+# Git expands the index of the submodules unexpectedly. Even though `grep`
+# builtin is marked as "command_requires_full_index = 0", this config is only
+# useful for the superproject. Namely, the submodules have their own configs,
+# which are _not_ populated by the one-time sparse-index feature switch.
+test_expect_failure 'grep within submodules is not expanded' '
+ init_repos_as_submodules &&
+
+ # do not use ensure_not_expanded() here, because `grep` should be
+ # run in the superproject, not in "./sparse-index"
+ GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \
+ git grep --cached --recurse-submodules a -- "*/folder1/*" &&
+ test_region ! index ensure_full_index trace2.txt
+'
+
+# NEEDSWORK: this test is not actually testing the code. The design purpose
+# of this test is to verify the grep result when the submodules are using a
+# sparse-index. Namely, we want "folder1/" as a tree (a sparse directory); but
+# because of the index expansion, we are now grepping the "folder1/a" blob.
+# Because of the problem stated above 'grep within submodules is not expanded',
+# we don't have the ideal test environment yet.
+test_expect_success 'grep sparse directory within submodules' '
+ init_repos_as_submodules &&
+
+ cat >expect <<-\EOF &&
+ full-checkout/folder1/a:a
+ sparse-checkout/folder1/a:a
+ sparse-index/folder1/a:a
+ EOF
+ git grep --cached --recurse-submodules a -- "*/folder1/*" >actual &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t1304-default-acl.sh b/t/t1304-default-acl.sh
index 335d3f3211..c69ae41306 100755
--- a/t/t1304-default-acl.sh
+++ b/t/t1304-default-acl.sh
@@ -18,7 +18,7 @@ test_expect_success 'checking for a working acl setup' '
if setfacl -m d:m:rwx -m u:root:rwx . &&
getfacl . | grep user:root:rwx &&
touch should-have-readable-acl &&
- getfacl should-have-readable-acl | egrep "mask::?rw-"
+ getfacl should-have-readable-acl | grep -E "mask::?rw-"
then
test_set_prereq SETFACL
fi
@@ -34,7 +34,7 @@ check_perms_and_acl () {
getfacl "$1" > actual &&
grep -q "user:root:rwx" actual &&
grep -q "user:${LOGNAME}:rwx" actual &&
- egrep "mask::?r--" actual > /dev/null 2>&1 &&
+ grep -E "mask::?r--" actual > /dev/null 2>&1 &&
grep -q "group::---" actual || false
}
diff --git a/t/t1401-symbolic-ref.sh b/t/t1401-symbolic-ref.sh
index 0c204089b8..d708acdb81 100755
--- a/t/t1401-symbolic-ref.sh
+++ b/t/t1401-symbolic-ref.sh
@@ -175,4 +175,18 @@ test_expect_success 'symbolic-ref allows top-level target for non-HEAD' '
test_cmp_rev top-level HEAD
'
+test_expect_success 'symbolic-ref pointing at another' '
+ git update-ref refs/heads/maint-2.37 HEAD &&
+ git symbolic-ref refs/heads/maint refs/heads/maint-2.37 &&
+ git checkout maint &&
+
+ git symbolic-ref HEAD >actual &&
+ echo refs/heads/maint-2.37 >expect &&
+ test_cmp expect actual &&
+
+ git symbolic-ref --no-recurse HEAD >actual &&
+ echo refs/heads/maint >expect &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t1404-update-ref-errors.sh b/t/t1404-update-ref-errors.sh
index 13c2b43bba..b5606d93b5 100755
--- a/t/t1404-update-ref-errors.sh
+++ b/t/t1404-update-ref-errors.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='Test git update-ref error handling'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Create some references, perhaps run pack-refs --all, then try to
diff --git a/t/t1409-avoid-packing-refs.sh b/t/t1409-avoid-packing-refs.sh
index be12fb6350..74dedd57e9 100755
--- a/t/t1409-avoid-packing-refs.sh
+++ b/t/t1409-avoid-packing-refs.sh
@@ -2,19 +2,36 @@
test_description='avoid rewriting packed-refs unnecessarily'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Add an identifying mark to the packed-refs file header line. This
# shouldn't upset readers, and it should be omitted if the file is
# ever rewritten.
mark_packed_refs () {
- sed -e "s/^\(#.*\)/\1 t1409 /" .git/packed-refs >.git/packed-refs.new &&
- mv .git/packed-refs.new .git/packed-refs
+ if test "$GIT_TEST_PACKED_REFS_VERSION" = "2"
+ then
+ size=$(wc -c < .git/packed-refs) &&
+ pos=$(expr $size - 4) &&
+ printf "FAKE" | dd of=".git/packed-refs" bs=1 seek="$pos" conv=notrunc
+ else
+ sed -e "s/^\(#.*\)/\1 t1409 /" .git/packed-refs >.git/packed-refs.new &&
+ mv .git/packed-refs.new .git/packed-refs
+ fi
}
# Verify that the packed-refs file is still marked.
check_packed_refs_marked () {
- grep -q '^#.* t1409 ' .git/packed-refs
+ if test "$GIT_TEST_PACKED_REFS_VERSION" = "2"
+ then
+ size=$(wc -c < .git/packed-refs) &&
+ pos=$(expr $size - 4) &&
+ tail -c 4 .git/packed-refs >actual &&
+ printf "FAKE" >expect &&
+ test_cmp expect actual
+ else
+ grep -q '^#.* t1409 ' .git/packed-refs
+ fi
}
test_expect_success 'setup' '
diff --git a/t/t1413-reflog-detach.sh b/t/t1413-reflog-detach.sh
index 934688a1ee..d2a4822d46 100755
--- a/t/t1413-reflog-detach.sh
+++ b/t/t1413-reflog-detach.sh
@@ -4,6 +4,7 @@ test_description='Test reflog interaction with detached HEAD'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
reset_state () {
diff --git a/t/t1501-work-tree.sh b/t/t1501-work-tree.sh
index b75558040f..ae6528aece 100755
--- a/t/t1501-work-tree.sh
+++ b/t/t1501-work-tree.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test separate work tree'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t1600-index.sh b/t/t1600-index.sh
index 010989f90e..24ab90ca04 100755
--- a/t/t1600-index.sh
+++ b/t/t1600-index.sh
@@ -103,4 +103,12 @@ test_expect_success 'index version config precedence' '
test_index_version 0 true 2 2
'
+test_expect_success 'index.computeHash config option' '
+ (
+ rm -f .git/index &&
+ git -c index.computeHash=false add a &&
+ git fsck
+ )
+'
+
test_done
diff --git a/t/t1800-hook.sh b/t/t1800-hook.sh
index 43fcb7c0bf..2ef3579fa7 100755
--- a/t/t1800-hook.sh
+++ b/t/t1800-hook.sh
@@ -95,7 +95,7 @@ test_expect_success 'git hook run -- out-of-repo runs excluded' '
test_expect_success 'git -c core.hooksPath=<PATH> hook run' '
mkdir my-hooks &&
write_script my-hooks/test-hook <<-\EOF &&
- echo Hook ran $1 >>actual
+ echo Hook ran $1
EOF
cat >expect <<-\EOF &&
diff --git a/t/t2012-checkout-last.sh b/t/t2012-checkout-last.sh
index 1f6c4ed042..4b6372f4c3 100755
--- a/t/t2012-checkout-last.sh
+++ b/t/t2012-checkout-last.sh
@@ -5,6 +5,7 @@ test_description='checkout can switch to last branch and merge base'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t2018-checkout-branch.sh b/t/t2018-checkout-branch.sh
index 771c3c3c50..8581ad3437 100755
--- a/t/t2018-checkout-branch.sh
+++ b/t/t2018-checkout-branch.sh
@@ -3,6 +3,7 @@
test_description='checkout'
TEST_CREATE_REPO_NO_TEMPLATE=1
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Arguments: [!] <branch> <oid> [<checkout options>]
diff --git a/t/t2025-checkout-no-overlay.sh b/t/t2025-checkout-no-overlay.sh
index 8f13341cf8..3832c3de81 100755
--- a/t/t2025-checkout-no-overlay.sh
+++ b/t/t2025-checkout-no-overlay.sh
@@ -2,6 +2,7 @@
test_description='checkout --no-overlay <tree-ish> -- <pathspec>'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t2400-worktree-add.sh b/t/t2400-worktree-add.sh
index d587e0b20d..93c340f4af 100755
--- a/t/t2400-worktree-add.sh
+++ b/t/t2400-worktree-add.sh
@@ -310,6 +310,26 @@ test_expect_success '"add" -B/--detach mutually exclusive' '
test_must_fail git worktree add -B poodle --detach bamboo main
'
+test_expect_success '"add" --orphan/-b mutually exclusive' '
+ test_must_fail git worktree add --orphan poodle -b poodle bamboo
+'
+
+test_expect_success '"add" --orphan/-B mutually exclusive' '
+ test_must_fail git worktree add --orphan poodle -B poodle bamboo
+'
+
+test_expect_success '"add" --orphan/--detach mutually exclusive' '
+ test_must_fail git worktree add --orphan poodle --detach bamboo
+'
+
+test_expect_success '"add" --orphan/--no-checkout mutually exclusive' '
+ test_must_fail git worktree add --orphan poodle --no-checkout bamboo
+'
+
+test_expect_success '"add" -B/--detach mutually exclusive' '
+ test_must_fail git worktree add -B poodle --detach bamboo main
+'
+
test_expect_success '"add -B" fails if the branch is checked out' '
git rev-parse newmain >before &&
test_must_fail git worktree add -B newmain bamboo main &&
@@ -330,6 +350,31 @@ test_expect_success 'add --quiet' '
test_must_be_empty actual
'
+test_expect_success '"add --orphan"' '
+ test_when_finished "git worktree remove -f -f orphandir" &&
+ git worktree add --orphan neworphan orphandir &&
+ echo refs/heads/neworphan >expected &&
+ git -C orphandir symbolic-ref HEAD >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '"add --orphan" fails if the branch already exists' '
+ test_when_finished "git branch -D existingbranch" &&
+ test_when_finished "git worktree remove -f -f orphandir" &&
+ git worktree add -b existingbranch orphandir main &&
+ test_must_fail git worktree add --orphan existingbranch orphandir2 &&
+ test ! -d orphandir2
+'
+
+test_expect_success '"add --orphan" with empty repository' '
+ test_when_finished "rm -rf empty_repo" &&
+ echo refs/heads/newbranch >expected &&
+ GIT_DIR="empty_repo" git init --bare &&
+ git -C empty_repo worktree add --orphan newbranch worktreedir &&
+ git -C empty_repo/worktreedir symbolic-ref HEAD >actual &&
+ test_cmp expected actual
+'
+
test_expect_success 'local clone from linked checkout' '
git clone --local here here-clone &&
( cd here-clone && git fsck )
diff --git a/t/t3009-ls-files-others-nonsubmodule.sh b/t/t3009-ls-files-others-nonsubmodule.sh
index 963f3462b7..14218b3424 100755
--- a/t/t3009-ls-files-others-nonsubmodule.sh
+++ b/t/t3009-ls-files-others-nonsubmodule.sh
@@ -18,6 +18,7 @@ This test runs git ls-files --others with the following working tree:
git repository with a commit and an untracked file
'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup: directories' '
diff --git a/t/t3010-ls-files-killed-modified.sh b/t/t3010-ls-files-killed-modified.sh
index 580e158f99..054178703d 100755
--- a/t/t3010-ls-files-killed-modified.sh
+++ b/t/t3010-ls-files-killed-modified.sh
@@ -41,6 +41,8 @@ Also for modification test, the cache and working tree have:
We should report path0, path1, path2/file2, path3/file3, path7 and path8
modified without reporting path9 and path10. submod1 is also modified.
'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'git update-index --add to add various paths.' '
diff --git a/t/t3050-subprojects-fetch.sh b/t/t3050-subprojects-fetch.sh
index f1f09abdd9..3884694165 100755
--- a/t/t3050-subprojects-fetch.sh
+++ b/t/t3050-subprojects-fetch.sh
@@ -2,6 +2,7 @@
test_description='fetching and pushing project with subproject'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
diff --git a/t/t3060-ls-files-with-tree.sh b/t/t3060-ls-files-with-tree.sh
index 52f76f7b57..c4a72ae446 100755
--- a/t/t3060-ls-files-with-tree.sh
+++ b/t/t3060-ls-files-with-tree.sh
@@ -8,6 +8,8 @@ test_description='git ls-files test (--with-tree).
This test runs git ls-files --with-tree and in particular in
a scenario known to trigger a crash with some versions of git.
'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t3200-branch.sh b/t/t3200-branch.sh
index 88b9c56e5c..5a169b68d6 100755
--- a/t/t3200-branch.sh
+++ b/t/t3200-branch.sh
@@ -201,8 +201,8 @@ test_expect_success 'git branch -M baz bam should succeed when baz is checked ou
test_expect_success 'git branch -M baz bam should add entries to .git/logs/HEAD' '
msg="Branch: renamed refs/heads/baz to refs/heads/bam" &&
- grep " 0\{40\}.*$msg$" .git/logs/HEAD &&
- grep "^0\{40\}.*$msg$" .git/logs/HEAD
+ grep " $ZERO_OID.*$msg$" .git/logs/HEAD &&
+ grep "^$ZERO_OID.*$msg$" .git/logs/HEAD
'
test_expect_success 'git branch -M should leave orphaned HEAD alone' '
@@ -268,6 +268,53 @@ test_expect_success 'git branch -M topic topic should work when main is checked
git branch -M topic topic
'
+test_expect_success 'git branch -M and -C fail on detached HEAD' '
+ git checkout HEAD^{} &&
+ test_when_finished git checkout - &&
+ echo "fatal: cannot rename the current branch while not on any." >expect &&
+ test_must_fail git branch -M must-fail 2>err &&
+ test_cmp expect err &&
+ echo "fatal: cannot copy the current branch while not on any." >expect &&
+ test_must_fail git branch -C must-fail 2>err &&
+ test_cmp expect err
+'
+
+test_expect_success 'git branch -d on orphan HEAD (merged)' '
+ test_when_finished git checkout main &&
+ git checkout --orphan orphan &&
+ git branch --track to-delete main &&
+ git branch -d to-delete
+'
+
+test_expect_success 'git branch -d on orphan HEAD (merged, graph)' '
+ test_when_finished git checkout main &&
+ git checkout --orphan orphan &&
+ git branch --track to-delete main &&
+ test_when_finished "rm -rf .git/objects/commit-graph*" &&
+ git commit-graph write --reachable &&
+ git branch -d to-delete
+'
+
+test_expect_success 'git branch -d on orphan HEAD (unmerged)' '
+ test_when_finished git checkout main &&
+ git checkout --orphan orphan &&
+ test_when_finished "git branch -D to-delete" &&
+ git branch to-delete main &&
+ test_must_fail git branch -d to-delete 2>err &&
+ grep "not fully merged" err
+'
+
+test_expect_success 'git branch -d on orphan HEAD (unmerged, graph)' '
+ test_when_finished git checkout main &&
+ git checkout --orphan orphan &&
+ test_when_finished "git branch -D to-delete" &&
+ git branch to-delete main &&
+ test_when_finished "rm -rf .git/objects/commit-graph*" &&
+ git commit-graph write --reachable &&
+ test_must_fail git branch -d to-delete 2>err &&
+ grep "not fully merged" err
+'
+
test_expect_success 'git branch -v -d t should work' '
git branch t &&
git rev-parse --verify refs/heads/t &&
@@ -1382,6 +1429,9 @@ test_expect_success 'branch --delete --force removes dangling branch' '
'
test_expect_success 'use --edit-description' '
+ EDITOR=: git branch --edit-description &&
+ test_expect_code 1 git config branch.main.description &&
+
write_script editor <<-\EOF &&
echo "New contents" >"$1"
EOF
diff --git a/t/t3202-show-branch.sh b/t/t3202-show-branch.sh
index f2b9199007..ea7cfd1951 100755
--- a/t/t3202-show-branch.sh
+++ b/t/t3202-show-branch.sh
@@ -7,6 +7,28 @@ test_description='test show-branch'
# arbitrary reference time: 2009-08-30 19:20:00
GIT_TEST_DATE_NOW=1251660000; export GIT_TEST_DATE_NOW
+test_expect_success 'error descriptions on empty repository' '
+ current=$(git branch --show-current) &&
+ cat >expect <<-EOF &&
+ error: No commit on branch '\''$current'\'' yet.
+ EOF
+ test_must_fail git branch --edit-description 2>actual &&
+ test_cmp expect actual &&
+ test_must_fail git branch --edit-description $current 2>actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'fatal descriptions on empty repository' '
+ current=$(git branch --show-current) &&
+ cat >expect <<-EOF &&
+ fatal: No commit on branch '\''$current'\'' yet.
+ EOF
+ test_must_fail git branch --set-upstream-to=non-existent 2>actual &&
+ test_cmp expect actual &&
+ test_must_fail git branch -c new-branch 2>actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'setup' '
test_commit initial &&
for i in $(test_seq 1 10)
@@ -175,4 +197,28 @@ done <<\EOF
--reflog --current
EOF
+test_expect_success 'error descriptions on non-existent branch' '
+ cat >expect <<-EOF &&
+ error: No branch named '\''non-existent'\''.
+ EOF
+ test_must_fail git branch --edit-description non-existent 2>actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'fatal descriptions on non-existent branch' '
+ cat >expect <<-EOF &&
+ fatal: branch '\''non-existent'\'' does not exist
+ EOF
+ test_must_fail git branch --set-upstream-to=non-existent non-existent 2>actual &&
+ test_cmp expect actual &&
+
+ cat >expect <<-EOF &&
+ fatal: No branch named '\''non-existent'\''.
+ EOF
+ test_must_fail git branch -c non-existent new-branch 2>actual &&
+ test_cmp expect actual &&
+ test_must_fail git branch -m non-existent new-branch 2>actual &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t3204-branch-name-interpretation.sh b/t/t3204-branch-name-interpretation.sh
index 993a6b5eff..793bf4d269 100755
--- a/t/t3204-branch-name-interpretation.sh
+++ b/t/t3204-branch-name-interpretation.sh
@@ -133,4 +133,28 @@ test_expect_success 'checkout does not treat remote @{upstream} as a branch' '
expect_branch HEAD one
'
+test_expect_success 'edit-description via @{-1}' '
+ git checkout -b desc-branch &&
+ git checkout -b non-desc-branch &&
+ write_script editor <<-\EOF &&
+ echo "Branch description" >"$1"
+ EOF
+ EDITOR=./editor git branch --edit-description @{-1} &&
+ test_must_fail git config branch.non-desc-branch.description &&
+ git config branch.desc-branch.description >actual &&
+ printf "Branch description\n\n" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'modify branch upstream via "@{-1}" and "@{-1}@{upstream}"' '
+ git checkout -b upstream-branch &&
+ git checkout -b upstream-other -t upstream-branch &&
+ git branch --set-upstream-to upstream-other @{-1} &&
+ git config branch.upstream-branch.merge >actual &&
+ echo "refs/heads/upstream-other" >expect &&
+ test_cmp expect actual &&
+ git branch --unset-upstream @{-1}@{upstream} &&
+ test_must_fail git config branch.upstream-other.merge
+'
+
test_done
diff --git a/t/t3210-pack-refs.sh b/t/t3210-pack-refs.sh
index 577f32dc71..76251dfe05 100755
--- a/t/t3210-pack-refs.sh
+++ b/t/t3210-pack-refs.sh
@@ -159,7 +159,7 @@ test_expect_success 'delete ref while another dangling packed ref' '
test_expect_success 'pack ref directly below refs/' '
git update-ref refs/top HEAD &&
git pack-refs --all --prune &&
- grep refs/top .git/packed-refs &&
+ git rev-parse refs/top &&
test_path_is_missing .git/refs/top
'
@@ -197,7 +197,7 @@ test_expect_success 'notice d/f conflict with existing ref' '
test_must_fail git branch foo/bar/baz/lots/of/extra/components
'
-test_expect_success 'reject packed-refs with unterminated line' '
+test_expect_success PACKED_REFS_V1 'reject packed-refs with unterminated line' '
cp .git/packed-refs .git/packed-refs.bak &&
test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
printf "%s" "$HEAD refs/zzzzz" >>.git/packed-refs &&
@@ -206,7 +206,7 @@ test_expect_success 'reject packed-refs with unterminated line' '
test_cmp expected_err err
'
-test_expect_success 'reject packed-refs containing junk' '
+test_expect_success PACKED_REFS_V1 'reject packed-refs containing junk' '
cp .git/packed-refs .git/packed-refs.bak &&
test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
printf "%s\n" "bogus content" >>.git/packed-refs &&
@@ -215,7 +215,7 @@ test_expect_success 'reject packed-refs containing junk' '
test_cmp expected_err err
'
-test_expect_success 'reject packed-refs with a short SHA-1' '
+test_expect_success PACKED_REFS_V1 'reject packed-refs with a short SHA-1' '
cp .git/packed-refs .git/packed-refs.bak &&
test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
printf "%.7s %s\n" $HEAD refs/zzzzz >>.git/packed-refs &&
diff --git a/t/t3212-ref-formats.sh b/t/t3212-ref-formats.sh
new file mode 100755
index 0000000000..5583f16db4
--- /dev/null
+++ b/t/t3212-ref-formats.sh
@@ -0,0 +1,100 @@
+#!/bin/sh
+
+test_description='test across ref formats'
+
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
+. ./test-lib.sh
+
+test_expect_success 'extensions.refFormat requires core.repositoryFormatVersion=1' '
+ test_when_finished rm -rf broken &&
+
+ # Force sha1 to ensure GIT_TEST_DEFAULT_HASH does
+ # not imply a value of core.repositoryFormatVersion.
+ git init --object-format=sha1 broken &&
+ git -C broken config extensions.refFormat files &&
+ test_must_fail git -C broken status 2>err &&
+ grep "repo version is 0, but v1-only extension found" err
+'
+
+test_expect_success 'invalid extensions.refFormat' '
+ test_when_finished rm -rf broken &&
+ git init broken &&
+ git -C broken config core.repositoryFormatVersion 1 &&
+ git -C broken config extensions.refFormat bogus &&
+ test_must_fail git -C broken status 2>err &&
+ grep "invalid value for '\''extensions.refFormat'\'': '\''bogus'\''" err
+'
+
+test_expect_success 'extensions.refFormat=packed only' '
+ git init only-packed &&
+ (
+ cd only-packed &&
+ git config core.repositoryFormatVersion 1 &&
+ git config extensions.refFormat packed &&
+ test_commit A &&
+ test_path_exists .git/packed-refs &&
+ test_path_is_missing .git/refs/tags/A
+ )
+'
+
+test_expect_success 'extensions.refFormat=files only' '
+ test_commit T &&
+ git pack-refs --all &&
+ git init only-loose &&
+ (
+ cd only-loose &&
+ git config core.repositoryFormatVersion 1 &&
+ git config extensions.refFormat files &&
+ test_commit A &&
+ test_commit B &&
+ test_must_fail git pack-refs 2>err &&
+ grep "refusing to create" err &&
+ test_path_is_missing .git/packed-refs &&
+
+ # Refuse to parse a packed-refs file.
+ cp ../.git/packed-refs .git/packed-refs &&
+ test_must_fail git rev-parse refs/tags/T
+ )
+'
+
+test_expect_success 'extensions.refFormat=files,packed-v2' '
+ test_commit Q &&
+ git pack-refs --all &&
+ git init no-packed-v1 &&
+ (
+ cd no-packed-v1 &&
+ git config core.repositoryFormatVersion 1 &&
+ git config extensions.refFormat files &&
+ git config --add extensions.refFormat packed-v2 &&
+ test_commit A &&
+ test_commit B &&
+
+ # Refuse to parse a v1 packed-refs file.
+ cp ../.git/packed-refs .git/packed-refs &&
+ test_must_fail git rev-parse refs/tags/Q &&
+ rm -f .git/packed-refs &&
+
+ git for-each-ref --format="%(refname) %(objectname)" >expect-all &&
+ git for-each-ref --format="%(refname) %(objectname)" \
+ refs/tags/* >expect-tags &&
+
+ # Create a v2 packed-refs file
+ git pack-refs --all &&
+ test_path_exists .git/packed-refs &&
+ for t in A B
+ do
+ test_path_is_missing .git/refs/tags/$t &&
+ git rev-parse refs/tags/$t || return 1
+ done &&
+
+ git for-each-ref --format="%(refname) %(objectname)" >actual-all &&
+ test_cmp expect-all actual-all &&
+ git for-each-ref --format="%(refname) %(objectname)" \
+ refs/tags/* >actual-tags &&
+ test_cmp expect-tags actual-tags
+ )
+'
+
+test_done
diff --git a/t/t3301-notes.sh b/t/t3301-notes.sh
index 3288aaec7d..dedad93a2f 100755
--- a/t/t3301-notes.sh
+++ b/t/t3301-notes.sh
@@ -521,12 +521,25 @@ test_expect_success 'listing non-existing notes fails' '
test_must_be_empty actual
'
+test_expect_success 'append to existing note without a beginning blank line' '
+ test_when_finished git notes remove HEAD &&
+ cat >expect <<-\EOF &&
+ Initial set of notes
+ Appended notes
+ EOF
+ git notes add -m "Initial set of notes" &&
+ git notes append --no-blank-line -m "Appended notes" &&
+ git notes show >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'append to existing note with "git notes append"' '
cat >expect <<-EOF &&
Initial set of notes
More notes appended with git notes append
EOF
+
git notes add -m "Initial set of notes" &&
git notes append -m "More notes appended with git notes append" &&
git notes show >actual &&
@@ -552,6 +565,7 @@ test_expect_success 'appending empty string does not change existing note' '
'
test_expect_success 'git notes append == add when there is no existing note' '
+ test_when_finished git notes remove HEAD &&
git notes remove HEAD &&
test_must_fail git notes list HEAD &&
git notes append -m "Initial set of notes${LF}${LF}More notes appended with git notes append" &&
@@ -560,9 +574,9 @@ test_expect_success 'git notes append == add when there is no existing note' '
'
test_expect_success 'appending empty string to non-existing note does not create note' '
- git notes remove HEAD &&
test_must_fail git notes list HEAD &&
- git notes append -m "" &&
+ git notes append -m "" >output 2>&1 &&
+ grep "Both original and appended notes are empty" output &&
test_must_fail git notes list HEAD
'
diff --git a/t/t3305-notes-fanout.sh b/t/t3305-notes-fanout.sh
index 22ffe5bcb9..1ec1fb6715 100755
--- a/t/t3305-notes-fanout.sh
+++ b/t/t3305-notes-fanout.sh
@@ -9,7 +9,7 @@ path_has_fanout() {
path=$1 &&
fanout=$2 &&
after_last_slash=$(($(test_oid hexsz) - $fanout * 2)) &&
- echo $path | grep -q "^\([0-9a-f]\{2\}/\)\{$fanout\}[0-9a-f]\{$after_last_slash\}$"
+ echo $path | grep -q -E "^([0-9a-f]{2}/){$fanout}[0-9a-f]{$after_last_slash}$"
}
touched_one_note_with_fanout() {
diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh
index 688b01e3eb..462cefd25d 100755
--- a/t/t3404-rebase-interactive.sh
+++ b/t/t3404-rebase-interactive.sh
@@ -1244,9 +1244,9 @@ test_expect_success 'short commit ID collide' '
test $colliding_id = "$(git rev-parse HEAD | cut -c 1-4)" &&
grep "^pick $colliding_id " \
.git/rebase-merge/git-rebase-todo.tmp &&
- grep "^pick [0-9a-f]\{$hexsz\}" \
+ grep -E "^pick [0-9a-f]{$hexsz}" \
.git/rebase-merge/git-rebase-todo &&
- grep "^pick [0-9a-f]\{$hexsz\}" \
+ grep -E "^pick [0-9a-f]{$hexsz}" \
.git/rebase-merge/git-rebase-todo.backup &&
git rebase --continue
) &&
@@ -1261,7 +1261,7 @@ test_expect_success 'respect core.abbrev' '
set_cat_todo_editor &&
test_must_fail git rebase -i HEAD~4 >todo-list
) &&
- test 4 = $(grep -c "pick [0-9a-f]\{12,\}" todo-list)
+ test 4 = $(grep -c -E "pick [0-9a-f]{12,}" todo-list)
'
test_expect_success 'todo count' '
@@ -1964,6 +1964,113 @@ test_expect_success 'respect user edits to update-ref steps' '
test_cmp_rev HEAD refs/heads/no-conflict-branch
'
+test_expect_success '--update-refs: all update-ref lines removed' '
+ git checkout -b test-refs-not-removed no-conflict-branch &&
+ git branch -f base HEAD~4 &&
+ git branch -f first HEAD~3 &&
+ git branch -f second HEAD~3 &&
+ git branch -f third HEAD~1 &&
+ git branch -f tip &&
+
+ test_commit test-refs-not-removed &&
+ git commit --amend --fixup first &&
+
+ git rev-parse first second third tip no-conflict-branch >expect-oids &&
+
+ (
+ set_cat_todo_editor &&
+ test_must_fail git rebase -i --update-refs base >todo.raw &&
+ sed -e "/^update-ref/d" <todo.raw >todo
+ ) &&
+ (
+ set_replace_editor todo &&
+ git rebase -i --update-refs base
+ ) &&
+
+ # Ensure refs are not deleted and their OIDs have not changed
+ git rev-parse first second third tip no-conflict-branch >actual-oids &&
+ test_cmp expect-oids actual-oids
+'
+
+test_expect_success '--update-refs: all update-ref lines removed, then some re-added' '
+ git checkout -b test-refs-not-removed2 no-conflict-branch &&
+ git branch -f base HEAD~4 &&
+ git branch -f first HEAD~3 &&
+ git branch -f second HEAD~3 &&
+ git branch -f third HEAD~1 &&
+ git branch -f tip &&
+
+ test_commit test-refs-not-removed2 &&
+ git commit --amend --fixup first &&
+
+ git rev-parse first second third >expect-oids &&
+
+ (
+ set_cat_todo_editor &&
+ test_must_fail git rebase -i \
+ --autosquash --update-refs \
+ base >todo.raw &&
+ sed -e "/^update-ref/d" <todo.raw >todo
+ ) &&
+
+ # Add a break to the end of the todo so we can edit later
+ echo "break" >>todo &&
+
+ (
+ set_replace_editor todo &&
+ git rebase -i --autosquash --update-refs base &&
+ echo "update-ref refs/heads/tip" >todo &&
+ git rebase --edit-todo &&
+ git rebase --continue
+ ) &&
+
+ # Ensure first/second/third are unchanged, but tip is updated
+ git rev-parse first second third >actual-oids &&
+ test_cmp expect-oids actual-oids &&
+ test_cmp_rev HEAD tip
+'
+
+test_expect_success '--update-refs: --edit-todo with no update-ref lines' '
+ git checkout -b test-refs-not-removed3 no-conflict-branch &&
+ git branch -f base HEAD~4 &&
+ git branch -f first HEAD~3 &&
+ git branch -f second HEAD~3 &&
+ git branch -f third HEAD~1 &&
+ git branch -f tip &&
+
+ test_commit test-refs-not-removed3 &&
+ git commit --amend --fixup first &&
+
+ git rev-parse first second third tip no-conflict-branch >expect-oids &&
+
+ (
+ set_cat_todo_editor &&
+ test_must_fail git rebase -i \
+ --autosquash --update-refs \
+ base >todo.raw &&
+ sed -e "/^update-ref/d" <todo.raw >todo
+ ) &&
+
+ # Add a break to the beginning of the todo so we can resume with no
+ # update-ref lines
+ echo "break" >todo.new &&
+ cat todo >>todo.new &&
+
+ (
+ set_replace_editor todo.new &&
+ git rebase -i --autosquash --update-refs base &&
+
+ # Make no changes when editing so update-refs is still empty
+ cat todo >todo.new &&
+ git rebase --edit-todo &&
+ git rebase --continue
+ ) &&
+
+ # Ensure refs are not deleted and their OIDs have not changed
+ git rev-parse first second third tip no-conflict-branch >actual-oids &&
+ test_cmp expect-oids actual-oids
+'
+
test_expect_success '--update-refs: check failed ref update' '
git checkout -B update-refs-error no-conflict-branch &&
git branch -f base HEAD~4 &&
diff --git a/t/t3406-rebase-message.sh b/t/t3406-rebase-message.sh
index d17b450e81..ceca160005 100755
--- a/t/t3406-rebase-message.sh
+++ b/t/t3406-rebase-message.sh
@@ -10,10 +10,16 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
test_expect_success 'setup' '
test_commit O fileO &&
test_commit X fileX &&
+ git branch fast-forward &&
test_commit A fileA &&
test_commit B fileB &&
test_commit Y fileY &&
+ git checkout -b conflicts O &&
+ test_commit P &&
+ test_commit conflict-X fileX &&
+ test_commit Q &&
+
git checkout -b topic O &&
git cherry-pick A B &&
test_commit Z fileZ &&
@@ -79,54 +85,165 @@ test_expect_success 'error out early upon -C<n> or --whitespace=<bad>' '
test_i18ngrep "Invalid whitespace option" err
'
-test_expect_success 'GIT_REFLOG_ACTION' '
- git checkout start &&
- test_commit reflog-onto &&
- git checkout -b reflog-topic start &&
- test_commit reflog-to-rebase &&
+write_reflog_expect () {
+ if test $mode = --apply
+ then
+ sed 's/(continue)/(pick)/'
+ else
+ cat
+ fi >expect
+}
- git rebase reflog-onto &&
- git log -g --format=%gs -3 >actual &&
- cat >expect <<-\EOF &&
- rebase (finish): returning to refs/heads/reflog-topic
- rebase (pick): reflog-to-rebase
- rebase (start): checkout reflog-onto
+test_reflog () {
+ mode=$1
+ reflog_action="$2"
+
+ test_expect_success "rebase $mode reflog${reflog_action:+ GIT_REFLOG_ACTION=$reflog_action}" '
+ git checkout conflicts &&
+ test_when_finished "git reset --hard Q" &&
+
+ (
+ if test -n "$reflog_action"
+ then
+ GIT_REFLOG_ACTION="$reflog_action" &&
+ export GIT_REFLOG_ACTION
+ fi &&
+ test_must_fail git rebase $mode main &&
+ echo resolved >fileX &&
+ git add fileX &&
+ git rebase --continue
+ ) &&
+
+ git log -g --format=%gs -5 >actual &&
+ write_reflog_expect <<-EOF &&
+ ${reflog_action:-rebase} (finish): returning to refs/heads/conflicts
+ ${reflog_action:-rebase} (pick): Q
+ ${reflog_action:-rebase} (continue): conflict-X
+ ${reflog_action:-rebase} (pick): P
+ ${reflog_action:-rebase} (start): checkout main
EOF
test_cmp expect actual &&
- git checkout -b reflog-prefix reflog-to-rebase &&
- GIT_REFLOG_ACTION=change-the-reflog git rebase reflog-onto &&
- git log -g --format=%gs -3 >actual &&
- cat >expect <<-\EOF &&
- change-the-reflog (finish): returning to refs/heads/reflog-prefix
- change-the-reflog (pick): reflog-to-rebase
- change-the-reflog (start): checkout reflog-onto
+ git log -g --format=%gs -1 conflicts >actual &&
+ write_reflog_expect <<-EOF &&
+ ${reflog_action:-rebase} (finish): refs/heads/conflicts onto $(git rev-parse main)
+ EOF
+ test_cmp expect actual &&
+
+ # check there is only one new entry in the branch reflog
+ test_cmp_rev conflicts@{1} Q
+ '
+
+ test_expect_success "rebase $mode fast-forward reflog${reflog_action:+ GIT_REFLOG_ACTION=$reflog_action}" '
+ git checkout fast-forward &&
+ test_when_finished "git reset --hard X" &&
+
+ (
+ if test -n "$reflog_action"
+ then
+ GIT_REFLOG_ACTION="$reflog_action" &&
+ export GIT_REFLOG_ACTION
+ fi &&
+ git rebase $mode main
+ ) &&
+
+ git log -g --format=%gs -2 >actual &&
+ write_reflog_expect <<-EOF &&
+ ${reflog_action:-rebase} (finish): returning to refs/heads/fast-forward
+ ${reflog_action:-rebase} (start): checkout main
+ EOF
+ test_cmp expect actual &&
+
+ git log -g --format=%gs -1 fast-forward >actual &&
+ write_reflog_expect <<-EOF &&
+ ${reflog_action:-rebase} (finish): refs/heads/fast-forward onto $(git rev-parse main)
+ EOF
+ test_cmp expect actual &&
+
+ # check there is only one new entry in the branch reflog
+ test_cmp_rev fast-forward@{1} X
+ '
+
+ test_expect_success "rebase $mode --skip reflog${reflog_action:+ GIT_REFLOG_ACTION=$reflog_action}" '
+ git checkout conflicts &&
+ test_when_finished "git reset --hard Q" &&
+
+ (
+ if test -n "$reflog_action"
+ then
+ GIT_REFLOG_ACTION="$reflog_action" &&
+ export GIT_REFLOG_ACTION
+ fi &&
+ test_must_fail git rebase $mode main &&
+ git rebase --skip
+ ) &&
+
+ git log -g --format=%gs -4 >actual &&
+ write_reflog_expect <<-EOF &&
+ ${reflog_action:-rebase} (finish): returning to refs/heads/conflicts
+ ${reflog_action:-rebase} (pick): Q
+ ${reflog_action:-rebase} (pick): P
+ ${reflog_action:-rebase} (start): checkout main
EOF
test_cmp expect actual
-'
+ '
-test_expect_success 'rebase --apply reflog' '
- git checkout -b reflog-apply start &&
- old_head_reflog="$(git log -g --format=%gs -1 HEAD)" &&
+ test_expect_success "rebase $mode --abort reflog${reflog_action:+ GIT_REFLOG_ACTION=$reflog_action}" '
+ git checkout conflicts &&
+ test_when_finished "git reset --hard Q" &&
- git rebase --apply Y &&
+ git log -g -1 conflicts >branch-expect &&
+ (
+ if test -n "$reflog_action"
+ then
+ GIT_REFLOG_ACTION="$reflog_action" &&
+ export GIT_REFLOG_ACTION
+ fi &&
+ test_must_fail git rebase $mode main &&
+ git rebase --abort
+ ) &&
- git log -g --format=%gs -4 HEAD >actual &&
- cat >expect <<-EOF &&
- rebase finished: returning to refs/heads/reflog-apply
- rebase: Z
- rebase: checkout Y
- $old_head_reflog
+ git log -g --format=%gs -3 >actual &&
+ write_reflog_expect <<-EOF &&
+ ${reflog_action:-rebase} (abort): returning to refs/heads/conflicts
+ ${reflog_action:-rebase} (pick): P
+ ${reflog_action:-rebase} (start): checkout main
EOF
test_cmp expect actual &&
- git log -g --format=%gs -2 reflog-apply >actual &&
- cat >expect <<-EOF &&
- rebase finished: refs/heads/reflog-apply onto $(git rev-parse Y)
- branch: Created from start
+ # check branch reflog is unchanged
+ git log -g -1 conflicts >branch-actual &&
+ test_cmp branch-expect branch-actual
+ '
+
+ test_expect_success "rebase $mode --abort detached HEAD reflog${reflog_action:+ GIT_REFLOG_ACTION=$reflog_action}" '
+ git checkout Q &&
+ test_when_finished "git reset --hard Q" &&
+
+ (
+ if test -n "$reflog_action"
+ then
+ GIT_REFLOG_ACTION="$reflog_action" &&
+ export GIT_REFLOG_ACTION
+ fi &&
+ test_must_fail git rebase $mode main &&
+ git rebase --abort
+ ) &&
+
+ git log -g --format=%gs -3 >actual &&
+ write_reflog_expect <<-EOF &&
+ ${reflog_action:-rebase} (abort): returning to $(git rev-parse Q)
+ ${reflog_action:-rebase} (pick): P
+ ${reflog_action:-rebase} (start): checkout main
EOF
test_cmp expect actual
-'
+ '
+}
+
+test_reflog --merge
+test_reflog --merge my-reflog-action
+test_reflog --apply
+test_reflog --apply my-reflog-action
test_expect_success 'rebase -i onto unrelated history' '
git init unrelated &&
diff --git a/t/t3409-rebase-environ.sh b/t/t3409-rebase-environ.sh
index 83ffb39d9f..acaf5558db 100755
--- a/t/t3409-rebase-environ.sh
+++ b/t/t3409-rebase-environ.sh
@@ -2,6 +2,7 @@
test_description='git rebase interactive environment'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t3413-rebase-hook.sh b/t/t3413-rebase-hook.sh
index 9fab0d779b..e8456831e8 100755
--- a/t/t3413-rebase-hook.sh
+++ b/t/t3413-rebase-hook.sh
@@ -5,6 +5,7 @@ test_description='git rebase with its hook(s)'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
diff --git a/t/t3415-rebase-autosquash.sh b/t/t3415-rebase-autosquash.sh
index 78c27496d6..a364530d76 100755
--- a/t/t3415-rebase-autosquash.sh
+++ b/t/t3415-rebase-autosquash.sh
@@ -232,6 +232,19 @@ test_expect_success 'auto squash that matches longer sha1' '
test_line_count = 1 actual
'
+test_expect_success 'auto squash of fixup commit that matches branch name which points back to fixup commit' '
+ git reset --hard base &&
+ git commit --allow-empty -m "fixup! self-cycle" &&
+ git branch self-cycle &&
+ GIT_SEQUENCE_EDITOR="cat >tmp" git rebase --autosquash -i HEAD^^ &&
+ sed -ne "/^[^#]/{s/[0-9a-f]\{7,\}/HASH/g;p;}" tmp >actual &&
+ cat <<-EOF >expect &&
+ pick HASH second commit
+ pick HASH fixup! self-cycle # empty
+ EOF
+ test_cmp expect actual
+'
+
test_auto_commit_flags () {
git reset --hard base &&
echo 1 >file1 &&
diff --git a/t/t3416-rebase-onto-threedots.sh b/t/t3416-rebase-onto-threedots.sh
index 3e04802cb0..ea501f2b42 100755
--- a/t/t3416-rebase-onto-threedots.sh
+++ b/t/t3416-rebase-onto-threedots.sh
@@ -79,8 +79,10 @@ test_expect_success 'rebase -i --onto main...topic' '
git reset --hard &&
git checkout topic &&
git reset --hard G &&
- set_fake_editor &&
- EXPECT_COUNT=1 git rebase -i --onto main...topic F &&
+ (
+ set_fake_editor &&
+ EXPECT_COUNT=1 git rebase -i --onto main...topic F
+ ) &&
git rev-parse HEAD^1 >actual &&
git rev-parse C^0 >expect &&
test_cmp expect actual
@@ -90,20 +92,22 @@ test_expect_success 'rebase -i --onto main...' '
git reset --hard &&
git checkout topic &&
git reset --hard G &&
- set_fake_editor &&
- EXPECT_COUNT=1 git rebase -i --onto main... F &&
+ (
+ set_fake_editor &&
+ EXPECT_COUNT=1 git rebase -i --onto main... F
+ ) &&
git rev-parse HEAD^1 >actual &&
git rev-parse C^0 >expect &&
test_cmp expect actual
'
-test_expect_success 'rebase -i --onto main...side' '
+test_expect_success 'rebase --onto main...side requires a single merge-base' '
git reset --hard &&
git checkout side &&
git reset --hard K &&
- set_fake_editor &&
- test_must_fail git rebase -i --onto main...side J
+ test_must_fail git rebase -i --onto main...side J 2>err &&
+ grep "need exactly one merge base" err
'
test_expect_success 'rebase --keep-base --onto incompatible' '
@@ -156,8 +160,10 @@ test_expect_success 'rebase -i --keep-base main from topic' '
git checkout topic &&
git reset --hard G &&
- set_fake_editor &&
- EXPECT_COUNT=2 git rebase -i --keep-base main &&
+ (
+ set_fake_editor &&
+ EXPECT_COUNT=2 git rebase -i --keep-base main
+ ) &&
git rev-parse C >base.expect &&
git merge-base main HEAD >base.actual &&
test_cmp base.expect base.actual &&
@@ -171,8 +177,10 @@ test_expect_success 'rebase -i --keep-base main topic from main' '
git checkout main &&
git branch -f topic G &&
- set_fake_editor &&
- EXPECT_COUNT=2 git rebase -i --keep-base main topic &&
+ (
+ set_fake_editor &&
+ EXPECT_COUNT=2 git rebase -i --keep-base main topic
+ ) &&
git rev-parse C >base.expect &&
git merge-base main HEAD >base.actual &&
test_cmp base.expect base.actual &&
@@ -182,13 +190,39 @@ test_expect_success 'rebase -i --keep-base main topic from main' '
test_cmp expect actual
'
-test_expect_success 'rebase -i --keep-base main from side' '
+test_expect_success 'rebase --keep-base requires a single merge base' '
git reset --hard &&
git checkout side &&
git reset --hard K &&
- set_fake_editor &&
- test_must_fail git rebase -i --keep-base main
+ test_must_fail git rebase -i --keep-base main 2>err &&
+ grep "need exactly one merge base with branch" err
+'
+
+test_expect_success 'rebase --keep-base keeps cherry picks' '
+ git checkout -f -B main E &&
+ git cherry-pick F &&
+ (
+ set_fake_editor &&
+ EXPECT_COUNT=2 git rebase -i --keep-base HEAD G
+ ) &&
+ test_cmp_rev HEAD G
+'
+
+test_expect_success 'rebase --keep-base --no-reapply-cherry-picks' '
+ git checkout -f -B main E &&
+ git cherry-pick F &&
+ (
+ set_fake_editor &&
+ EXPECT_COUNT=1 git rebase -i --keep-base \
+ --no-reapply-cherry-picks HEAD G
+ ) &&
+ test_cmp_rev HEAD^ C
+'
+
+# This must be the last test in this file
+test_expect_success '$EDITOR and friends are unchanged' '
+ test_editor_unchanged
'
test_done
diff --git a/t/t3419-rebase-patch-id.sh b/t/t3419-rebase-patch-id.sh
index 295040f2fe..7181f176b8 100755
--- a/t/t3419-rebase-patch-id.sh
+++ b/t/t3419-rebase-patch-id.sh
@@ -43,15 +43,26 @@ test_expect_success 'setup: 500 lines' '
git add newfile &&
git commit -q -m "add small file" &&
- git cherry-pick main >/dev/null 2>&1
-'
+ git cherry-pick main >/dev/null 2>&1 &&
+
+ git branch -f squashed main &&
+ git checkout -q -f squashed &&
+ git reset -q --soft HEAD~2 &&
+ git commit -q -m squashed &&
+
+ git branch -f mode main &&
+ git checkout -q -f mode &&
+ test_chmod +x file &&
+ git commit -q -a --amend &&
-test_expect_success 'setup attributes' '
- echo "file binary" >.gitattributes
+ git branch -f modeother other &&
+ git checkout -q -f modeother &&
+ test_chmod +x file &&
+ git commit -q -a --amend
'
test_expect_success 'detect upstream patch' '
- git checkout -q main &&
+ git checkout -q main^{} &&
scramble file &&
git add file &&
git commit -q -m "change big file again" &&
@@ -61,14 +72,46 @@ test_expect_success 'detect upstream patch' '
test_must_be_empty revs
'
+test_expect_success 'detect upstream patch binary' '
+ echo "file binary" >.gitattributes &&
+ git checkout -q other^{} &&
+ git rebase main &&
+ git rev-list main...HEAD~ >revs &&
+ test_must_be_empty revs &&
+ test_when_finished "rm .gitattributes"
+'
+
+test_expect_success 'detect upstream patch modechange' '
+ git checkout -q modeother^{} &&
+ git rebase mode &&
+ git rev-list mode...HEAD~ >revs &&
+ test_must_be_empty revs
+'
+
test_expect_success 'do not drop patch' '
- git branch -f squashed main &&
- git checkout -q -f squashed &&
- git reset -q --soft HEAD~2 &&
- git commit -q -m squashed &&
git checkout -q other^{} &&
test_must_fail git rebase squashed &&
- git rebase --quit
+ test_when_finished "git rebase --abort"
+'
+
+test_expect_success 'do not drop patch binary' '
+ echo "file binary" >.gitattributes &&
+ git checkout -q other^{} &&
+ test_must_fail git rebase squashed &&
+ test_when_finished "git rebase --abort" &&
+ test_when_finished "rm .gitattributes"
+'
+
+test_expect_success 'do not drop patch modechange' '
+ git checkout -q modeother^{} &&
+ git rebase other &&
+ cat >expected <<-\EOF &&
+ diff --git a/file b/file
+ old mode 100644
+ new mode 100755
+ EOF
+ git diff HEAD~ >modediff &&
+ test_cmp expected modediff
'
test_done
diff --git a/t/t3428-rebase-signoff.sh b/t/t3428-rebase-signoff.sh
index f6993b7e14..e1b1e94764 100755
--- a/t/t3428-rebase-signoff.sh
+++ b/t/t3428-rebase-signoff.sh
@@ -5,6 +5,7 @@ test_description='git rebase --signoff
This test runs git rebase --signoff and makes sure that it works.
'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# A simple file to commit
diff --git a/t/t3429-rebase-edit-todo.sh b/t/t3429-rebase-edit-todo.sh
index abd66f3602..8e0d03969a 100755
--- a/t/t3429-rebase-edit-todo.sh
+++ b/t/t3429-rebase-edit-todo.sh
@@ -2,6 +2,7 @@
test_description='rebase should reread the todo file if an exec modifies it'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-rebase.sh
diff --git a/t/t3430-rebase-merges.sh b/t/t3430-rebase-merges.sh
index f351701fec..fa2a06c19f 100755
--- a/t/t3430-rebase-merges.sh
+++ b/t/t3430-rebase-merges.sh
@@ -138,6 +138,23 @@ test_expect_success '`reset` refuses to overwrite untracked files' '
git rebase --abort
'
+test_expect_success '`reset` rejects trees' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ test_must_fail env GIT_SEQUENCE_EDITOR="echo reset A^{tree} >" \
+ git rebase -i B C >out 2>err &&
+ grep "object .* is a tree" err &&
+ test_must_be_empty out
+'
+
+test_expect_success '`reset` only looks for labels under refs/rewritten/' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ git branch refs/rewritten/my-label A &&
+ test_must_fail env GIT_SEQUENCE_EDITOR="echo reset my-label >" \
+ git rebase -i B C >out 2>err &&
+ grep "could not resolve ${SQ}my-label${SQ}" err &&
+ test_must_be_empty out
+'
+
test_expect_success 'failed `merge -C` writes patch (may be rescheduled, too)' '
test_when_finished "test_might_fail git rebase --abort" &&
git checkout -b conflicting-merge A &&
diff --git a/t/t3431-rebase-fork-point.sh b/t/t3431-rebase-fork-point.sh
index 1d0b15380e..70e8136356 100755
--- a/t/t3431-rebase-fork-point.sh
+++ b/t/t3431-rebase-fork-point.sh
@@ -50,7 +50,7 @@ test_rebase () {
test_rebase 'G F E D B A'
test_rebase 'G F D B A' --onto D
-test_rebase 'G F B A' --keep-base
+test_rebase 'G F C B A' --keep-base
test_rebase 'G F C E D B A' --no-fork-point
test_rebase 'G F C D B A' --no-fork-point --onto D
test_rebase 'G F C B A' --no-fork-point --keep-base
diff --git a/t/t3433-rebase-across-mode-change.sh b/t/t3433-rebase-across-mode-change.sh
index 05df964670..c8172b0852 100755
--- a/t/t3433-rebase-across-mode-change.sh
+++ b/t/t3433-rebase-across-mode-change.sh
@@ -2,6 +2,7 @@
test_description='git rebase across mode change'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t3435-rebase-gpg-sign.sh b/t/t3435-rebase-gpg-sign.sh
index 5f8ba2c739..6aa2aeb628 100755
--- a/t/t3435-rebase-gpg-sign.sh
+++ b/t/t3435-rebase-gpg-sign.sh
@@ -64,14 +64,6 @@ test_rebase_gpg_sign ! true -i --no-gpg-sign
test_rebase_gpg_sign ! true -i --gpg-sign --no-gpg-sign
test_rebase_gpg_sign false -i --no-gpg-sign --gpg-sign
-test_expect_failure 'rebase -p --no-gpg-sign override commit.gpgsign' '
- test_when_finished "git clean -f" &&
- git reset --hard merged &&
- git config commit.gpgsign true &&
- git rebase -p --no-gpg-sign --onto=one fork-point main &&
- test_must_fail git verify-commit HEAD
-'
-
test_expect_success 'rebase -r, merge strategy, --gpg-sign will sign commit' '
git reset --hard merged &&
test_unconfig commit.gpgsign &&
diff --git a/t/t3438-rebase-broken-files.sh b/t/t3438-rebase-broken-files.sh
new file mode 100755
index 0000000000..b92a3ce46b
--- /dev/null
+++ b/t/t3438-rebase-broken-files.sh
@@ -0,0 +1,59 @@
+#!/bin/sh
+
+test_description='rebase behavior when on-disk files are broken'
+. ./test-lib.sh
+
+test_expect_success 'set up conflicting branches' '
+ test_commit base file &&
+ git checkout -b branch1 &&
+ test_commit one file &&
+ git checkout -b branch2 HEAD^ &&
+ test_commit two file
+'
+
+create_conflict () {
+ test_when_finished "git rebase --abort" &&
+ git checkout -B tmp branch2 &&
+ test_must_fail git rebase branch1
+}
+
+check_resolve_fails () {
+ echo resolved >file &&
+ git add file &&
+ test_must_fail git rebase --continue
+}
+
+for item in NAME EMAIL DATE
+do
+ test_expect_success "detect missing GIT_AUTHOR_$item" '
+ create_conflict &&
+
+ grep -v $item .git/rebase-merge/author-script >tmp &&
+ mv tmp .git/rebase-merge/author-script &&
+
+ check_resolve_fails
+ '
+done
+
+for item in NAME EMAIL DATE
+do
+ test_expect_success "detect duplicate GIT_AUTHOR_$item" '
+ create_conflict &&
+
+ grep -i $item .git/rebase-merge/author-script >tmp &&
+ cat tmp >>.git/rebase-merge/author-script &&
+
+ check_resolve_fails
+ '
+done
+
+test_expect_success 'unknown key in author-script' '
+ create_conflict &&
+
+ echo "GIT_AUTHOR_BOGUS=${SQ}whatever${SQ}" \
+ >>.git/rebase-merge/author-script &&
+
+ check_resolve_fails
+'
+
+test_done
diff --git a/t/t3700-add.sh b/t/t3700-add.sh
index 8689b48589..51afbd7b24 100755
--- a/t/t3700-add.sh
+++ b/t/t3700-add.sh
@@ -291,7 +291,7 @@ test_expect_success BSLASHPSPEC "git add 'fo\\[ou\\]bar' ignores foobar" '
git reset --hard &&
touch fo\[ou\]bar foobar &&
git add '\''fo\[ou\]bar'\'' &&
- git ls-files fo\[ou\]bar | fgrep fo\[ou\]bar &&
+ git ls-files fo\[ou\]bar | grep -F fo\[ou\]bar &&
! ( git ls-files foobar | grep foobar )
'
diff --git a/t/t3702-add-edit.sh b/t/t3702-add-edit.sh
index a1801a8cbd..82bfb2fd2a 100755
--- a/t/t3702-add-edit.sh
+++ b/t/t3702-add-edit.sh
@@ -100,7 +100,7 @@ EOF
echo "#!$SHELL_PATH" >fake-editor.sh
cat >> fake-editor.sh <<\EOF
-egrep -v '^index' "$1" >orig-patch &&
+grep -E -v '^index' "$1" >orig-patch &&
mv -f patch "$1"
EOF
diff --git a/t/t4012-diff-binary.sh b/t/t4012-diff-binary.sh
index c509143c81..c64d9d2f40 100755
--- a/t/t4012-diff-binary.sh
+++ b/t/t4012-diff-binary.sh
@@ -113,20 +113,20 @@ test_expect_success 'diff --no-index with binary creation' '
'
cat >expect <<EOF
- binfile | Bin 0 -> 1026 bytes
- textfile | 10000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ binfilë | Bin 0 -> 1026 bytes
+ tëxtfilë | 10000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
EOF
test_expect_success 'diff --stat with binary files and big change count' '
- printf "\01\00%1024d" 1 >binfile &&
- git add binfile &&
+ printf "\01\00%1024d" 1 >binfilë &&
+ git add binfilë &&
i=0 &&
while test $i -lt 10000; do
echo $i &&
i=$(($i + 1)) || return 1
- done >textfile &&
- git add textfile &&
- git diff --cached --stat binfile textfile >output &&
+ done >tëxtfilë &&
+ git add tëxtfilë &&
+ git -c core.quotepath=false diff --cached --stat binfilë tëxtfilë >output &&
grep " | " output >actual &&
test_cmp expect actual
'
diff --git a/t/t4014-format-patch.sh b/t/t4014-format-patch.sh
index ad5c029279..de1da4673d 100755
--- a/t/t4014-format-patch.sh
+++ b/t/t4014-format-patch.sh
@@ -1457,7 +1457,7 @@ append_signoff()
C=$(git commit-tree HEAD^^{tree} -p HEAD) &&
git format-patch --stdout --signoff $C^..$C >append_signoff.patch &&
sed -n -e "1,/^---$/p" append_signoff.patch |
- egrep -n "^Subject|Sign|^$"
+ grep -E -n "^Subject|Sign|^$"
}
test_expect_success 'signoff: commit with no body' '
@@ -2274,10 +2274,10 @@ test_expect_success 'format-patch --base with --attach' '
test_expect_success 'format-patch --attach cover-letter only is non-multipart' '
test_when_finished "rm -fr patches" &&
git format-patch -o patches --cover-letter --attach=mimemime --base=HEAD~ -1 &&
- ! egrep "^--+mimemime" patches/0000*.patch &&
- egrep "^--+mimemime$" patches/0001*.patch >output &&
+ ! grep -E "^--+mimemime" patches/0000*.patch &&
+ grep -E "^--+mimemime$" patches/0001*.patch >output &&
test_line_count = 2 output &&
- egrep "^--+mimemime--$" patches/0001*.patch >output &&
+ grep -E "^--+mimemime--$" patches/0001*.patch >output &&
test_line_count = 1 output
'
diff --git a/t/t4015-diff-whitespace.sh b/t/t4015-diff-whitespace.sh
index f3e20dd5bb..b298f220e0 100755
--- a/t/t4015-diff-whitespace.sh
+++ b/t/t4015-diff-whitespace.sh
@@ -1638,7 +1638,7 @@ test_expect_success 'no effect on diff from --color-moved with --word-diff' '
test_cmp expect actual
'
-test_expect_success !SANITIZE_LEAK 'no effect on show from --color-moved with --word-diff' '
+test_expect_success 'no effect on show from --color-moved with --word-diff' '
git show --color-moved --word-diff >actual &&
git show --word-diff >expect &&
test_cmp expect actual
@@ -2024,7 +2024,7 @@ test_expect_success '--color-moved rewinds for MIN_ALNUM_COUNT' '
test_cmp expected actual
'
-test_expect_success !SANITIZE_LEAK 'move detection with submodules' '
+test_expect_success 'move detection with submodules' '
test_create_repo bananas &&
echo ripe >bananas/recipe &&
git -C bananas add recipe &&
diff --git a/t/t4027-diff-submodule.sh b/t/t4027-diff-submodule.sh
index 40164ae07d..e08ee315a7 100755
--- a/t/t4027-diff-submodule.sh
+++ b/t/t4027-diff-submodule.sh
@@ -34,6 +34,25 @@ test_expect_success setup '
subtip=$3 subprev=$2
'
+test_expect_success 'diff in superproject with submodules respects parallel settings' '
+ test_when_finished "rm -f trace.out" &&
+ (
+ GIT_TRACE=$(pwd)/trace.out git diff &&
+ grep "1 tasks" trace.out &&
+ >trace.out &&
+
+ git config submodule.diffJobs 8 &&
+ GIT_TRACE=$(pwd)/trace.out git diff &&
+ grep "8 tasks" trace.out &&
+ >trace.out &&
+
+ GIT_TRACE=$(pwd)/trace.out git -c submodule.diffJobs=0 diff &&
+ grep "preparing to run up to [0-9]* tasks" trace.out &&
+ ! grep "up to 0 tasks" trace.out &&
+ >trace.out
+ )
+'
+
test_expect_success 'git diff --raw HEAD' '
hexsz=$(test_oid hexsz) &&
git diff --raw --abbrev=$hexsz HEAD >actual &&
diff --git a/t/t4038-diff-combined.sh b/t/t4038-diff-combined.sh
index 9a292bac70..2ce26e585c 100755
--- a/t/t4038-diff-combined.sh
+++ b/t/t4038-diff-combined.sh
@@ -80,11 +80,21 @@ test_expect_success 'check combined output (1)' '
verify_helper sidewithone
'
+test_expect_success 'check combined output (1) with git diff <rev>^!' '
+ git diff sidewithone^! -- >sidewithone &&
+ verify_helper sidewithone
+'
+
test_expect_success 'check combined output (2)' '
git show sidesansone -- >sidesansone &&
verify_helper sidesansone
'
+test_expect_success 'check combined output (2) with git diff <rev>^!' '
+ git diff sidesansone^! -- >sidesansone &&
+ verify_helper sidesansone
+'
+
test_expect_success 'diagnose truncated file' '
>file &&
git add file &&
diff --git a/t/t4045-diff-relative.sh b/t/t4045-diff-relative.sh
index fab351b48a..198dfc9190 100755
--- a/t/t4045-diff-relative.sh
+++ b/t/t4045-diff-relative.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='diff --relative tests'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t4052-stat-output.sh b/t/t4052-stat-output.sh
index b5c281edaa..3ee27e277d 100755
--- a/t/t4052-stat-output.sh
+++ b/t/t4052-stat-output.sh
@@ -8,6 +8,7 @@ test_description='test --stat output of various commands'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-terminal.sh
diff --git a/t/t4053-diff-no-index.sh b/t/t4053-diff-no-index.sh
index 3feadf0e35..4e9fa0403d 100755
--- a/t/t4053-diff-no-index.sh
+++ b/t/t4053-diff-no-index.sh
@@ -2,6 +2,7 @@
test_description='diff --no-index'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t4067-diff-partial-clone.sh b/t/t4067-diff-partial-clone.sh
index 28f42a4046..f60f5cbd65 100755
--- a/t/t4067-diff-partial-clone.sh
+++ b/t/t4067-diff-partial-clone.sh
@@ -2,6 +2,7 @@
test_description='behavior of diff when reading objects in a partial clone'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'git show batches blobs' '
diff --git a/t/t4111-apply-subdir.sh b/t/t4111-apply-subdir.sh
index 1618a6dbc7..e9a87d761d 100755
--- a/t/t4111-apply-subdir.sh
+++ b/t/t4111-apply-subdir.sh
@@ -2,6 +2,7 @@
test_description='patching from inconvenient places'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t4135-apply-weird-filenames.sh b/t/t4135-apply-weird-filenames.sh
index 6bc3fb97a7..d3502c6fdd 100755
--- a/t/t4135-apply-weird-filenames.sh
+++ b/t/t4135-apply-weird-filenames.sh
@@ -2,6 +2,7 @@
test_description='git apply with weird postimage filenames'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t4141-apply-too-large.sh b/t/t4141-apply-too-large.sh
new file mode 100755
index 0000000000..58742d4fc5
--- /dev/null
+++ b/t/t4141-apply-too-large.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+test_description='git apply with too-large patch'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+test_expect_success EXPENSIVE 'git apply rejects patches that are too large' '
+ sz=$((1024 * 1024 * 1023)) &&
+ {
+ cat <<-\EOF &&
+ diff --git a/file b/file
+ new file mode 100644
+ --- /dev/null
+ +++ b/file
+ @@ -0,0 +1 @@
+ EOF
+ test-tool genzeros
+ } | test_copy_bytes $sz | test_must_fail git apply 2>err &&
+ grep "git apply: failed to read" err
+'
+
+test_done
diff --git a/t/t4201-shortlog.sh b/t/t4201-shortlog.sh
index 3095b1b2ff..8e4effebdb 100755
--- a/t/t4201-shortlog.sh
+++ b/t/t4201-shortlog.sh
@@ -83,6 +83,13 @@ test_expect_success 'pretty format' '
test_cmp expect log.predictable
'
+test_expect_success 'pretty format (with --date)' '
+ sed "s/SUBJECT/2005-04-07 OBJECT_NAME/" expect.template >expect &&
+ git shortlog --format="%ad %H" --date=short HEAD >log &&
+ fuzz log >log.predictable &&
+ test_cmp expect log.predictable
+'
+
test_expect_success '--abbrev' '
sed s/SUBJECT/OBJID/ expect.template >expect &&
git shortlog --format="%h" --abbrev=35 HEAD >log &&
@@ -237,6 +244,26 @@ test_expect_success 'shortlog --group=trailer:signed-off-by' '
test_cmp expect actual
'
+test_expect_success 'shortlog --group=format' '
+ git shortlog -s --date="format:%Y" --group="format:%cN (%cd)" \
+ HEAD >actual &&
+ cat >expect <<-\EOF &&
+ 4 C O Mitter (2005)
+ 1 Sin Nombre (2005)
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'shortlog --group=<format> DWIM' '
+ git shortlog -s --date="format:%Y" --group="%cN (%cd)" HEAD >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'shortlog bogus --group' '
+ test_must_fail git shortlog --group=bogus HEAD 2>err &&
+ grep "unknown group type" err
+'
+
test_expect_success 'trailer idents are split' '
cat >expect <<-\EOF &&
2 C O Mitter
@@ -319,6 +346,18 @@ test_expect_success 'shortlog can match multiple groups' '
test_cmp expect actual
'
+test_expect_success 'shortlog can match multiple format groups' '
+ GIT_COMMITTER_NAME="$GIT_AUTHOR_NAME" \
+ git commit --allow-empty -m "identical names" &&
+ test_tick &&
+ cat >expect <<-\EOF &&
+ 2 A U Thor
+ 1 C O Mitter
+ EOF
+ git shortlog -ns --group="%cn" --group="%an" -2 HEAD >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'set up option selection tests' '
git commit --allow-empty -F - <<-\EOF
subject
diff --git a/t/t4202-log.sh b/t/t4202-log.sh
index cc15cb4ff6..2ce2b41174 100755
--- a/t/t4202-log.sh
+++ b/t/t4202-log.sh
@@ -249,6 +249,15 @@ test_expect_success 'log --grep' '
test_cmp expect actual
'
+for noop_opt in --invert-grep --all-match
+do
+ test_expect_success "log $noop_opt without --grep is a NOOP" '
+ git log >expect &&
+ git log $noop_opt >actual &&
+ test_cmp expect actual
+ '
+done
+
cat > expect << EOF
second
initial
diff --git a/t/t4204-patch-id.sh b/t/t4204-patch-id.sh
index a730c0db98..a7fa94ce0a 100755
--- a/t/t4204-patch-id.sh
+++ b/t/t4204-patch-id.sh
@@ -8,13 +8,13 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
test_expect_success 'setup' '
- as="a a a a a a a a" && # eight a
- test_write_lines $as >foo &&
- test_write_lines $as >bar &&
+ str="ab cd ef gh ij kl mn op" &&
+ test_write_lines $str >foo &&
+ test_write_lines $str >bar &&
git add foo bar &&
git commit -a -m initial &&
- test_write_lines $as b >foo &&
- test_write_lines $as b >bar &&
+ test_write_lines $str b >foo &&
+ test_write_lines $str b >bar &&
git commit -a -m first &&
git checkout -b same main &&
git commit --amend -m same-msg &&
@@ -22,8 +22,23 @@ test_expect_success 'setup' '
echo c >foo &&
echo c >bar &&
git commit --amend -a -m notsame-msg &&
+ git checkout -b with_space main~ &&
+ cat >foo <<-\EOF &&
+ a b
+ c d
+ e f
+ g h
+ i j
+ k l
+ m n
+ op
+ EOF
+ cp foo bar &&
+ git add foo bar &&
+ git commit --amend -m "with spaces" &&
test_write_lines bar foo >bar-then-foo &&
test_write_lines foo bar >foo-then-bar
+
'
test_expect_success 'patch-id output is well-formed' '
@@ -42,7 +57,7 @@ calc_patch_id () {
}
get_top_diff () {
- git log -p -1 "$@" -O bar-then-foo --
+ git log -p -1 "$@" -O bar-then-foo --full-index --
}
get_patch_id () {
@@ -61,6 +76,33 @@ test_expect_success 'patch-id detects inequality' '
get_patch_id notsame &&
! test_cmp patch-id_main patch-id_notsame
'
+test_expect_success 'patch-id detects equality binary' '
+ cat >.gitattributes <<-\EOF &&
+ foo binary
+ bar binary
+ EOF
+ get_patch_id main &&
+ get_patch_id same &&
+ git log -p -1 --binary main >top-diff.output &&
+ calc_patch_id <top-diff.output main_binpatch &&
+ git log -p -1 --binary same >top-diff.output &&
+ calc_patch_id <top-diff.output same_binpatch &&
+ test_cmp patch-id_main patch-id_main_binpatch &&
+ test_cmp patch-id_same patch-id_same_binpatch &&
+ test_cmp patch-id_main patch-id_same &&
+ test_when_finished "rm .gitattributes"
+'
+
+test_expect_success 'patch-id detects inequality binary' '
+ cat >.gitattributes <<-\EOF &&
+ foo binary
+ bar binary
+ EOF
+ get_patch_id main &&
+ get_patch_id notsame &&
+ ! test_cmp patch-id_main patch-id_notsame &&
+ test_when_finished "rm .gitattributes"
+'
test_expect_success 'patch-id supports git-format-patch output' '
get_patch_id main &&
@@ -101,9 +143,21 @@ test_patch_id_file_order () {
git format-patch -1 --stdout -O foo-then-bar >format-patch.output &&
calc_patch_id <format-patch.output "ordered-$name" "$@" &&
cmp_patch_id $relevant "$name" "ordered-$name"
+}
+test_patch_id_whitespace () {
+ relevant="$1"
+ shift
+ name="ws-${1}-$relevant"
+ shift
+ get_top_diff "main~" >top-diff.output &&
+ calc_patch_id <top-diff.output "$name" "$@" &&
+ get_top_diff "with_space" >top-diff.output &&
+ calc_patch_id <top-diff.output "ws-$name" "$@" &&
+ cmp_patch_id $relevant "$name" "ws-$name"
}
+
# combined test for options: add more tests here to make them
# run with all options
test_patch_id () {
@@ -119,6 +173,14 @@ test_expect_success 'file order is relevant with --unstable' '
test_patch_id_file_order relevant --unstable --unstable
'
+test_expect_success 'whitespace is relevant with --verbatim' '
+ test_patch_id_whitespace relevant --verbatim --verbatim
+'
+
+test_expect_success 'whitespace is irrelevant without --verbatim' '
+ test_patch_id_whitespace irrelevant --stable --stable
+'
+
#Now test various option combinations.
test_expect_success 'default is unstable' '
test_patch_id relevant default
@@ -134,6 +196,17 @@ test_expect_success 'patchid.stable = false is unstable' '
test_patch_id relevant patchid.stable=false
'
+test_expect_success 'patchid.verbatim = true is correct and stable' '
+ test_config patchid.verbatim true &&
+ test_patch_id_whitespace relevant patchid.verbatim=true &&
+ test_patch_id irrelevant patchid.verbatim=true
+'
+
+test_expect_success 'patchid.verbatim = false is unstable' '
+ test_config patchid.verbatim false &&
+ test_patch_id relevant patchid.verbatim=false
+'
+
test_expect_success '--unstable overrides patchid.stable = true' '
test_config patchid.stable true &&
test_patch_id relevant patchid.stable=true--unstable --unstable
@@ -144,6 +217,11 @@ test_expect_success '--stable overrides patchid.stable = false' '
test_patch_id irrelevant patchid.stable=false--stable --stable
'
+test_expect_success '--verbatim overrides patchid.stable = false' '
+ test_config patchid.stable false &&
+ test_patch_id_whitespace relevant stable=false--verbatim --verbatim
+'
+
test_expect_success 'patch-id supports git-format-patch MIME output' '
get_patch_id main &&
git checkout same &&
@@ -198,7 +276,10 @@ test_expect_success 'patch-id handles no-nl-at-eof markers' '
EOF
calc_patch_id nonl <nonl &&
calc_patch_id withnl <withnl &&
- test_cmp patch-id_nonl patch-id_withnl
+ test_cmp patch-id_nonl patch-id_withnl &&
+ calc_patch_id nonl-inc-ws --verbatim <nonl &&
+ calc_patch_id withnl-inc-ws --verbatim <withnl &&
+ ! test_cmp patch-id_nonl-inc-ws patch-id_withnl-inc-ws
'
test_expect_success 'patch-id handles diffs with one line of before/after' '
diff --git a/t/t4213-log-tabexpand.sh b/t/t4213-log-tabexpand.sh
index 53a4af3244..590fce95e9 100755
--- a/t/t4213-log-tabexpand.sh
+++ b/t/t4213-log-tabexpand.sh
@@ -2,6 +2,7 @@
test_description='log/show --expand-tabs'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
HT=" "
diff --git a/t/t4301-merge-tree-write-tree.sh b/t/t4301-merge-tree-write-tree.sh
index 28ca5c38bb..a8983a0edc 100755
--- a/t/t4301-merge-tree-write-tree.sh
+++ b/t/t4301-merge-tree-write-tree.sh
@@ -810,4 +810,116 @@ test_expect_success 'can override merge of unrelated histories' '
test_cmp expect actual
'
+test_expect_success SANITY 'merge-ort fails gracefully in a read-only repository' '
+ git init --bare read-only &&
+ git push read-only side1 side2 side3 &&
+ test_when_finished "chmod -R u+w read-only" &&
+ chmod -R a-w read-only &&
+ test_must_fail git -C read-only merge-tree side1 side3 &&
+ test_must_fail git -C read-only merge-tree side1 side2
+'
+
+test_expect_success '--stdin with both a successful and a conflicted merge' '
+ printf "side1 side3\nside1 side2" | git merge-tree --stdin >actual &&
+
+ git checkout side1^0 &&
+ git merge side3 &&
+
+ printf "1\0" >expect &&
+ git rev-parse HEAD^{tree} | lf_to_nul >>expect &&
+ printf "\0" >>expect &&
+
+ git checkout side1^0 &&
+ test_must_fail git merge side2 &&
+ sed s/HEAD/side1/ greeting >tmp &&
+ mv tmp greeting &&
+ git add -u &&
+ git mv whatever~HEAD whatever~side1 &&
+
+ printf "0\0" >>expect &&
+ git write-tree | lf_to_nul >>expect &&
+
+ cat <<-EOF | q_to_tab | lf_to_nul >>expect &&
+ 100644 $(git rev-parse side1~1:greeting) 1Qgreeting
+ 100644 $(git rev-parse side1:greeting) 2Qgreeting
+ 100644 $(git rev-parse side2:greeting) 3Qgreeting
+ 100644 $(git rev-parse side1~1:whatever) 1Qwhatever~side1
+ 100644 $(git rev-parse side1:whatever) 2Qwhatever~side1
+ EOF
+
+ q_to_nul <<-EOF >>expect &&
+ Q1QgreetingQAuto-mergingQAuto-merging greeting
+ Q1QgreetingQCONFLICT (contents)QCONFLICT (content): Merge conflict in greeting
+ Q1QnumbersQAuto-mergingQAuto-merging numbers
+ Q2Qwhatever~side1QwhateverQCONFLICT (file/directory)QCONFLICT (file/directory): directory in the way of whatever from side1; moving it to whatever~side1 instead.
+ Q1Qwhatever~side1QCONFLICT (modify/delete)QCONFLICT (modify/delete): whatever~side1 deleted in side2 and modified in side1. Version side1 of whatever~side1 left in tree.
+ EOF
+
+ printf "\0\0" >>expect &&
+
+ test_cmp expect actual
+'
+
+
+test_expect_success '--merge-base is incompatible with --stdin' '
+ test_must_fail git merge-tree --merge-base=side1 --stdin 2>expect &&
+
+ grep "^fatal: --merge-base is incompatible with --stdin" expect
+'
+
+# specify merge-base as parent of branch2
+# git merge-tree --write-tree --merge-base=c2 c1 c3
+# Commit c1: add file1
+# Commit c2: add file2 after c1
+# Commit c3: add file3 after c2
+# Expected: add file3, and file2 does NOT appear
+
+test_expect_success 'specify merge-base as parent of branch2' '
+ # Setup
+ test_when_finished "rm -rf base-b2-p" &&
+ git init base-b2-p &&
+ test_commit -C base-b2-p c1 file1 &&
+ test_commit -C base-b2-p c2 file2 &&
+ test_commit -C base-b2-p c3 file3 &&
+
+ # Testing
+ TREE_OID=$(git -C base-b2-p merge-tree --write-tree --merge-base=c2 c1 c3) &&
+
+ q_to_tab <<-EOF >expect &&
+ 100644 blob $(git -C base-b2-p rev-parse c1:file1)Qfile1
+ 100644 blob $(git -C base-b2-p rev-parse c3:file3)Qfile3
+ EOF
+
+ git -C base-b2-p ls-tree $TREE_OID >actual &&
+ test_cmp expect actual
+'
+
+# Since the earlier tests have verified that individual merge-tree calls
+# are doing the right thing, this test case is only used to verify that
+# we can also trigger merges via --stdin, and that when we do we get
+# the same answer as running a bunch of separate merges.
+
+test_expect_success 'check the input format when --stdin is passed' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ test_commit -C repo c1 &&
+ test_commit -C repo c2 &&
+ test_commit -C repo c3 &&
+ printf "c1 c3\nc2 -- c1 c3\nc2 c3" | git -C repo merge-tree --stdin >actual &&
+
+ printf "1\0" >expect &&
+ git -C repo merge-tree --write-tree -z c1 c3 >>expect &&
+ printf "\0" >>expect &&
+
+ printf "1\0" >>expect &&
+ git -C repo merge-tree --write-tree -z --merge-base=c2 c1 c3 >>expect &&
+ printf "\0" >>expect &&
+
+ printf "1\0" >>expect &&
+ git -C repo merge-tree --write-tree -z c2 c3 >>expect &&
+ printf "\0" >>expect &&
+
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t5000-tar-tree.sh b/t/t5000-tar-tree.sh
index eaa0b22ece..d473048138 100755
--- a/t/t5000-tar-tree.sh
+++ b/t/t5000-tar-tree.sh
@@ -342,6 +342,13 @@ test_expect_success 'only enabled filters are available remotely' '
test_cmp_bin remote.bar config.bar
'
+test_expect_success 'invalid filter is reported only once' '
+ test_must_fail git -c tar.invalid.command= archive --format=invalid \
+ HEAD >out 2>err &&
+ test_must_be_empty out &&
+ test_line_count = 1 err
+'
+
test_expect_success 'git archive --format=tgz' '
git archive --format=tgz HEAD >j.tgz
'
diff --git a/t/t5100-mailinfo.sh b/t/t5100-mailinfo.sh
index cebad1048c..db11cababd 100755
--- a/t/t5100-mailinfo.sh
+++ b/t/t5100-mailinfo.sh
@@ -201,13 +201,13 @@ test_expect_success 'mailinfo -b double [PATCH]' '
test z"$subj" = z"Subject: message"
'
-test_expect_failure 'mailinfo -b trailing [PATCH]' '
+test_expect_success 'mailinfo -b trailing [PATCH]' '
subj="$(echo "Subject: [other] [PATCH] message" |
git mailinfo -b /dev/null /dev/null)" &&
test z"$subj" = z"Subject: [other] message"
'
-test_expect_failure 'mailinfo -b separated double [PATCH]' '
+test_expect_success 'mailinfo -b separated double [PATCH]' '
subj="$(echo "Subject: [PATCH] [other] [PATCH] message" |
git mailinfo -b /dev/null /dev/null)" &&
test z"$subj" = z"Subject: [other] message"
diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh
index 6d693eef82..7d8dee41b0 100755
--- a/t/t5310-pack-bitmaps.sh
+++ b/t/t5310-pack-bitmaps.sh
@@ -428,8 +428,9 @@ test_bitmap_cases () {
test_line_count = 2 packs &&
test_line_count = 2 bitmaps &&
- git rev-list --use-bitmap-index HEAD 2>err &&
- grep "ignoring extra bitmap file" err
+ GIT_TRACE2_EVENT=$(pwd)/trace2.txt git rev-list --use-bitmap-index HEAD &&
+ grep "opened bitmap" trace2.txt &&
+ grep "ignoring extra bitmap" trace2.txt
)
'
}
diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh
index afbe93f162..b5f9b10922 100755
--- a/t/t5319-multi-pack-index.sh
+++ b/t/t5319-multi-pack-index.sh
@@ -784,6 +784,70 @@ test_expect_success 'repack creates a new pack' '
)
'
+test_expect_success 'repack (all) ignores cruft pack' '
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit base &&
+ test_commit --no-tag unreachable &&
+
+ git reset --hard base &&
+ git reflog expire --all --expire=all &&
+ git repack --cruft -d &&
+
+ git multi-pack-index write &&
+
+ find $objdir/pack | sort >before &&
+ git multi-pack-index repack --batch-size=0 &&
+ find $objdir/pack | sort >after &&
+
+ test_cmp before after
+ )
+'
+
+test_expect_success 'repack (--batch-size) ignores cruft pack' '
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit_bulk 5 &&
+ test_commit --no-tag unreachable &&
+
+ git reset --hard HEAD^ &&
+ git reflog expire --all --expire=all &&
+ git repack --cruft -d &&
+
+ test_commit four &&
+
+ find $objdir/pack -type f -name "*.pack" | sort >before &&
+ git repack -d &&
+ find $objdir/pack -type f -name "*.pack" | sort >after &&
+
+ pack="$(comm -13 before after)" &&
+ test_file_size "$pack" >sz &&
+ # Set --batch-size to twice the size of the pack created
+ # in the previous step, since this is enough to
+ # accommodate it and the cruft pack.
+ #
+ # This means that the MIDX machinery *could* combine the
+ # new and cruft packs together.
+ #
+ # We ensure that it does not below.
+ batch="$((($(cat sz) * 2)))" &&
+
+ git multi-pack-index write &&
+
+ find $objdir/pack | sort >before &&
+ git multi-pack-index repack --batch-size=$batch &&
+ find $objdir/pack | sort >after &&
+
+ test_cmp before after
+ )
+'
+
test_expect_success 'expire removes repacked packs' '
(
cd dup &&
@@ -847,6 +911,36 @@ test_expect_success 'expire respects .keep files' '
)
'
+test_expect_success 'expiring unreferenced cruft pack retains pack' '
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit base &&
+ test_commit --no-tag unreachable &&
+ unreachable=$(git rev-parse HEAD) &&
+
+ git reset --hard base &&
+ git reflog expire --all --expire=all &&
+ git repack --cruft -d &&
+ mtimes="$(ls $objdir/pack/pack-*.mtimes)" &&
+
+ echo "base..$unreachable" >in &&
+ pack="$(git pack-objects --revs --delta-base-offset \
+ $objdir/pack/pack <in)" &&
+
+ # Preferring the contents of "$pack" will leave the
+ # cruft pack unreferenced (ie., none of the objects
+ # contained in the cruft pack will have their MIDX copy
+ # selected from the cruft pack).
+ git multi-pack-index write --preferred-pack="pack-$pack.pack" &&
+ git multi-pack-index expire &&
+
+ test_path_is_file "$mtimes"
+ )
+'
+
test_expect_success 'repack --batch-size=0 repacks everything' '
cp -r dup dup2 &&
(
diff --git a/t/t5320-delta-islands.sh b/t/t5320-delta-islands.sh
index 124d47603d..406363381f 100755
--- a/t/t5320-delta-islands.sh
+++ b/t/t5320-delta-islands.sh
@@ -134,7 +134,7 @@ test_expect_success 'island core places core objects first' '
repack -adfi &&
git verify-pack -v .git/objects/pack/*.pack |
cut -d" " -f1 |
- egrep "$root|$two" >actual &&
+ grep -E "$root|$two" >actual &&
test_cmp expect actual
'
diff --git a/t/t5326-multi-pack-bitmaps.sh b/t/t5326-multi-pack-bitmaps.sh
index ad6eea5fa0..0882cbb6e4 100755
--- a/t/t5326-multi-pack-bitmaps.sh
+++ b/t/t5326-multi-pack-bitmaps.sh
@@ -410,4 +410,28 @@ test_expect_success 'preferred pack change with existing MIDX bitmap' '
)
'
+test_expect_success 'tagged commits are selected for bitmapping' '
+ rm -fr repo &&
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit --annotate base &&
+ git repack -d &&
+
+ # Remove refs/heads/main which points at the commit directly,
+ # leaving only a reference to the annotated tag.
+ git branch -M main &&
+ git checkout base &&
+ git branch -d main &&
+
+ git multi-pack-index write --bitmap &&
+
+ git rev-parse HEAD >want &&
+ test-tool bitmap list-commits >actual &&
+ grep $(cat want) actual
+ )
+'
+
test_done
diff --git a/t/t5502-quickfetch.sh b/t/t5502-quickfetch.sh
index b160f8b7fb..0c4aadebae 100755
--- a/t/t5502-quickfetch.sh
+++ b/t/t5502-quickfetch.sh
@@ -122,7 +122,7 @@ test_expect_success 'quickfetch should not copy from alternate' '
'
-test_expect_success 'quickfetch should handle ~1000 refs (on Windows)' '
+test_expect_success PACKED_REFS_V1 'quickfetch should handle ~1000 refs (on Windows)' '
git gc &&
head=$(git rev-parse HEAD) &&
diff --git a/t/t5505-remote.sh b/t/t5505-remote.sh
index 9006196ac6..43b7bcd715 100755
--- a/t/t5505-remote.sh
+++ b/t/t5505-remote.sh
@@ -902,6 +902,17 @@ test_expect_success 'rename a remote renames repo remote.pushDefault but keeps g
)
'
+test_expect_success 'rename handles remote without fetch refspec' '
+ git clone --bare one no-refspec.git &&
+ # confirm assumption that bare clone does not create refspec
+ test_expect_code 5 \
+ git -C no-refspec.git config --unset-all remote.origin.fetch &&
+ git -C no-refspec.git config remote.origin.url >expect &&
+ git -C no-refspec.git remote rename origin foo &&
+ git -C no-refspec.git config remote.foo.url >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'rename does not update a non-default fetch refspec' '
git clone one four.one &&
(
diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh
index 79dc470c01..4f2bfaf005 100755
--- a/t/t5516-fetch-push.sh
+++ b/t/t5516-fetch-push.sh
@@ -1853,37 +1853,6 @@ test_expect_success 'refuse to push a hidden ref, and make sure do not pollute t
test_dir_is_empty testrepo/.git/objects/pack
'
-test_expect_success LIBCURL 'fetch warns or fails when using username:password' '
- message="URL '\''https://username:<redacted>@localhost/'\'' uses plaintext credentials" &&
- test_must_fail git -c transfer.credentialsInUrl=allow fetch https://username:password@localhost 2>err &&
- ! grep "$message" err &&
-
- test_must_fail git -c transfer.credentialsInUrl=warn fetch https://username:password@localhost 2>err &&
- grep "warning: $message" err >warnings &&
- test_line_count = 3 warnings &&
-
- test_must_fail git -c transfer.credentialsInUrl=die fetch https://username:password@localhost 2>err &&
- grep "fatal: $message" err >warnings &&
- test_line_count = 1 warnings &&
-
- test_must_fail git -c transfer.credentialsInUrl=die fetch https://username:@localhost 2>err &&
- grep "fatal: $message" err >warnings &&
- test_line_count = 1 warnings
-'
-
-
-test_expect_success LIBCURL 'push warns or fails when using username:password' '
- message="URL '\''https://username:<redacted>@localhost/'\'' uses plaintext credentials" &&
- test_must_fail git -c transfer.credentialsInUrl=allow push https://username:password@localhost 2>err &&
- ! grep "$message" err &&
-
- test_must_fail git -c transfer.credentialsInUrl=warn push https://username:password@localhost 2>err &&
- grep "warning: $message" err >warnings &&
- test_must_fail git -c transfer.credentialsInUrl=die push https://username:password@localhost 2>err &&
- grep "fatal: $message" err >warnings &&
- test_line_count = 1 warnings
-'
-
test_expect_success 'push with config push.useBitmaps' '
mk_test testrepo heads/main &&
git checkout main &&
diff --git a/t/t5526-fetch-submodules.sh b/t/t5526-fetch-submodules.sh
index 3c44f19612..b9546ef8e5 100755
--- a/t/t5526-fetch-submodules.sh
+++ b/t/t5526-fetch-submodules.sh
@@ -178,6 +178,7 @@ test_expect_success "submodule.recurse option triggers recursive fetch" '
'
test_expect_success "fetch --recurse-submodules -j2 has the same output behaviour" '
+ test_when_finished "rm -f trace.out" &&
add_submodule_commits &&
(
cd downstream &&
@@ -705,17 +706,30 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .git
test_expect_success 'fetching submodules respects parallel settings' '
git config fetch.recurseSubmodules true &&
+ test_when_finished "rm -f downstream/trace.out" &&
(
cd downstream &&
GIT_TRACE=$(pwd)/trace.out git fetch &&
grep "1 tasks" trace.out &&
+ >trace.out &&
+
GIT_TRACE=$(pwd)/trace.out git fetch --jobs 7 &&
grep "7 tasks" trace.out &&
+ >trace.out &&
+
git config submodule.fetchJobs 8 &&
GIT_TRACE=$(pwd)/trace.out git fetch &&
grep "8 tasks" trace.out &&
+ >trace.out &&
+
GIT_TRACE=$(pwd)/trace.out git fetch --jobs 9 &&
- grep "9 tasks" trace.out
+ grep "9 tasks" trace.out &&
+ >trace.out &&
+
+ GIT_TRACE=$(pwd)/trace.out git -c submodule.fetchJobs=0 fetch &&
+ grep "preparing to run up to [0-9]* tasks" trace.out &&
+ ! grep "up to 0 tasks" trace.out &&
+ >trace.out
)
'
diff --git a/t/t5531-deep-submodule-push.sh b/t/t5531-deep-submodule-push.sh
index 3f58b515ce..302e4cbdba 100755
--- a/t/t5531-deep-submodule-push.sh
+++ b/t/t5531-deep-submodule-push.sh
@@ -512,6 +512,56 @@ test_expect_success 'push only unpushed submodules recursively' '
test_cmp expected_pub actual_pub
'
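+# Set up an upstream superproject with a submodule ("sub") that itself
+# contains a submodule ("deepsub"), then create a recursive downstream
+# clone with a "downstream-branch" checked out at every level.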
+setup_subsub () {
+ git init upstream &&
+ git init upstream/sub &&
+ git init upstream/sub/deepsub &&
+ test_commit -C upstream/sub/deepsub innermost &&
+ git -C upstream/sub submodule add ./deepsub deepsub &&
+ git -C upstream/sub commit -m middle &&
+ git -C upstream submodule add ./sub sub &&
+ git -C upstream commit -m outermost &&
+
+ git -c protocol.file.allow=always clone --recurse-submodules upstream downstream &&
+ git -C downstream/sub/deepsub checkout -b downstream-branch &&
+ git -C downstream/sub checkout -b downstream-branch &&
+ git -C downstream checkout -b downstream-branch
+}
+
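+# Create a new commit in each repository of the downstream clone, updating
+# the gitlink in each parent so that all three levels have new history to push.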
+new_downstream_commits () {
+ test_commit -C downstream/sub/deepsub new-innermost &&
+ git -C downstream/sub add deepsub &&
+ git -C downstream/sub commit -m new-middle &&
+ git -C downstream add sub &&
+ git -C downstream commit -m new-outermost
+}
+
+test_expect_success 'push with push.recurseSubmodules=only on superproject' '
+ test_when_finished rm -rf upstream downstream &&
+ setup_subsub &&
+ new_downstream_commits &&
+ git -C downstream config push.recurseSubmodules only &&
+ git -C downstream push origin downstream-branch &&
+
+ test_must_fail git -C upstream rev-parse refs/heads/downstream-branch &&
+ git -C upstream/sub rev-parse refs/heads/downstream-branch &&
+ test_must_fail git -C upstream/sub/deepsub rev-parse refs/heads/downstream-branch
+'
+
+test_expect_success 'push with push.recurseSubmodules=only on superproject and top-level submodule' '
+ test_when_finished rm -rf upstream downstream &&
+ setup_subsub &&
+ new_downstream_commits &&
+ git -C downstream config push.recurseSubmodules only &&
+ git -C downstream/sub config push.recurseSubmodules only &&
+ git -C downstream push origin downstream-branch 2>err &&
+
+ test_must_fail git -C upstream rev-parse refs/heads/downstream-branch &&
+ git -C upstream/sub rev-parse refs/heads/downstream-branch &&
+ git -C upstream/sub/deepsub rev-parse refs/heads/downstream-branch &&
+ grep "recursing into submodule with push.recurseSubmodules=only; using on-demand instead" err
+'
+
test_expect_success 'push propagating the remotes name to a submodule' '
git -C work remote add origin ../pub.git &&
git -C work remote add pub ../pub.git &&
diff --git a/t/t5539-fetch-http-shallow.sh b/t/t5539-fetch-http-shallow.sh
index 3ea75d34ca..5e3b430436 100755
--- a/t/t5539-fetch-http-shallow.sh
+++ b/t/t5539-fetch-http-shallow.sh
@@ -5,6 +5,13 @@ test_description='fetch/clone from a shallow clone over http'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+# If GIT_TEST_PACKED_REFS_VERSION=2, then the packed-refs file will
+# be written in v2 format without extensions.refFormat=packed-v2. This
+# causes issues for the HTTP server, which does not carry over the
+# environment variable to the server process.
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
diff --git a/t/t5541-http-push-smart.sh b/t/t5541-http-push-smart.sh
index fbad2d5ff5..495437dd3c 100755
--- a/t/t5541-http-push-smart.sh
+++ b/t/t5541-http-push-smart.sh
@@ -7,6 +7,13 @@ test_description='test smart pushing over http via http-backend'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+# If GIT_TEST_PACKED_REFS_VERSION=2, then the packed-refs file will
+# be written in v2 format without extensions.refFormat=packed-v2. This
+# causes issues for the HTTP server, which does not carry over the
+# environment variable to the server process.
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
. ./test-lib.sh
ROOT_PATH="$PWD"
diff --git a/t/t5542-push-http-shallow.sh b/t/t5542-push-http-shallow.sh
index c2cc83182f..c47b18b9fa 100755
--- a/t/t5542-push-http-shallow.sh
+++ b/t/t5542-push-http-shallow.sh
@@ -5,6 +5,13 @@ test_description='push from/to a shallow clone over http'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+# If GIT_TEST_PACKED_REFS_VERSION=2, then the packed-refs file will
+# be written in v2 format without extensions.refFormat=packed-v2. This
+# causes issues for the HTTP server, which does not carry over the
+# environment variable to the server process.
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
diff --git a/t/t5544-pack-objects-hook.sh b/t/t5544-pack-objects-hook.sh
index 54f54f8d2e..1a9e14bbcc 100755
--- a/t/t5544-pack-objects-hook.sh
+++ b/t/t5544-pack-objects-hook.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test custom script in place of pack-objects'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'create some history to fetch' '
diff --git a/t/t5550-http-fetch-dumb.sh b/t/t5550-http-fetch-dumb.sh
index d7cf85ffea..8f182a3cbf 100755
--- a/t/t5550-http-fetch-dumb.sh
+++ b/t/t5550-http-fetch-dumb.sh
@@ -234,7 +234,7 @@ test_expect_success 'http-fetch --packfile' '
--index-pack-arg=--keep \
"$HTTPD_URL"/dumb/repo_pack.git/$p >out &&
- grep "^keep.[0-9a-f]\{16,\}$" out &&
+ grep -E "^keep.[0-9a-f]{16,}$" out &&
cut -c6- out >packhash &&
# Ensure that the expected files are generated
diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh
index 6a38294a47..faf6bd44d8 100755
--- a/t/t5551-http-fetch-smart.sh
+++ b/t/t5551-http-fetch-smart.sh
@@ -1,13 +1,26 @@
#!/bin/sh
-test_description='test smart fetching over http via http-backend'
+: ${HTTP_PROTO:=HTTP}
+test_description="test smart fetching over http via http-backend ($HTTP_PROTO)"
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+# If GIT_TEST_PACKED_REFS_VERSION=2, then the packed-refs file will
+# be written in v2 format without extensions.refFormat=packed-v2. This
+# causes issues for the HTTP server, which does not carry over the
+# environment variable to the server process.
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-httpd.sh
+test "$HTTP_PROTO" = "HTTP/2" && enable_http2
start_httpd
+test_expect_success HTTP2 'enable client-side http/2' '
+ git config --global http.version HTTP/2
+'
+
test_expect_success 'setup repository' '
git config push.default matching &&
echo content >file &&
@@ -347,7 +360,10 @@ test_expect_success CMDLINE_LIMIT \
test_expect_success 'large fetch-pack requests can be sent using chunked encoding' '
GIT_TRACE_CURL=true git -c http.postbuffer=65536 \
clone --bare "$HTTPD_URL/smart/repo.git" split.git 2>err &&
- grep "^=> Send header: Transfer-Encoding: chunked" err
+ {
+ test_have_prereq HTTP2 ||
+ grep "^=> Send header: Transfer-Encoding: chunked" err
+ }
'
test_expect_success 'test allowreachablesha1inwant' '
@@ -580,4 +596,81 @@ test_expect_success 'passing hostname resolution information works' '
git -c "http.curloptResolve=$BOGUS_HOST:$LIB_HTTPD_PORT:127.0.0.1" ls-remote "$BOGUS_HTTPD_URL/smart/repo.git" >/dev/null
'
+# here user%40host is the URL-encoded version of user@host,
+# which is our intentionally-odd username to catch parsing errors
+url_user=$HTTPD_URL_USER/auth/smart/repo.git
+url_userpass=$HTTPD_URL_USER_PASS/auth/smart/repo.git
+url_userblank=$HTTPD_PROTO://user%40host:@$HTTPD_DEST/auth/smart/repo.git
+message="URL .*:<redacted>@.* uses plaintext credentials"
+
+test_expect_success 'clone warns or fails when using username:password' '
+ test_when_finished "rm -rf attempt*" &&
+
+ git -c transfer.credentialsInUrl=allow \
+ clone $url_userpass attempt1 2>err &&
+ ! grep "$message" err &&
+
+ git -c transfer.credentialsInUrl=warn \
+ clone $url_userpass attempt2 2>err &&
+ grep "warning: $message" err >warnings &&
+ test_line_count -ge 1 warnings &&
+
+ test_must_fail git -c transfer.credentialsInUrl=die \
+ clone $url_userpass attempt3 2>err &&
+ grep "fatal: $message" err >warnings &&
+ test_line_count -ge 1 warnings &&
+
+ test_must_fail git -c transfer.credentialsInUrl=die \
+ clone $url_userblank attempt4 2>err &&
+ grep "fatal: $message" err >warnings &&
+ test_line_count -ge 1 warnings
+'
+
+test_expect_success 'clone does not detect username:password when it is https://username@domain:port/' '
+ test_when_finished "rm -rf attempt1" &&
+
+ # we are relying on lib-httpd for url construction, so document our
+ # assumptions
+ case "$HTTPD_URL_USER" in
+ *:[0-9]*) : ok ;;
+ *) BUG "httpd url does not have port: $HTTPD_URL_USER"
+ esac &&
+
+ git -c transfer.credentialsInUrl=warn clone $url_user attempt1 2>err &&
+ ! grep "uses plaintext credentials" err
+'
+
+test_expect_success 'fetch warns or fails when using username:password' '
+ git -c transfer.credentialsInUrl=allow fetch $url_userpass 2>err &&
+ ! grep "$message" err &&
+
+ git -c transfer.credentialsInUrl=warn fetch $url_userpass 2>err &&
+ grep "warning: $message" err >warnings &&
+ test_line_count -ge 1 warnings &&
+
+ test_must_fail git -c transfer.credentialsInUrl=die \
+ fetch $url_userpass 2>err &&
+ grep "fatal: $message" err >warnings &&
+ test_line_count -ge 1 warnings &&
+
+ test_must_fail git -c transfer.credentialsInUrl=die \
+ fetch $url_userblank 2>err &&
+ grep "fatal: $message" err >warnings &&
+ test_line_count -ge 1 warnings
+'
+
+
+test_expect_success 'push warns or fails when using username:password' '
+ git -c transfer.credentialsInUrl=allow push $url_userpass 2>err &&
+ ! grep "$message" err &&
+
+ git -c transfer.credentialsInUrl=warn push $url_userpass 2>err &&
+ grep "warning: $message" err >warnings &&
+
+ test_must_fail git -c transfer.credentialsInUrl=die \
+ push $url_userpass 2>err &&
+ grep "fatal: $message" err >warnings &&
+ test_line_count -ge 1 warnings
+'
+
test_done
diff --git a/t/t5554-noop-fetch-negotiator.sh b/t/t5554-noop-fetch-negotiator.sh
index 2ac7b5859e..06991e8e8a 100755
--- a/t/t5554-noop-fetch-negotiator.sh
+++ b/t/t5554-noop-fetch-negotiator.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test noop fetch negotiator'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'noop negotiator does not emit any "have"' '
diff --git a/t/t5558-clone-bundle-uri.sh b/t/t5558-clone-bundle-uri.sh
index ad666a2d28..3e35322155 100755
--- a/t/t5558-clone-bundle-uri.sh
+++ b/t/t5558-clone-bundle-uri.sh
@@ -2,6 +2,13 @@
test_description='test fetching bundles with --bundle-uri'
+# If GIT_TEST_PACKED_REFS_VERSION=2, then the packed-refs file will
+# be written in v2 format without extensions.refFormat=packed-v2. This
+# causes issues for the HTTP server, which does not carry over the
+# environment variable to the server process.
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
. ./test-lib.sh
test_expect_success 'fail to clone from non-existent file' '
@@ -41,6 +48,215 @@ test_expect_success 'clone with file:// bundle' '
test_cmp expect actual
'
+# To get interesting tests for bundle lists, we need to construct a
+# somewhat-interesting commit history.
+#
+# ---------------- bundle-4
+#
+# 4
+# / \
+# ----|---|------- bundle-3
+# | |
+# | 3
+# | |
+# ----|---|------- bundle-2
+# | |
+# 2 |
+# | |
+# ----|---|------- bundle-1
+# \ /
+# 1
+# |
+# (previous commits)
+test_expect_success 'construct incremental bundle list' '
+ (
+ cd clone-from &&
+ git checkout -b base &&
+ test_commit 1 &&
+ git checkout -b left &&
+ test_commit 2 &&
+ git checkout -b right base &&
+ test_commit 3 &&
+ git checkout -b merge left &&
+ git merge right -m "4" &&
+
+ git bundle create bundle-1.bundle base &&
+ git bundle create bundle-2.bundle base..left &&
+ git bundle create bundle-3.bundle base..right &&
+ git bundle create bundle-4.bundle merge --not left right
+ )
+'
+
+test_expect_success 'clone bundle list (file, no heuristic)' '
+ cat >bundle-list <<-EOF &&
+ [bundle]
+ version = 1
+ mode = all
+
+ [bundle "bundle-1"]
+ uri = file://$(pwd)/clone-from/bundle-1.bundle
+
+ [bundle "bundle-2"]
+ uri = file://$(pwd)/clone-from/bundle-2.bundle
+
+ [bundle "bundle-3"]
+ uri = file://$(pwd)/clone-from/bundle-3.bundle
+
+ [bundle "bundle-4"]
+ uri = file://$(pwd)/clone-from/bundle-4.bundle
+ EOF
+
+ git clone --bundle-uri="file://$(pwd)/bundle-list" \
+ clone-from clone-list-file 2>err &&
+ ! grep "Repository lacks these prerequisite commits" err &&
+
+ git -C clone-from for-each-ref --format="%(objectname)" >oids &&
+ git -C clone-list-file cat-file --batch-check <oids &&
+
+ git -C clone-list-file for-each-ref --format="%(refname)" >refs &&
+ grep "refs/bundles/" refs >actual &&
+ cat >expect <<-\EOF &&
+ refs/bundles/base
+ refs/bundles/left
+ refs/bundles/merge
+ refs/bundles/right
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'clone bundle list (file, all mode, some failures)' '
+ cat >bundle-list <<-EOF &&
+ [bundle]
+ version = 1
+ mode = all
+
+ # Does not exist. Should be skipped.
+ [bundle "bundle-0"]
+ uri = file://$(pwd)/clone-from/bundle-0.bundle
+
+ [bundle "bundle-1"]
+ uri = file://$(pwd)/clone-from/bundle-1.bundle
+
+ [bundle "bundle-2"]
+ uri = file://$(pwd)/clone-from/bundle-2.bundle
+
+ # No bundle-3 means bundle-4 will not apply.
+
+ [bundle "bundle-4"]
+ uri = file://$(pwd)/clone-from/bundle-4.bundle
+
+ # Does not exist. Should be skipped.
+ [bundle "bundle-5"]
+ uri = file://$(pwd)/clone-from/bundle-5.bundle
+ EOF
+
+ GIT_TRACE2_PERF=1 \
+ git clone --bundle-uri="file://$(pwd)/bundle-list" \
+ clone-from clone-all-some 2>err &&
+ ! grep "Repository lacks these prerequisite commits" err &&
+ ! grep "fatal" err &&
+ grep "warning: failed to download bundle from URI" err &&
+
+ git -C clone-from for-each-ref --format="%(objectname)" >oids &&
+ git -C clone-all-some cat-file --batch-check <oids &&
+
+ git -C clone-all-some for-each-ref --format="%(refname)" >refs &&
+ grep "refs/bundles/" refs >actual &&
+ cat >expect <<-\EOF &&
+ refs/bundles/base
+ refs/bundles/left
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'clone bundle list (file, all mode, all failures)' '
+ cat >bundle-list <<-EOF &&
+ [bundle]
+ version = 1
+ mode = all
+
+ # Does not exist. Should be skipped.
+ [bundle "bundle-0"]
+ uri = file://$(pwd)/clone-from/bundle-0.bundle
+
+ # Does not exist. Should be skipped.
+ [bundle "bundle-5"]
+ uri = file://$(pwd)/clone-from/bundle-5.bundle
+ EOF
+
+ git clone --bundle-uri="file://$(pwd)/bundle-list" \
+ clone-from clone-all-fail 2>err &&
+ ! grep "Repository lacks these prerequisite commits" err &&
+ ! grep "fatal" err &&
+ grep "warning: failed to download bundle from URI" err &&
+
+ git -C clone-from for-each-ref --format="%(objectname)" >oids &&
+ git -C clone-all-fail cat-file --batch-check <oids &&
+
+ git -C clone-all-fail for-each-ref --format="%(refname)" >refs &&
+ ! grep "refs/bundles/" refs
+'
+
+test_expect_success 'clone bundle list (file, any mode)' '
+ cat >bundle-list <<-EOF &&
+ [bundle]
+ version = 1
+ mode = any
+
+ # Does not exist. Should be skipped.
+ [bundle "bundle-0"]
+ uri = file://$(pwd)/clone-from/bundle-0.bundle
+
+ [bundle "bundle-1"]
+ uri = file://$(pwd)/clone-from/bundle-1.bundle
+
+ # Does not exist. Should be skipped.
+ [bundle "bundle-5"]
+ uri = file://$(pwd)/clone-from/bundle-5.bundle
+ EOF
+
+ git clone --bundle-uri="file://$(pwd)/bundle-list" \
+ clone-from clone-any-file 2>err &&
+ ! grep "Repository lacks these prerequisite commits" err &&
+
+ git -C clone-from for-each-ref --format="%(objectname)" >oids &&
+ git -C clone-any-file cat-file --batch-check <oids &&
+
+ git -C clone-any-file for-each-ref --format="%(refname)" >refs &&
+ grep "refs/bundles/" refs >actual &&
+ cat >expect <<-\EOF &&
+ refs/bundles/base
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'clone bundle list (file, any mode, all failures)' '
+ cat >bundle-list <<-EOF &&
+ [bundle]
+ version = 1
+ mode = any
+
+ # Does not exist. Should be skipped.
+ [bundle "bundle-0"]
+ uri = $HTTPD_URL/bundle-0.bundle
+
+ # Does not exist. Should be skipped.
+ [bundle "bundle-5"]
+ uri = $HTTPD_URL/bundle-5.bundle
+ EOF
+
+ git clone --bundle-uri="file://$(pwd)/bundle-list" \
+ clone-from clone-any-fail 2>err &&
+ ! grep "fatal" err &&
+ grep "warning: failed to download bundle from URI" err &&
+
+ git -C clone-from for-each-ref --format="%(objectname)" >oids &&
+ git -C clone-any-fail cat-file --batch-check <oids &&
+
+ git -C clone-any-fail for-each-ref --format="%(refname)" >refs &&
+ ! grep "refs/bundles/" refs
+'
+
#########################################################################
# HTTP tests begin here
@@ -75,6 +291,72 @@ test_expect_success 'clone HTTP bundle' '
test_config -C clone-http log.excludedecoration refs/bundle/
'
+test_expect_success 'clone bundle list (HTTP, no heuristic)' '
+ cp clone-from/bundle-*.bundle "$HTTPD_DOCUMENT_ROOT_PATH/" &&
+ cat >"$HTTPD_DOCUMENT_ROOT_PATH/bundle-list" <<-EOF &&
+ [bundle]
+ version = 1
+ mode = all
+
+ [bundle "bundle-1"]
+ uri = $HTTPD_URL/bundle-1.bundle
+
+ [bundle "bundle-2"]
+ uri = $HTTPD_URL/bundle-2.bundle
+
+ [bundle "bundle-3"]
+ uri = $HTTPD_URL/bundle-3.bundle
+
+ [bundle "bundle-4"]
+ uri = $HTTPD_URL/bundle-4.bundle
+ EOF
+
+ git clone --bundle-uri="$HTTPD_URL/bundle-list" \
+ clone-from clone-list-http 2>err &&
+ ! grep "Repository lacks these prerequisite commits" err &&
+
+ git -C clone-from for-each-ref --format="%(objectname)" >oids &&
+ git -C clone-list-http cat-file --batch-check <oids
+'
+
+test_expect_success 'clone bundle list (HTTP, any mode)' '
+ cp clone-from/bundle-*.bundle "$HTTPD_DOCUMENT_ROOT_PATH/" &&
+ cat >"$HTTPD_DOCUMENT_ROOT_PATH/bundle-list" <<-EOF &&
+ [bundle]
+ version = 1
+ mode = any
+
+ # Does not exist. Should be skipped.
+ [bundle "bundle-0"]
+ uri = $HTTPD_URL/bundle-0.bundle
+
+ [bundle "bundle-1"]
+ uri = $HTTPD_URL/bundle-1.bundle
+
+ # Does not exist. Should be skipped.
+ [bundle "bundle-5"]
+ uri = $HTTPD_URL/bundle-5.bundle
+ EOF
+
+ git clone --bundle-uri="$HTTPD_URL/bundle-list" \
+ clone-from clone-any-http 2>err &&
+ ! grep "fatal" err &&
+ grep "warning: failed to download bundle from URI" err &&
+
+ git -C clone-from for-each-ref --format="%(objectname)" >oids &&
+ git -C clone-any-http cat-file --batch-check <oids &&
+
+ git -C clone-any-http for-each-ref --format="%(refname)" >refs &&
+ grep "refs/bundles/" refs >actual &&
+ cat >expect <<-\EOF &&
+ refs/bundles/base
+ EOF
+ test_cmp expect actual
+'
+
# Do not add tests here unless they use the HTTP server, as they will
# not run unless the HTTP dependencies exist.
diff --git a/t/t5559-http-fetch-smart-http2.sh b/t/t5559-http-fetch-smart-http2.sh
new file mode 100755
index 0000000000..9eece71c2c
--- /dev/null
+++ b/t/t5559-http-fetch-smart-http2.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+HTTP_PROTO=HTTP/2
+. ./t5551-http-fetch-smart.sh
diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh
index 45f0803ed4..23c8999bc5 100755
--- a/t/t5601-clone.sh
+++ b/t/t5601-clone.sh
@@ -71,29 +71,6 @@ test_expect_success 'clone respects GIT_WORK_TREE' '
'
-test_expect_success LIBCURL 'clone warns or fails when using username:password' '
- message="URL '\''https://username:<redacted>@localhost/'\'' uses plaintext credentials" &&
- test_must_fail git -c transfer.credentialsInUrl=allow clone https://username:password@localhost attempt1 2>err &&
- ! grep "$message" err &&
-
- test_must_fail git -c transfer.credentialsInUrl=warn clone https://username:password@localhost attempt2 2>err &&
- grep "warning: $message" err >warnings &&
- test_line_count = 2 warnings &&
-
- test_must_fail git -c transfer.credentialsInUrl=die clone https://username:password@localhost attempt3 2>err &&
- grep "fatal: $message" err >warnings &&
- test_line_count = 1 warnings &&
-
- test_must_fail git -c transfer.credentialsInUrl=die clone https://username:@localhost attempt3 2>err &&
- grep "fatal: $message" err >warnings &&
- test_line_count = 1 warnings
-'
-
-test_expect_success LIBCURL 'clone does not detect username:password when it is https://username@domain:port/' '
- test_must_fail git -c transfer.credentialsInUrl=warn clone https://username@localhost:8080 attempt3 2>err &&
- ! grep "uses plaintext credentials" err
-'
-
test_expect_success 'clone from hooks' '
test_create_repo r0 &&
@@ -333,6 +310,28 @@ test_expect_success 'clone checking out a tag' '
test_cmp fetch.expected fetch.actual
'
+test_expect_success '--detach detaches and does not create branch' '
+ test_when_finished "rm -fr dst" &&
+ git clone --detach src dst &&
+ (
+ cd dst &&
+ test_must_fail git rev-parse main &&
+ test_must_fail git symbolic-ref HEAD &&
+ test_cmp_rev HEAD refs/remotes/origin/HEAD
+ )
+'
+
+test_expect_success '--detach with --bare detaches but creates branch' '
+ test_when_finished "rm -fr dst" &&
+ git clone --bare --detach src dst &&
+ (
+ cd dst &&
+ git rev-parse main &&
+ test_must_fail git symbolic-ref HEAD &&
+ test_cmp_rev HEAD refs/heads/main
+ )
+'
+
test_expect_success 'set up ssh wrapper' '
cp "$GIT_BUILD_DIR/t/helper/test-fake-ssh$X" \
"$TRASH_DIRECTORY/ssh$X" &&
diff --git a/t/t5606-clone-options.sh b/t/t5606-clone-options.sh
index f6bb02ab94..cf221e92c4 100755
--- a/t/t5606-clone-options.sh
+++ b/t/t5606-clone-options.sh
@@ -42,11 +42,12 @@ test_expect_success 'rejects invalid -o/--origin' '
'
-test_expect_success 'disallows --bare with --origin' '
+test_expect_success 'clone --bare -o' '
- test_must_fail git clone -o foo --bare parent clone-bare-o 2>err &&
- test_debug "cat err" &&
- test_i18ngrep -e "options .--bare. and .--origin foo. cannot be used together" err
+ git clone -o foo --bare parent clone-bare-o &&
+ (cd parent && pwd) >expect &&
+ git -C clone-bare-o config remote.foo.url >actual &&
+ test_cmp expect actual
'
diff --git a/t/t5610-clone-detached.sh b/t/t5610-clone-detached.sh
index a7ec21eda5..022ed3d87c 100755
--- a/t/t5610-clone-detached.sh
+++ b/t/t5610-clone-detached.sh
@@ -4,6 +4,7 @@ test_description='test cloning a repository with detached HEAD'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
head_is_detached() {
diff --git a/t/t5611-clone-config.sh b/t/t5611-clone-config.sh
index 4b3877216e..727caff443 100755
--- a/t/t5611-clone-config.sh
+++ b/t/t5611-clone-config.sh
@@ -4,6 +4,7 @@ test_description='tests for git clone -c key=value'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'clone -c sets config in cloned repo' '
diff --git a/t/t5614-clone-submodules-shallow.sh b/t/t5614-clone-submodules-shallow.sh
index 0c85ef834a..c2a2bb453e 100755
--- a/t/t5614-clone-submodules-shallow.sh
+++ b/t/t5614-clone-submodules-shallow.sh
@@ -2,6 +2,7 @@
test_description='Test shallow cloning of repos with submodules'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
pwd=$(pwd)
diff --git a/t/t5617-clone-submodules-remote.sh b/t/t5617-clone-submodules.sh
index 6884338249..5767c4d318 100755
--- a/t/t5617-clone-submodules-remote.sh
+++ b/t/t5617-clone-submodules.sh
@@ -1,10 +1,11 @@
#!/bin/sh
-test_description='Test cloning repos with submodules using remote-tracking branches'
+test_description='Test cloning repos with submodules'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
pwd=$(pwd)
@@ -13,10 +14,17 @@ test_expect_success 'setup' '
git config --global protocol.file.allow always &&
git checkout -b main &&
test_commit commit1 &&
+ mkdir subsub &&
+ (
+ cd subsub &&
+ git init &&
+ test_commit subsubcommit1
+ ) &&
mkdir sub &&
(
cd sub &&
git init &&
+ git submodule add "file://$pwd/subsub" subsub &&
test_commit subcommit1 &&
git tag sub_when_added_to_super &&
git branch other
@@ -107,4 +115,35 @@ test_expect_success '--no-also-filter-submodules overrides clone.filterSubmodule
test_cmp_config -C super_clone3/sub false --default false remote.origin.promisor
'
+test_expect_success 'submodule.propagateBranches checks out branches at correct commits' '
+ test_when_finished "git checkout main" &&
+
+ git checkout -b checked-out &&
+ git -C sub checkout -b not-in-clone &&
+ git -C subsub checkout -b not-in-clone &&
+ git clone --recurse-submodules \
+ --branch checked-out \
+ -c submodule.propagateBranches=true \
+ "file://$pwd/." super_clone4 &&
+
+ # Assert that each repo is pointing to "checked-out"
+ for REPO in "super_clone4" "super_clone4/sub" "super_clone4/sub/subsub"
+ do
+ HEAD_BRANCH=$(git -C $REPO symbolic-ref HEAD) &&
+ test $HEAD_BRANCH = "refs/heads/checked-out" || return 1
+ done &&
+
+ # Assert that the submodule branches are pointing to the right revs
+ EXPECT_SUB_OID="$(git -C super_clone4 rev-parse :sub)" &&
+ ACTUAL_SUB_OID="$(git -C super_clone4/sub rev-parse refs/heads/checked-out)" &&
+ test $EXPECT_SUB_OID = $ACTUAL_SUB_OID &&
+ EXPECT_SUBSUB_OID="$(git -C super_clone4/sub rev-parse :subsub)" &&
+ ACTUAL_SUBSUB_OID="$(git -C super_clone4/sub/subsub rev-parse refs/heads/checked-out)" &&
+ test $EXPECT_SUBSUB_OID = $ACTUAL_SUBSUB_OID &&
+
+ # Assert that the submodules do not have branches from their upstream
+ test_must_fail git -C super_clone4/sub rev-parse not-in-clone &&
+ test_must_fail git -C super_clone4/sub/subsub rev-parse not-in-clone
+'
+
test_done
diff --git a/t/t5618-alternate-refs.sh b/t/t5618-alternate-refs.sh
index 3353216f09..f905db0a3f 100755
--- a/t/t5618-alternate-refs.sh
+++ b/t/t5618-alternate-refs.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test handling of --alternate-refs traversal'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Avoid test_commit because we want a specific and known set of refs:
diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh
index 5d42a355a8..b33cd4afca 100755
--- a/t/t5702-protocol-v2.sh
+++ b/t/t5702-protocol-v2.sh
@@ -1001,7 +1001,7 @@ test_expect_success 'part of packfile response provided as URI' '
do
git verify-pack --object-format=$(test_oid algo) --verbose $idx >out &&
{
- grep "^[0-9a-f]\{16,\} " out || :
+ grep -E "^[0-9a-f]{16,} " out || :
} >out.objectlist &&
if test_line_count = 1 out.objectlist
then
diff --git a/t/t5750-bundle-uri-parse.sh b/t/t5750-bundle-uri-parse.sh
new file mode 100755
index 0000000000..c2fe3f9c5a
--- /dev/null
+++ b/t/t5750-bundle-uri-parse.sh
@@ -0,0 +1,171 @@
+#!/bin/sh
+
+test_description="Test bundle-uri bundle_uri_parse_line()"
+
+TEST_NO_CREATE_REPO=1
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+
+test_expect_success 'bundle_uri_parse_line() just URIs' '
+ cat >in <<-\EOF &&
+ bundle.one.uri=http://example.com/bundle.bdl
+ bundle.two.uri=https://example.com/bundle.bdl
+ bundle.three.uri=file:///usr/share/git/bundle.bdl
+ EOF
+
+ cat >expect <<-\EOF &&
+ [bundle]
+ version = 1
+ mode = all
+ [bundle "one"]
+ uri = http://example.com/bundle.bdl
+ [bundle "two"]
+ uri = https://example.com/bundle.bdl
+ [bundle "three"]
+ uri = file:///usr/share/git/bundle.bdl
+ EOF
+
+ test-tool bundle-uri parse-key-values in >actual 2>err &&
+ test_must_be_empty err &&
+ test_cmp_config_output expect actual
+'
+
+test_expect_success 'bundle_uri_parse_line() parsing edge cases: empty key or value' '
+ cat >in <<-\EOF &&
+ =bogus-value
+ bogus-key=
+ EOF
+
+ cat >err.expect <<-EOF &&
+ error: bundle-uri: line has empty key or value
+ error: bad line: '\''=bogus-value'\''
+ error: bundle-uri: line has empty key or value
+ error: bad line: '\''bogus-key='\''
+ EOF
+
+ cat >expect <<-\EOF &&
+ [bundle]
+ version = 1
+ mode = all
+ EOF
+
+ test_must_fail test-tool bundle-uri parse-key-values in >actual 2>err &&
+ test_cmp err.expect err &&
+ test_cmp_config_output expect actual
+'
+
+test_expect_success 'bundle_uri_parse_line() parsing edge cases: empty lines' '
+ cat >in <<-\EOF &&
+ bundle.one.uri=http://example.com/bundle.bdl
+
+ bundle.two.uri=https://example.com/bundle.bdl
+
+ bundle.three.uri=file:///usr/share/git/bundle.bdl
+ EOF
+
+ cat >err.expect <<-\EOF &&
+ error: bundle-uri: got an empty line
+ error: bad line: '\'''\''
+ error: bundle-uri: got an empty line
+ error: bad line: '\'''\''
+ EOF
+
+ # We fail, but try to continue parsing regardless
+ cat >expect <<-\EOF &&
+ [bundle]
+ version = 1
+ mode = all
+ [bundle "one"]
+ uri = http://example.com/bundle.bdl
+ [bundle "two"]
+ uri = https://example.com/bundle.bdl
+ [bundle "three"]
+ uri = file:///usr/share/git/bundle.bdl
+ EOF
+
+ test_must_fail test-tool bundle-uri parse-key-values in >actual 2>err &&
+ test_cmp err.expect err &&
+ test_cmp_config_output expect actual
+'
+
+test_expect_success 'bundle_uri_parse_line() parsing edge cases: duplicate lines' '
+ cat >in <<-\EOF &&
+ bundle.one.uri=http://example.com/bundle.bdl
+ bundle.two.uri=https://example.com/bundle.bdl
+ bundle.one.uri=https://example.com/bundle-2.bdl
+ bundle.three.uri=file:///usr/share/git/bundle.bdl
+ EOF
+
+ cat >err.expect <<-\EOF &&
+ error: bad line: '\''bundle.one.uri=https://example.com/bundle-2.bdl'\''
+ EOF
+
+ # We fail, but try to continue parsing regardless
+ cat >expect <<-\EOF &&
+ [bundle]
+ version = 1
+ mode = all
+ [bundle "one"]
+ uri = http://example.com/bundle.bdl
+ [bundle "two"]
+ uri = https://example.com/bundle.bdl
+ [bundle "three"]
+ uri = file:///usr/share/git/bundle.bdl
+ EOF
+
+ test_must_fail test-tool bundle-uri parse-key-values in >actual 2>err &&
+ test_cmp err.expect err &&
+ test_cmp_config_output expect actual
+'
+
+test_expect_success 'parse config format: just URIs' '
+ cat >expect <<-\EOF &&
+ [bundle]
+ version = 1
+ mode = all
+ [bundle "one"]
+ uri = http://example.com/bundle.bdl
+ [bundle "two"]
+ uri = https://example.com/bundle.bdl
+ [bundle "three"]
+ uri = file:///usr/share/git/bundle.bdl
+ EOF
+
+ test-tool bundle-uri parse-config expect >actual 2>err &&
+ test_must_be_empty err &&
+ test_cmp_config_output expect actual
+'
+
+test_expect_success 'parse config format edge cases: empty key or value' '
+ cat >in1 <<-\EOF &&
+ = bogus-value
+ EOF
+
+ cat >err1 <<-EOF &&
+ error: bad config line 1 in file in1
+ EOF
+
+ cat >expect <<-\EOF &&
+ [bundle]
+ version = 1
+ mode = all
+ EOF
+
+ test_must_fail test-tool bundle-uri parse-config in1 >actual 2>err &&
+ test_cmp err1 err &&
+ test_cmp_config_output expect actual &&
+
+ cat >in2 <<-\EOF &&
+ bogus-key =
+ EOF
+
+ cat >err2 <<-EOF &&
+ error: bad config line 1 in file in2
+ EOF
+
+ test_must_fail test-tool bundle-uri parse-config in2 >actual 2>err &&
+ test_cmp err2 err &&
+ test_cmp_config_output expect actual
+'
+
+test_done
diff --git a/t/t6018-rev-list-glob.sh b/t/t6018-rev-list-glob.sh
index e1abc5c2b3..aabf590dda 100755
--- a/t/t6018-rev-list-glob.sh
+++ b/t/t6018-rev-list-glob.sh
@@ -187,6 +187,46 @@ test_expect_success 'rev-parse --exclude=ref with --remotes=glob' '
compare rev-parse "--exclude=upstream/x --remotes=upstream/*" "upstream/one upstream/two"
'
+for section in receive uploadpack
+do
+ test_expect_success "rev-parse --exclude-hidden=$section with --all" '
+ compare "-c transfer.hideRefs=refs/remotes/ rev-parse" "--branches --tags" "--exclude-hidden=$section --all"
+ '
+
+ test_expect_success "rev-parse --exclude-hidden=$section with --all" '
+ compare "-c transfer.hideRefs=refs/heads/subspace/ rev-parse" "--exclude=refs/heads/subspace/* --all" "--exclude-hidden=$section --all"
+ '
+
+ test_expect_success "rev-parse --exclude-hidden=$section with --glob" '
+ compare "-c transfer.hideRefs=refs/heads/subspace/ rev-parse" "--exclude=refs/heads/subspace/* --glob=refs/heads/*" "--exclude-hidden=$section --glob=refs/heads/*"
+ '
+
+ test_expect_success "rev-parse --exclude-hidden=$section can be passed once per pseudo-ref" '
+ compare "-c transfer.hideRefs=refs/remotes/ rev-parse" "--branches --tags --branches --tags" "--exclude-hidden=$section --all --exclude-hidden=$section --all"
+ '
+
+ test_expect_success "rev-parse --exclude-hidden=$section can only be passed once per pseudo-ref" '
+ echo "fatal: --exclude-hidden= passed more than once" >expected &&
+ test_must_fail git rev-parse --exclude-hidden=$section --exclude-hidden=$section 2>err &&
+ test_cmp expected err
+ '
+
+ for pseudoopt in branches tags remotes
+ do
+ test_expect_success "rev-parse --exclude-hidden=$section fails with --$pseudoopt" '
+ echo "error: --exclude-hidden cannot be used together with --$pseudoopt" >expected &&
+ test_must_fail git rev-parse --exclude-hidden=$section --$pseudoopt 2>err &&
+ test_cmp expected err
+ '
+
+ test_expect_success "rev-parse --exclude-hidden=$section fails with --$pseudoopt=pattern" '
+ echo "error: --exclude-hidden cannot be used together with --$pseudoopt" >expected &&
+ test_must_fail git rev-parse --exclude-hidden=$section --$pseudoopt=pattern 2>err &&
+ test_cmp expected err
+ '
+ done
+done
+
test_expect_success 'rev-list --exclude=glob with --branches=glob' '
compare rev-list "--exclude=subspace-* --branches=sub*" "subspace/one subspace/two"
'
diff --git a/t/t6021-rev-list-exclude-hidden.sh b/t/t6021-rev-list-exclude-hidden.sh
new file mode 100755
index 0000000000..32b2b09413
--- /dev/null
+++ b/t/t6021-rev-list-exclude-hidden.sh
@@ -0,0 +1,163 @@
+#!/bin/sh
+
+test_description='git rev-list --exclude-hidden test'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit_bulk --id=commit --ref=refs/heads/branch 1 &&
+ COMMIT=$(git rev-parse refs/heads/branch) &&
+ test_commit_bulk --id=tag --ref=refs/tags/lightweight 1 &&
+ TAG=$(git rev-parse refs/tags/lightweight) &&
+ test_commit_bulk --id=hidden --ref=refs/hidden/commit 1 &&
+ HIDDEN=$(git rev-parse refs/hidden/commit) &&
+ test_commit_bulk --id=namespace --ref=refs/namespaces/namespace/refs/namespaced/commit 1 &&
+ NAMESPACE=$(git rev-parse refs/namespaces/namespace/refs/namespaced/commit)
+'
+
+test_expect_success 'invalid section' '
+ echo "fatal: unsupported section for hidden refs: unsupported" >expected &&
+ test_must_fail git rev-list --exclude-hidden=unsupported 2>err &&
+ test_cmp expected err
+'
+
+for section in receive uploadpack
+do
+ test_expect_success "$section: passed multiple times" '
+ echo "fatal: --exclude-hidden= passed more than once" >expected &&
+ test_must_fail git rev-list --exclude-hidden=$section --exclude-hidden=$section 2>err &&
+ test_cmp expected err
+ '
+
+ test_expect_success "$section: without hiddenRefs" '
+ git rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $HIDDEN
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: hidden via transfer.hideRefs" '
+ git -c transfer.hideRefs=refs/hidden/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: hidden via $section.hideRefs" '
+ git -c $section.hideRefs=refs/hidden/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: respects both transfer.hideRefs and $section.hideRefs" '
+ git -c transfer.hideRefs=refs/tags/ -c $section.hideRefs=refs/hidden/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: negation without hidden refs marks everything as uninteresting" '
+ git rev-list --all --exclude-hidden=$section --not --all >out &&
+ test_must_be_empty out
+ '
+
+ test_expect_success "$section: negation with hidden refs marks them as interesting" '
+ git -c transfer.hideRefs=refs/hidden/ rev-list --all --exclude-hidden=$section --not --all >out &&
+ cat >expected <<-EOF &&
+ $HIDDEN
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: hidden refs and excludes work together" '
+ git -c transfer.hideRefs=refs/hidden/ rev-list --exclude=refs/tags/* --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: excluded hidden refs get reset" '
+ git -c transfer.hideRefs=refs/ rev-list --exclude-hidden=$section --all --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $HIDDEN
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: excluded hidden refs can be used with multiple pseudo-refs" '
+ git -c transfer.hideRefs=refs/ rev-list --exclude-hidden=$section --all --exclude-hidden=$section --all >out &&
+ test_must_be_empty out
+ '
+
+ test_expect_success "$section: works with --glob" '
+ git -c transfer.hideRefs=refs/hidden/ rev-list --exclude-hidden=$section --glob=refs/h* >out &&
+ cat >expected <<-EOF &&
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: operates on stripped refs by default" '
+ GIT_NAMESPACE=namespace git -c transfer.hideRefs=refs/namespaced/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $HIDDEN
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: does not hide namespace by default" '
+ GIT_NAMESPACE=namespace git -c transfer.hideRefs=refs/namespaces/namespace/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $HIDDEN
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: can operate on unstripped refs" '
+ GIT_NAMESPACE=namespace git -c transfer.hideRefs=^refs/namespaces/namespace/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $HIDDEN
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ for pseudoopt in remotes branches tags
+ do
+ test_expect_success "$section: fails with --$pseudoopt" '
+ test_must_fail git rev-list --exclude-hidden=$section --$pseudoopt 2>err &&
+ test_i18ngrep "error: --exclude-hidden cannot be used together with --$pseudoopt" err
+ '
+
+ test_expect_success "$section: fails with --$pseudoopt=pattern" '
+ test_must_fail git rev-list --exclude-hidden=$section --$pseudoopt=pattern 2>err &&
+ test_i18ngrep "error: --exclude-hidden cannot be used together with --$pseudoopt" err
+ '
+ done
+done
+
+test_done
diff --git a/t/t6030-bisect-porcelain.sh b/t/t6030-bisect-porcelain.sh
index 83931d482f..98a72ff78a 100755
--- a/t/t6030-bisect-porcelain.sh
+++ b/t/t6030-bisect-porcelain.sh
@@ -34,6 +34,36 @@ HASH2=
HASH3=
HASH4=
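+# Check that the given command fails with the expected exit code, prints
+# nothing on stdout, and emits exactly the usage error supplied on stdin.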
+test_bisect_usage () {
+ local code="$1" &&
+ shift &&
+ cat >expect &&
+ test_expect_code $code "$@" >out 2>actual &&
+ test_must_be_empty out &&
+ test_cmp expect actual
+}
+
+test_expect_success 'bisect usage' "
+ test_bisect_usage 1 git bisect reset extra1 extra2 <<-\EOF &&
+ error: 'git bisect reset' requires either no argument or a commit
+ EOF
+ test_bisect_usage 1 git bisect terms extra1 extra2 <<-\EOF &&
+ error: 'git bisect terms' requires 0 or 1 argument
+ EOF
+ test_bisect_usage 1 git bisect next extra1 <<-\EOF &&
+ error: 'git bisect next' requires 0 arguments
+ EOF
+ test_bisect_usage 1 git bisect log extra1 <<-\EOF &&
+ error: We are not bisecting.
+ EOF
+ test_bisect_usage 1 git bisect replay <<-\EOF &&
+ error: no logfile given
+ EOF
+ test_bisect_usage 1 git bisect run <<-\EOF
+ error: 'git bisect run' failed: no command provided.
+ EOF
+"
+
test_expect_success 'set up basic repo with 1 file (hello) and 4 commits' '
add_line_into_file "1: Hello World" hello &&
HASH1=$(git rev-parse --verify HEAD) &&
@@ -252,6 +282,124 @@ test_expect_success 'bisect skip: with commit both bad and skipped' '
grep $HASH4 my_bisect_log.txt
'
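+# Run "git bisect run ./run.sh" with the given arguments and compare the
+# arguments seen by the script against the here-doc on stdin, the pruned
+# bisect output against fd 6, and stderr against fd 7.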
+test_bisect_run_args () {
+ test_when_finished "rm -f run.sh actual" &&
+ >actual &&
+ cat >expect.args &&
+ cat <&6 >expect.out &&
+ cat <&7 >expect.err &&
+ write_script run.sh <<-\EOF &&
+ while test $# != 0
+ do
+ echo "<$1>" &&
+ shift
+ done >actual.args
+ EOF
+
+ test_when_finished "git bisect reset" &&
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ git bisect run ./run.sh $@ >actual.out.raw 2>actual.err &&
+ # Prune just the log output
+ sed -n \
+ -e '/^Author:/d' \
+ -e '/^Date:/d' \
+ -e '/^$/d' \
+ -e '/^commit /d' \
+ -e '/^ /d' \
+ -e 'p' \
+ <actual.out.raw >actual.out &&
+ test_cmp expect.out actual.out &&
+ test_cmp expect.err actual.err &&
+ test_cmp expect.args actual.args
+}
+
+test_expect_success 'git bisect run: args, stdout and stderr with no arguments' "
+ test_bisect_run_args <<-'EOF_ARGS' 6<<-EOF_OUT 7<<-'EOF_ERR'
+ EOF_ARGS
+ running './run.sh'
+ $HASH4 is the first bad commit
+ bisect found first bad commit
+ EOF_OUT
+ EOF_ERR
+"
+
+test_expect_success 'git bisect run: args, stdout and stderr: "--" argument' "
+ test_bisect_run_args -- <<-'EOF_ARGS' 6<<-EOF_OUT 7<<-'EOF_ERR'
+ <-->
+ EOF_ARGS
+ running './run.sh' '--'
+ $HASH4 is the first bad commit
+ bisect found first bad commit
+ EOF_OUT
+ EOF_ERR
+"
+
+test_expect_success 'git bisect run: args, stdout and stderr: "--log foo --no-log bar" arguments' "
+ test_bisect_run_args --log foo --no-log bar <<-'EOF_ARGS' 6<<-EOF_OUT 7<<-'EOF_ERR'
+ <--log>
+ <foo>
+ <--no-log>
+ <bar>
+ EOF_ARGS
+ running './run.sh' '--log' 'foo' '--no-log' 'bar'
+ $HASH4 is the first bad commit
+ bisect found first bad commit
+ EOF_OUT
+ EOF_ERR
+"
+
+test_expect_success 'git bisect run: args, stdout and stderr: "--bisect-start" argument' "
+ test_bisect_run_args --bisect-start <<-'EOF_ARGS' 6<<-EOF_OUT 7<<-'EOF_ERR'
+ <--bisect-start>
+ EOF_ARGS
+ running './run.sh' '--bisect-start'
+ $HASH4 is the first bad commit
+ bisect found first bad commit
+ EOF_OUT
+ EOF_ERR
+"
+
+test_expect_success 'git bisect run: negative exit code' "
+ write_script fail.sh <<-'EOF' &&
+ exit 255
+ EOF
+ cat <<-'EOF' >expect &&
+ bisect run failed: exit code -1 from './fail.sh' is < 0 or >= 128
+ EOF
+ test_when_finished 'git bisect reset' &&
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ ! git bisect run ./fail.sh 2>err &&
+ sed -En 's/.*(bisect.*code) (-?[0-9]+) (from.*)/\1 -1 \3/p' err >actual &&
+ test_cmp expect actual
+"
+
+test_expect_success 'git bisect run: unable to verify on good' "
+ write_script fail.sh <<-'EOF' &&
+ head=\$(git rev-parse --verify HEAD)
+ good=\$(git rev-parse --verify $HASH1)
+ if test "\$head" = "\$good"
+ then
+ exit 255
+ else
+ exit 127
+ fi
+ EOF
+ cat <<-'EOF' >expect &&
+ unable to verify './fail.sh' on good revision
+ EOF
+ test_when_finished 'git bisect reset' &&
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ ! git bisect run ./fail.sh 2>err &&
+ sed -n 's/.*\(unable to verify.*\)/\1/p' err >actual &&
+ test_cmp expect actual
+"
+
# We want to automatically find the commit that
# added "Another" into hello.
test_expect_success '"git bisect run" simple case' '
@@ -266,6 +414,16 @@ test_expect_success '"git bisect run" simple case' '
git bisect reset
'
+# We want to make sure no arguments have been eaten
+test_expect_success '"git bisect run" simple case' '
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ git bisect run printf "%s %s\n" reset --bisect-skip >my_bisect_log.txt &&
+ grep -e "reset --bisect-skip" my_bisect_log.txt &&
+ git bisect reset
+'
+
# We want to automatically find the commit that
# added "Ciao" into hello.
test_expect_success '"git bisect run" with more complex "git bisect start"' '
diff --git a/t/t6060-merge-index.sh b/t/t6060-merge-index.sh
index ed449abe55..1a8b64cce1 100755
--- a/t/t6060-merge-index.sh
+++ b/t/t6060-merge-index.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='basic git merge-index / git-merge-one-file tests'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup diverging branches' '
diff --git a/t/t6102-rev-list-unexpected-objects.sh b/t/t6102-rev-list-unexpected-objects.sh
index 4a9a4436e2..9350b5fd2c 100755
--- a/t/t6102-rev-list-unexpected-objects.sh
+++ b/t/t6102-rev-list-unexpected-objects.sh
@@ -121,8 +121,8 @@ test_expect_success 'setup unexpected non-blob tag' '
tag=$(git hash-object -w --literally -t tag broken-tag)
'
-test_expect_success 'TODO (should fail!): traverse unexpected non-blob tag (lone)' '
- git rev-list --objects $tag
+test_expect_success 'traverse unexpected non-blob tag (lone)' '
+ test_must_fail git rev-list --objects $tag
'
test_expect_success 'traverse unexpected non-blob tag (seen)' '
diff --git a/t/t6300-for-each-ref.sh b/t/t6300-for-each-ref.sh
index dcaab7265f..fa38b87441 100755
--- a/t/t6300-for-each-ref.sh
+++ b/t/t6300-for-each-ref.sh
@@ -1406,4 +1406,44 @@ test_expect_success 'for-each-ref reports broken tags' '
refs/tags/broken-tag-*
'
+test_expect_success 'set up tag with signature and no blank lines' '
+ git tag -F - fake-sig-no-blanks <<-\EOF
+ this is the subject
+ -----BEGIN PGP SIGNATURE-----
+ not a real signature, but we just care about the
+ subject/body parsing. It is important here that
+ there are no blank lines in the signature.
+ -----END PGP SIGNATURE-----
+ EOF
+'
+
+test_atom refs/tags/fake-sig-no-blanks contents:subject 'this is the subject'
+test_atom refs/tags/fake-sig-no-blanks contents:body ''
+test_atom refs/tags/fake-sig-no-blanks contents:signature "$sig"
+
+test_expect_success 'set up tag with CRLF signature' '
+ append_cr <<-\EOF |
+ this is the subject
+ -----BEGIN PGP SIGNATURE-----
+
+ not a real signature, but we just care about
+ the subject/body parsing. It is important here
+ that there is a blank line separating this
+ from the signature header.
+ -----END PGP SIGNATURE-----
+ EOF
+ git tag -F - --cleanup=verbatim fake-sig-crlf
+'
+
+test_atom refs/tags/fake-sig-crlf contents:subject 'this is the subject'
+test_atom refs/tags/fake-sig-crlf contents:body ''
+
+# CRLF is retained in the signature, so we have to pass our expected value
+# through append_cr. But test_atom requires a shell string, which means command
+# substitution, and the shell will strip trailing newlines from the output of
+# the substitution. Hack around it by adding and then removing a dummy line.
+sig_crlf="$(printf "%s" "$sig" | append_cr; echo dummy)"
+sig_crlf=${sig_crlf%dummy}
+test_atom refs/tags/fake-sig-crlf contents:signature "$sig_crlf"
+
test_done
diff --git a/t/t6301-for-each-ref-errors.sh b/t/t6301-for-each-ref-errors.sh
index 40edf9dab5..bfda1f46ad 100755
--- a/t/t6301-for-each-ref-errors.sh
+++ b/t/t6301-for-each-ref-errors.sh
@@ -2,6 +2,7 @@
test_description='for-each-ref errors for broken refs'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
ZEROS=$ZERO_OID
diff --git a/t/t6401-merge-criss-cross.sh b/t/t6401-merge-criss-cross.sh
index 9d5e992878..1962310408 100755
--- a/t/t6401-merge-criss-cross.sh
+++ b/t/t6401-merge-criss-cross.sh
@@ -8,6 +8,8 @@
test_description='Test criss-cross merge'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'prepare repository' '
diff --git a/t/t6406-merge-attr.sh b/t/t6406-merge-attr.sh
index 8650a88c40..5e4e4dd6d9 100755
--- a/t/t6406-merge-attr.sh
+++ b/t/t6406-merge-attr.sh
@@ -8,6 +8,7 @@ test_description='per path merge controlled by merge attribute'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
diff --git a/t/t6407-merge-binary.sh b/t/t6407-merge-binary.sh
index e8a28717ce..0753fc95f4 100755
--- a/t/t6407-merge-binary.sh
+++ b/t/t6407-merge-binary.sh
@@ -5,6 +5,7 @@ test_description='ask merge-recursive to merge binary files'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
diff --git a/t/t6415-merge-dir-to-symlink.sh b/t/t6415-merge-dir-to-symlink.sh
index 2655e295f5..ae00492c76 100755
--- a/t/t6415-merge-dir-to-symlink.sh
+++ b/t/t6415-merge-dir-to-symlink.sh
@@ -4,6 +4,7 @@ test_description='merging when a directory was replaced with a symlink'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'create a commit where dir a/b changed to symlink' '
diff --git a/t/t6423-merge-rename-directories.sh b/t/t6423-merge-rename-directories.sh
index a4941878fe..944de75b80 100755
--- a/t/t6423-merge-rename-directories.sh
+++ b/t/t6423-merge-rename-directories.sh
@@ -5304,6 +5304,62 @@ test_expect_merge_algorithm failure success '12l (A into B): Rename into each ot
)
'
+# Testcase 12m, Directory rename, plus change of parent dir to symlink
+# Commit O: dir/subdir/file
+# Commit A: renamed-dir/subdir/file
+# Commit B: dir/subdir
+# In words:
+# A: dir/subdir/ -> renamed-dir/subdir
+# B: delete dir/subdir/file, add dir/subdir as symlink
+#
+# Expected: CONFLICT (rename/delete): renamed-dir/subdir/file,
+# CONFLICT (file location): renamed-dir/subdir vs. dir/subdir
+# CONFLICT (directory/file): renamed-dir/subdir symlink has
+# renamed-dir/subdir in the way
+
+test_setup_12m () {
+ git init 12m &&
+ (
+ cd 12m &&
+
+ mkdir -p dir/subdir &&
+ echo 1 >dir/subdir/file &&
+ git add . &&
+ git commit -m "O" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git switch A &&
+ git mv dir/ renamed-dir/ &&
+ git add . &&
+ git commit -m "A" &&
+
+ git switch B &&
+ git rm dir/subdir/file &&
+ mkdir dir &&
+ ln -s /dev/null dir/subdir &&
+ git add . &&
+ git commit -m "B"
+ )
+}
+
+test_expect_merge_algorithm failure success '12m: Change parent of renamed-dir to symlink on other side' '
+ test_setup_12m &&
+ (
+ cd 12m &&
+
+ git checkout -q A^0 &&
+
+ test_must_fail git -c merge.directoryRenames=conflict merge -s recursive B^0 &&
+
+ test_stdout_line_count = 3 git ls-files -s &&
+ test_stdout_line_count = 2 ls -1 renamed-dir &&
+ test_path_is_missing dir
+ )
+'
+
###########################################################################
# SECTION 13: Checking informational and conflict messages
#
diff --git a/t/t6435-merge-sparse.sh b/t/t6435-merge-sparse.sh
index fde4aa3cd1..78628fb248 100755
--- a/t/t6435-merge-sparse.sh
+++ b/t/t6435-merge-sparse.sh
@@ -3,6 +3,7 @@
test_description='merge with sparse files'
TEST_CREATE_REPO_NO_TEMPLATE=1
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# test_file $filename $content
diff --git a/t/t6500-gc.sh b/t/t6500-gc.sh
index cd6c53360d..d9acb63951 100755
--- a/t/t6500-gc.sh
+++ b/t/t6500-gc.sh
@@ -202,6 +202,102 @@ test_expect_success 'one of gc.reflogExpire{Unreachable,}=never does not skip "e
grep -E "^trace: (built-in|exec|run_command): git reflog expire --" trace.out
'
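+# Create two commits ("foo" and "bar") and then reset the branch back past
+# them, leaving them unreachable so a cruft pack has something to collect.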
+prepare_cruft_history () {
+ test_commit base &&
+
+ test_commit --no-tag foo &&
+ test_commit --no-tag bar &&
+ git reset HEAD^^
+}
+
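+# Assert that at least one cruft pack exists: there must be *.mtimes files,
+# each with a corresponding *.pack file.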
+assert_cruft_packs () {
+ find .git/objects/pack -name "*.mtimes" >mtimes &&
+ sed -e 's/\.mtimes$/\.pack/g' mtimes >packs &&
+
+ test_file_not_empty packs &&
+ while read pack
+ do
+ test_path_is_file "$pack" || return 1
+ done <packs
+}
+
+assert_no_cruft_packs () {
+ find .git/objects/pack -name "*.mtimes" >mtimes &&
+ test_must_be_empty mtimes
+}
+
+test_expect_success 'gc --cruft generates a cruft pack' '
+ test_when_finished "rm -fr crufts" &&
+ git init crufts &&
+ (
+ cd crufts &&
+
+ prepare_cruft_history &&
+ git gc --cruft &&
+ assert_cruft_packs
+ )
+'
+
+test_expect_success 'gc.cruftPacks=true generates a cruft pack' '
+ test_when_finished "rm -fr crufts" &&
+ git init crufts &&
+ (
+ cd crufts &&
+
+ prepare_cruft_history &&
+ git -c gc.cruftPacks=true gc &&
+ assert_cruft_packs
+ )
+'
+
+test_expect_success 'feature.experimental=true generates a cruft pack' '
+ git init crufts &&
+ test_when_finished "rm -fr crufts" &&
+ (
+ cd crufts &&
+
+ prepare_cruft_history &&
+ git -c feature.experimental=true gc &&
+ assert_cruft_packs
+ )
+'
+
+test_expect_success 'feature.experimental=false allows explicit cruft packs' '
+ git init crufts &&
+ test_when_finished "rm -fr crufts" &&
+ (
+ cd crufts &&
+
+ prepare_cruft_history &&
+ git -c gc.cruftPacks=true -c feature.experimental=false gc &&
+ assert_cruft_packs
+ )
+'
+
+test_expect_success 'feature.experimental=true can be overridden' '
+ git init crufts &&
+ test_when_finished "rm -fr crufts" &&
+ (
+ cd crufts &&
+
+ prepare_cruft_history &&
+ git -c feature.experimental=true -c gc.cruftPacks=false gc &&
+ assert_no_cruft_packs
+ )
+'
+
+test_expect_success 'feature.experimental=false avoids cruft packs by default' '
+ git init crufts &&
+ test_when_finished "rm -fr crufts" &&
+ (
+ cd crufts &&
+
+ prepare_cruft_history &&
+ git -c feature.experimental=false gc &&
+ assert_no_cruft_packs
+ )
+'
+
run_and_wait_for_auto_gc () {
# We read stdout from gc for the side effect of waiting until the
# background gc process exits, closing its fd 9. Furthermore, the
diff --git a/t/t7001-mv.sh b/t/t7001-mv.sh
index 8c37bceb33..d72cef8826 100755
--- a/t/t7001-mv.sh
+++ b/t/t7001-mv.sh
@@ -60,8 +60,8 @@ test_expect_success 'checking the commit' '
test_expect_success 'mv --dry-run does not move file' '
git mv -n path0/COPYING MOVED &&
- test -f path0/COPYING &&
- test ! -f MOVED
+ test_path_is_file path0/COPYING &&
+ test_path_is_missing MOVED
'
test_expect_success 'checking -k on non-existing file' '
@@ -71,25 +71,25 @@ test_expect_success 'checking -k on non-existing file' '
test_expect_success 'checking -k on untracked file' '
>untracked1 &&
git mv -k untracked1 path0 &&
- test -f untracked1 &&
- test ! -f path0/untracked1
+ test_path_is_file untracked1 &&
+ test_path_is_missing path0/untracked1
'
test_expect_success 'checking -k on multiple untracked files' '
>untracked2 &&
git mv -k untracked1 untracked2 path0 &&
- test -f untracked1 &&
- test -f untracked2 &&
- test ! -f path0/untracked1 &&
- test ! -f path0/untracked2
+ test_path_is_file untracked1 &&
+ test_path_is_file untracked2 &&
+ test_path_is_missing path0/untracked1 &&
+ test_path_is_missing path0/untracked2
'
test_expect_success 'checking -f on untracked file with existing target' '
>path0/untracked1 &&
test_must_fail git mv -f untracked1 path0 &&
- test ! -f .git/index.lock &&
- test -f untracked1 &&
- test -f path0/untracked1
+ test_path_is_missing .git/index.lock &&
+ test_path_is_file untracked1 &&
+ test_path_is_file path0/untracked1
'
# clean up the mess in case bad things happen
@@ -215,8 +215,8 @@ test_expect_success 'absolute pathname' '
git add sub/file &&
git mv sub "$(pwd)/in" &&
- ! test -d sub &&
- test -d in &&
+ test_path_is_missing sub &&
+ test_path_is_dir in &&
git ls-files --error-unmatch in/file
)
'
@@ -234,8 +234,8 @@ test_expect_success 'absolute pathname outside should fail' '
git add sub/file &&
test_must_fail git mv sub "$out/out" &&
- test -d sub &&
- ! test -d ../in &&
+ test_path_is_dir sub &&
+ test_path_is_missing ../in &&
git ls-files --error-unmatch sub/file
)
'
@@ -295,8 +295,8 @@ test_expect_success 'git mv should overwrite symlink to a file' '
git add moved &&
test_must_fail git mv moved symlink &&
git mv -f moved symlink &&
- ! test -e moved &&
- test -f symlink &&
+ test_path_is_missing moved &&
+ test_path_is_file symlink &&
test "$(cat symlink)" = 1 &&
git update-index --refresh &&
git diff-files --quiet
@@ -312,13 +312,13 @@ test_expect_success 'git mv should overwrite file with a symlink' '
git add moved &&
test_must_fail git mv symlink moved &&
git mv -f symlink moved &&
- ! test -e symlink &&
+ test_path_is_missing symlink &&
git update-index --refresh &&
git diff-files --quiet
'
test_expect_success SYMLINKS 'check moved symlink' '
- test -h moved
+ test_path_is_symlink moved
'
rm -f moved symlink
@@ -352,7 +352,7 @@ test_expect_success 'git mv moves a submodule with a .git directory and no .gitm
) &&
mkdir mod &&
git mv sub mod/sub &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
git update-index --refresh &&
@@ -372,7 +372,7 @@ test_expect_success 'git mv moves a submodule with a .git directory and .gitmodu
) &&
mkdir mod &&
git mv sub mod/sub &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
echo mod/sub >expected &&
@@ -389,7 +389,7 @@ test_expect_success 'git mv moves a submodule with gitfile' '
entry="$(git ls-files --stage sub | cut -f 1)" &&
mkdir mod &&
git -C mod mv ../sub/ . &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
echo mod/sub >expected &&
@@ -408,7 +408,7 @@ test_expect_success 'mv does not complain when no .gitmodules file is found' '
mkdir mod &&
git mv sub mod/sub 2>actual.err &&
test_must_be_empty actual.err &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
git update-index --refresh &&
@@ -423,13 +423,13 @@ test_expect_success 'mv will error out on a modified .gitmodules file unless sta
entry="$(git ls-files --stage sub | cut -f 1)" &&
mkdir mod &&
test_must_fail git mv sub mod/sub 2>actual.err &&
- test -s actual.err &&
- test -e sub &&
+ test_file_not_empty actual.err &&
+ test_path_exists sub &&
git diff-files --quiet -- sub &&
git add .gitmodules &&
git mv sub mod/sub 2>actual.err &&
test_must_be_empty actual.err &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
git update-index --refresh &&
@@ -447,7 +447,7 @@ test_expect_success 'mv issues a warning when section is not found in .gitmodule
mkdir mod &&
git mv sub mod/sub 2>actual.err &&
test_cmp expect.err actual.err &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
git update-index --refresh &&
@@ -460,7 +460,7 @@ test_expect_success 'mv --dry-run does not touch the submodule or .gitmodules' '
git submodule update &&
mkdir mod &&
git mv -n sub mod/sub 2>actual.err &&
- test -f sub/.git &&
+ test_path_is_file sub/.git &&
git diff-index --exit-code HEAD &&
git update-index --refresh &&
git diff-files --quiet -- sub .gitmodules
@@ -474,10 +474,10 @@ test_expect_success 'checking out a commit before submodule moved needs manual u
git status -s sub2 >actual &&
echo "?? sub2/" >expected &&
test_cmp expected actual &&
- ! test -f sub/.git &&
- test -f sub2/.git &&
+ test_path_is_missing sub/.git &&
+ test_path_is_file sub2/.git &&
git submodule update &&
- test -f sub/.git &&
+ test_path_is_file sub/.git &&
rm -rf sub2 &&
git diff-index --exit-code HEAD &&
git update-index --refresh &&
diff --git a/t/t7003-filter-branch.sh b/t/t7003-filter-branch.sh
index e18a218952..f6aebe92ff 100755
--- a/t/t7003-filter-branch.sh
+++ b/t/t7003-filter-branch.sh
@@ -49,7 +49,7 @@ test_expect_success 'result is really identical' '
test_expect_success 'rewrite bare repository identically' '
(git config core.bare true && cd .git &&
git filter-branch branch > filter-output 2>&1 &&
- ! fgrep fatal filter-output)
+ ! grep fatal filter-output)
'
git config core.bare false
test_expect_success 'result is really identical' '
@@ -506,7 +506,7 @@ test_expect_success 'rewrite repository including refs that point at non-commit
git tag -a -m "tag to a tree" treetag $new_tree &&
git reset --hard HEAD &&
git filter-branch -f -- --all >filter-output 2>&1 &&
- ! fgrep fatal filter-output
+ ! grep fatal filter-output
'
test_expect_success 'filter-branch handles ref deletion' '
diff --git a/t/t7065-wtstatus-slow.sh b/t/t7065-wtstatus-slow.sh
new file mode 100755
index 0000000000..8d08a962f8
--- /dev/null
+++ b/t/t7065-wtstatus-slow.sh
@@ -0,0 +1,70 @@
+#!/bin/sh
+
+test_description='test status when slow untracked files'
+
+. ./test-lib.sh
+
+GIT_TEST_UF_DELAY_WARNING=1
+export GIT_TEST_UF_DELAY_WARNING
+
+test_expect_success setup '
+ git checkout -b test &&
+ cat >.gitignore <<-\EOF &&
+ /actual
+ /expected
+ /out
+ EOF
+ git add .gitignore &&
+ git commit -m "Add .gitignore"
+'
+
+test_expect_success 'when core.untrackedCache and fsmonitor are unset' '
+ test_might_fail git config --unset-all core.untrackedCache &&
+ test_might_fail git config --unset-all core.fsmonitor &&
+ git status >out &&
+ sed "s/[0-9]\.[0-9][0-9]/X/g" out >actual &&
+ cat >expected <<-\EOF &&
+ On branch test
+
+ It took X seconds to enumerate untracked files.
+ See '"'"'git help status'"'"' for information on how to improve this.
+
+ nothing to commit, working tree clean
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'when core.untrackedCache true, but not fsmonitor' '
+ test_config core.untrackedCache true &&
+ test_might_fail git config --unset-all core.fsmonitor &&
+ git status >out &&
+ sed "s/[0-9]\.[0-9][0-9]/X/g" out >actual &&
+ cat >expected <<-\EOF &&
+ On branch test
+
+ It took X seconds to enumerate untracked files.
+ See '"'"'git help status'"'"' for information on how to improve this.
+
+ nothing to commit, working tree clean
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'when core.untrackedCache true, and fsmonitor' '
+ test_config core.untrackedCache true &&
+ test_config core.fsmonitor true &&
+ git status >out &&
+ sed "s/[0-9]\.[0-9][0-9]/X/g" out >actual &&
+ cat >expected <<-\EOF &&
+ On branch test
+
+ It took X seconds to enumerate untracked files,
+ but the results were cached, and subsequent runs may be faster.
+ See '"'"'git help status'"'"' for information on how to improve this.
+
+ nothing to commit, working tree clean
+ EOF
+ test_cmp expected actual
+'
+
+test_done
diff --git a/t/t7103-reset-bare.sh b/t/t7103-reset-bare.sh
index a60153f9f3..18bbd9975e 100755
--- a/t/t7103-reset-bare.sh
+++ b/t/t7103-reset-bare.sh
@@ -63,7 +63,7 @@ test_expect_success '"mixed" reset is not allowed in bare' '
test_must_fail git reset --mixed HEAD^
'
-test_expect_success !SANITIZE_LEAK '"soft" reset is allowed in bare' '
+test_expect_success '"soft" reset is allowed in bare' '
git reset --soft HEAD^ &&
git show --pretty=format:%s >out &&
echo one >expect &&
diff --git a/t/t7400-submodule-basic.sh b/t/t7400-submodule-basic.sh
index a989aafaf5..eae6a46ef3 100755
--- a/t/t7400-submodule-basic.sh
+++ b/t/t7400-submodule-basic.sh
@@ -579,6 +579,16 @@ test_expect_success 'status should be "modified" after submodule commit' '
grep "^+$rev2" list
'
+test_expect_success '"submodule --cached" command forms should be identical' '
+ git submodule status --cached >expect &&
+
+ git submodule --cached >actual &&
+ test_cmp expect actual &&
+
+ git submodule --cached status >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'the --cached sha1 should be rev1' '
git submodule --cached status >list &&
grep "^+$rev1" list
diff --git a/t/t7406-submodule-update.sh b/t/t7406-submodule-update.sh
index f094e3d7f3..b749d35f78 100755
--- a/t/t7406-submodule-update.sh
+++ b/t/t7406-submodule-update.sh
@@ -1179,4 +1179,160 @@ test_expect_success 'submodule update --recursive skip submodules with strategy=
test_cmp expect.err actual.err
'
+test_expect_success 'setup superproject with submodule.propagateBranches' '
+ git init sub1 &&
+ test_commit -C sub1 "sub1" &&
+ git init branch-super &&
+ git -C branch-super submodule add ../sub1 sub1 &&
+ git -C branch-super commit -m "super" &&
+
+ # Clone into a clean repo that we can cp around
+ git clone --recurse-submodules \
+ -c submodule.propagateBranches=true \
+ branch-super branch-super-clean &&
+ git -C branch-super-clean config submodule.propagateBranches true &&
+
+ # sub2 will not be in the clone. We will fetch the containing
+ # superproject commit and clone sub2 with "git submodule update".
+ git init sub2 &&
+ test_commit -C sub2 "sub2" &&
+ git -C branch-super submodule add ../sub2 sub2 &&
+ git -C branch-super commit -m "add sub2"
+'
+
+test_clean_submodule ()
+{
+ local negate super_dir sub_dir expect_oid actual_oid &&
+ if test "$1" = "!"
+ then
+ negate=t
+ shift
+ fi
+ super_dir="$1" &&
+ sub_dir="$2" &&
+ expect_oid="$(git -C "$super_dir" rev-parse ":$sub_dir")" &&
+ actual_oid="$(git -C "$super_dir/$sub_dir" rev-parse HEAD)" &&
+ if test -n "$negate"
+ then
+ ! test "$expect_oid" = "$actual_oid"
+ else
+ test "$expect_oid" = "$actual_oid"
+ fi
+}
+
+# Test the behavior of a newly cloned submodule
+test_expect_success 'branches - newly-cloned submodule, detached HEAD' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned fetch origin main &&
+ git -C branch-super-cloned checkout FETCH_HEAD &&
+ git -C branch-super-cloned/sub1 checkout --detach &&
+ git -C branch-super-cloned submodule update &&
+
+ # sub1 and sub2 should be in detached HEAD
+ git -C branch-super-cloned/sub1 rev-parse --verify HEAD &&
+ test_must_fail git -C branch-super-cloned/sub1 symbolic-ref HEAD &&
+ test_clean_submodule branch-super-cloned sub1 &&
+ git -C branch-super-cloned/sub2 rev-parse --verify HEAD &&
+ test_must_fail git -C branch-super-cloned/sub2 symbolic-ref HEAD &&
+ test_clean_submodule branch-super-cloned sub2
+'
+
+test_expect_success 'branches - newly-cloned submodule, branch checked out' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned fetch origin main &&
+ git -C branch-super-cloned checkout FETCH_HEAD &&
+ git -C branch-super-cloned branch new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ git -C branch-super-cloned/sub1 branch new-branch &&
+ git -C branch-super-cloned submodule update &&
+
+ # Ignore sub1, we will test it later.
+ # sub2 should check out the branch
+ HEAD_BRANCH2=$(git -C branch-super-cloned/sub2 symbolic-ref HEAD) &&
+ test $HEAD_BRANCH2 = "refs/heads/new-branch" &&
+ test_clean_submodule branch-super-cloned sub2
+'
+
+# Test the behavior of an already-cloned submodule.
+# NEEDSWORK When updating with branches, we always use the branch instead of the
+# gitlink's OID. This results in some imperfect behavior:
+#
+# - If the gitlink's OID disagrees with the branch OID, updating with branches
+# may result in a dirty worktree
+# - If the branch does not exist, the update fails.
+#
+# We will reevaluate when "git checkout --recurse-submodules" supports branches
+# For now, just test for this imperfect behavior.
+test_expect_success 'branches - correct branch checked out, OIDs agree' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned branch --recurse-submodules new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ git -C branch-super-cloned/sub1 checkout new-branch &&
+ git -C branch-super-cloned submodule update &&
+
+ HEAD_BRANCH1=$(git -C branch-super-cloned/sub1 symbolic-ref HEAD) &&
+ test $HEAD_BRANCH1 = "refs/heads/new-branch" &&
+ test_clean_submodule branch-super-cloned sub1
+'
+
+test_expect_success 'branches - correct branch checked out, OIDs disagree' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned branch --recurse-submodules new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ git -C branch-super-cloned/sub1 checkout new-branch &&
+ test_commit -C branch-super-cloned/sub1 new-commit &&
+ git -C branch-super-cloned submodule update &&
+
+ HEAD_BRANCH1=$(git -C branch-super-cloned/sub1 symbolic-ref HEAD) &&
+ test $HEAD_BRANCH1 = "refs/heads/new-branch" &&
+ test_clean_submodule ! branch-super-cloned sub1
+'
+
+test_expect_success 'branches - other branch checked out, correct branch exists, OIDs agree' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned branch --recurse-submodules new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ git -C branch-super-cloned/sub1 checkout main &&
+ git -C branch-super-cloned submodule update &&
+
+ HEAD_BRANCH1=$(git -C branch-super-cloned/sub1 symbolic-ref HEAD) &&
+ test $HEAD_BRANCH1 = "refs/heads/new-branch" &&
+ test_clean_submodule branch-super-cloned sub1
+'
+
+test_expect_success 'branches - other branch checked out, correct branch exists, OIDs disagree' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned branch --recurse-submodules new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ git -C branch-super-cloned/sub1 checkout new-branch &&
+ test_commit -C branch-super-cloned/sub1 new-commit &&
+ git -C branch-super-cloned/sub1 checkout main &&
+ git -C branch-super-cloned submodule update &&
+
+ HEAD_BRANCH1=$(git -C branch-super-cloned/sub1 symbolic-ref HEAD) &&
+ test $HEAD_BRANCH1 = "refs/heads/new-branch" &&
+ test_clean_submodule ! branch-super-cloned sub1
+'
+
+test_expect_success 'branches - other branch checked out, correct branch does not exist' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned branch new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ test_must_fail git -C branch-super-cloned submodule update
+'
+
test_done
diff --git a/t/t7407-submodule-foreach.sh b/t/t7407-submodule-foreach.sh
index 59bd150166..8d7b234beb 100755
--- a/t/t7407-submodule-foreach.sh
+++ b/t/t7407-submodule-foreach.sh
@@ -154,6 +154,11 @@ test_expect_success 'use "submodule foreach" to checkout 2nd level submodule' '
)
'
+test_expect_success 'usage: foreach -- --not-an-option' '
+ test_expect_code 1 git submodule foreach -- --not-an-option &&
+ test_expect_code 1 git -C clone2 submodule foreach -- --not-an-option
+'
+
test_expect_success 'use "foreach --recursive" to checkout all submodules' '
(
cd clone2 &&
diff --git a/t/t7411-submodule-config.sh b/t/t7411-submodule-config.sh
index c583c4e373..c0167944ab 100755
--- a/t/t7411-submodule-config.sh
+++ b/t/t7411-submodule-config.sh
@@ -137,44 +137,44 @@ test_expect_success 'error in history in fetchrecursesubmodule lets continue' '
)
'
-test_expect_success 'reading submodules config from the working tree with "submodule--helper config"' '
+test_expect_success 'reading submodules config from the working tree' '
(cd super &&
echo "../submodule" >expect &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
)
'
-test_expect_success 'unsetting submodules config from the working tree with "submodule--helper config --unset"' '
+test_expect_success 'unsetting submodules config from the working tree' '
(cd super &&
- git submodule--helper config --unset submodule.submodule.url &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-unset submodule.submodule.url &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_must_be_empty actual
)
'
-test_expect_success 'writing submodules config with "submodule--helper config"' '
+test_expect_success 'writing submodules config' '
(cd super &&
echo "new_url" >expect &&
- git submodule--helper config submodule.submodule.url "new_url" &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-set submodule.submodule.url "new_url" &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
)
'
-test_expect_success 'overwriting unstaged submodules config with "submodule--helper config"' '
+test_expect_success 'overwriting unstaged submodules config' '
test_when_finished "git -C super checkout .gitmodules" &&
(cd super &&
echo "newer_url" >expect &&
- git submodule--helper config submodule.submodule.url "newer_url" &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-set submodule.submodule.url "newer_url" &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
)
'
test_expect_success 'writeable .gitmodules when it is in the working tree' '
- git -C super submodule--helper config --check-writeable
+ test-tool -C super submodule config-writeable
'
test_expect_success 'writeable .gitmodules when it is nowhere in the repository' '
@@ -183,7 +183,7 @@ test_expect_success 'writeable .gitmodules when it is nowhere in the repository'
(cd super &&
git rm .gitmodules &&
git commit -m "remove .gitmodules from the current branch" &&
- git submodule--helper config --check-writeable
+ test-tool submodule config-writeable
)
'
@@ -191,7 +191,7 @@ test_expect_success 'non-writeable .gitmodules when it is in the index but not i
test_when_finished "git -C super checkout .gitmodules" &&
(cd super &&
rm -f .gitmodules &&
- test_must_fail git submodule--helper config --check-writeable
+ test_must_fail test-tool submodule config-writeable
)
'
@@ -200,7 +200,7 @@ test_expect_success 'non-writeable .gitmodules when it is in the current branch
test_when_finished "git -C super reset --hard $ORIG" &&
(cd super &&
git rm .gitmodules &&
- test_must_fail git submodule--helper config --check-writeable
+ test_must_fail test-tool submodule config-writeable
)
'
@@ -208,11 +208,11 @@ test_expect_success 'reading submodules config from the index when .gitmodules i
ORIG=$(git -C super rev-parse HEAD) &&
test_when_finished "git -C super reset --hard $ORIG" &&
(cd super &&
- git submodule--helper config submodule.submodule.url "staged_url" &&
+ test-tool submodule config-set submodule.submodule.url "staged_url" &&
git add .gitmodules &&
rm -f .gitmodules &&
echo "staged_url" >expect &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
)
'
@@ -223,7 +223,7 @@ test_expect_success 'reading submodules config from the current branch when .git
(cd super &&
git rm .gitmodules &&
echo "../submodule" >expect &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
)
'
diff --git a/t/t7412-submodule-absorbgitdirs.sh b/t/t7412-submodule-absorbgitdirs.sh
index 2859695c6d..a5cd6db7ac 100755
--- a/t/t7412-submodule-absorbgitdirs.sh
+++ b/t/t7412-submodule-absorbgitdirs.sh
@@ -18,13 +18,19 @@ test_expect_success 'setup a real submodule' '
'
test_expect_success 'absorb the git dir' '
+ >expect &&
+ >actual &&
>expect.1 &&
>expect.2 &&
>actual.1 &&
>actual.2 &&
git status >expect.1 &&
git -C sub1 rev-parse HEAD >expect.2 &&
- git submodule absorbgitdirs &&
+ cat >expect <<-\EOF &&
+ Migrating git directory of '\''sub1'\'' from '\''sub1/.git'\'' to '\''.git/modules/sub1'\''
+ EOF
+ git submodule absorbgitdirs 2>actual &&
+ test_cmp expect actual &&
git fsck &&
test -f sub1/.git &&
test -d .git/modules/sub1 &&
@@ -37,7 +43,8 @@ test_expect_success 'absorb the git dir' '
test_expect_success 'absorbing does not fail for deinitialized submodules' '
test_when_finished "git submodule update --init" &&
git submodule deinit --all &&
- git submodule absorbgitdirs &&
+ git submodule absorbgitdirs 2>err &&
+ test_must_be_empty err &&
test -d .git/modules/sub1 &&
test -d sub1 &&
! test -e sub1/.git
@@ -56,7 +63,11 @@ test_expect_success 'setup nested submodule' '
test_expect_success 'absorb the git dir in a nested submodule' '
git status >expect.1 &&
git -C sub1/nested rev-parse HEAD >expect.2 &&
- git submodule absorbgitdirs &&
+ cat >expect <<-\EOF &&
+ Migrating git directory of '\''sub1/nested'\'' from '\''sub1/nested/.git'\'' to '\''.git/modules/sub1/modules/nested'\''
+ EOF
+ git submodule absorbgitdirs 2>actual &&
+ test_cmp expect actual &&
test -f sub1/nested/.git &&
test -d .git/modules/sub1/modules/nested &&
git status >actual.1 &&
@@ -87,7 +98,11 @@ test_expect_success 're-setup nested submodule' '
test_expect_success 'absorb the git dir in a nested submodule' '
git status >expect.1 &&
git -C sub1/nested rev-parse HEAD >expect.2 &&
- git submodule absorbgitdirs &&
+ cat >expect <<-\EOF &&
+ Migrating git directory of '\''sub1'\'' from '\''sub1/.git'\'' to '\''.git/modules/sub1'\''
+ EOF
+ git submodule absorbgitdirs 2>actual &&
+ test_cmp expect actual &&
test -f sub1/.git &&
test -f sub1/nested/.git &&
test -d .git/modules/sub1/modules/nested &&
@@ -107,7 +122,11 @@ test_expect_success 'setup a gitlink with missing .gitmodules entry' '
test_expect_success 'absorbing the git dir fails for incomplete submodules' '
git status >expect.1 &&
git -C sub2 rev-parse HEAD >expect.2 &&
- test_must_fail git submodule absorbgitdirs &&
+ cat >expect <<-\EOF &&
+ fatal: could not lookup name for submodule '\''sub2'\''
+ EOF
+ test_must_fail git submodule absorbgitdirs 2>actual &&
+ test_cmp expect actual &&
git -C sub2 fsck &&
test -d sub2/.git &&
git status >actual &&
@@ -127,8 +146,11 @@ test_expect_success 'setup a submodule with multiple worktrees' '
'
test_expect_success 'absorbing fails for a submodule with multiple worktrees' '
- test_must_fail git submodule absorbgitdirs sub3 2>error &&
- test_i18ngrep "not supported" error
+ cat >expect <<-\EOF &&
+ fatal: could not lookup name for submodule '\''sub2'\''
+ EOF
+ test_must_fail git submodule absorbgitdirs 2>actual &&
+ test_cmp expect actual
'
test_done
diff --git a/t/t7418-submodule-sparse-gitmodules.sh b/t/t7418-submodule-sparse-gitmodules.sh
index d5874200fd..dde11ecce8 100755
--- a/t/t7418-submodule-sparse-gitmodules.sh
+++ b/t/t7418-submodule-sparse-gitmodules.sh
@@ -50,12 +50,12 @@ test_expect_success 'sparse checkout setup which hides .gitmodules' '
test_expect_success 'reading gitmodules config file when it is not checked out' '
echo "../submodule" >expect &&
- git -C super submodule--helper config submodule.submodule.url >actual &&
+ test-tool -C super submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
'
test_expect_success 'not writing gitmodules config file when it is not checked out' '
- test_must_fail git -C super submodule--helper config submodule.submodule.url newurl &&
+ test_must_fail test-tool -C super submodule config-set submodule.submodule.url newurl &&
test_path_is_missing super/.gitmodules
'
diff --git a/t/t7422-submodule-output.sh b/t/t7422-submodule-output.sh
new file mode 100755
index 0000000000..ab946ec940
--- /dev/null
+++ b/t/t7422-submodule-output.sh
@@ -0,0 +1,170 @@
+#!/bin/sh
+
+test_description='submodule --cached, --quiet etc. output'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-t3100.sh
+
+setup_sub () {
+ local d="$1" &&
+ shift &&
+ git $@ clone . "$d" &&
+ git $@ submodule add ./"$d"
+}
+
+normalize_status () {
+ sed -e 's/-g[0-9a-f]*/-gHASH/'
+}
+
+test_expect_success 'setup' '
+ test_commit A &&
+ test_commit B &&
+ setup_sub S &&
+ setup_sub S.D &&
+ setup_sub S.C &&
+ setup_sub S.C.D &&
+ setup_sub X &&
+ git add S* &&
+ test_commit C &&
+
+ # recursive in X/
+ git -C X pull &&
+ GIT_ALLOW_PROTOCOL=file git -C X submodule update --init &&
+
+ # dirty
+ for d in S.D X/S.D
+ do
+ echo dirty >"$d"/A.t || return 1
+ done &&
+
+ # commit (for --cached)
+ for d in S.C* X/S.C*
+ do
+ git -C "$d" reset --hard A || return 1
+ done &&
+
+ # dirty
+ for d in S*.D X/S*.D
+ do
+ echo dirty >"$d/C2.t" || return 1
+ done &&
+
+ for ref in A B C
+ do
+ # Not different with SHA-1 and SHA-256, just (ab)using
+ # test_oid_cache as a variable bag to avoid using
+ # $(git rev-parse ...).
+ oid=$(git rev-parse $ref) &&
+ test_oid_cache <<-EOF || return 1
+ $ref sha1:$oid
+ $ref sha256:$oid
+ EOF
+ done
+'
+
+for opts in "" "status"
+do
+ test_expect_success "git submodule $opts" '
+ sed -e "s/^>//" >expect <<-EOF &&
+ > $(test_oid B) S (B)
+ >+$(test_oid A) S.C (A)
+ >+$(test_oid A) S.C.D (A)
+ > $(test_oid B) S.D (B)
+ >+$(test_oid C) X (C)
+ EOF
+ git submodule $opts >actual.raw &&
+ normalize_status <actual.raw >actual &&
+ test_cmp expect actual
+ '
+done
+
+for opts in \
+ "status --recursive"
+do
+ test_expect_success "git submodule $opts" '
+ sed -e "s/^>//" >expect <<-EOF &&
+ > $(test_oid B) S (B)
+ >+$(test_oid A) S.C (A)
+ >+$(test_oid A) S.C.D (A)
+ > $(test_oid B) S.D (B)
+ >+$(test_oid C) X (C)
+ > $(test_oid B) X/S (B)
+ >+$(test_oid A) X/S.C (A)
+ >+$(test_oid A) X/S.C.D (A)
+ > $(test_oid B) X/S.D (B)
+ > $(test_oid B) X/X (B)
+ EOF
+ git submodule $opts >actual.raw &&
+ normalize_status <actual.raw >actual &&
+ test_cmp expect actual
+ '
+done
+
+for opts in \
+ "--quiet" \
+ "--quiet status" \
+ "status --quiet"
+do
+ test_expect_success "git submodule $opts" '
+ git submodule $opts >out &&
+ test_must_be_empty out
+ '
+done
+
+for opts in \
+ "--cached" \
+ "--cached status" \
+ "status --cached"
+do
+ test_expect_success "git submodule $opts" '
+ sed -e "s/^>//" >expect <<-EOF &&
+ > $(test_oid B) S (B)
+ >+$(test_oid B) S.C (B)
+ >+$(test_oid B) S.C.D (B)
+ > $(test_oid B) S.D (B)
+ >+$(test_oid B) X (B)
+ EOF
+ git submodule $opts >actual.raw &&
+ normalize_status <actual.raw >actual &&
+ test_cmp expect actual
+ '
+done
+
+for opts in \
+ "--cached --quiet" \
+ "--cached --quiet status" \
+ "--cached status --quiet" \
+ "--quiet status --cached" \
+ "status --cached --quiet"
+do
+ test_expect_success "git submodule $opts" '
+ git submodule $opts >out &&
+ test_must_be_empty out
+ '
+done
+
+for opts in \
+ "status --cached --recursive" \
+ "--cached status --recursive"
+do
+ test_expect_success "git submodule $opts" '
+ sed -e "s/^>//" >expect <<-EOF &&
+ > $(test_oid B) S (B)
+ >+$(test_oid B) S.C (B)
+ >+$(test_oid B) S.C.D (B)
+ > $(test_oid B) S.D (B)
+ >+$(test_oid B) X (B)
+ > $(test_oid B) X/S (B)
+ >+$(test_oid B) X/S.C (B)
+ >+$(test_oid B) X/S.C.D (B)
+ > $(test_oid B) X/S.D (B)
+ > $(test_oid B) X/X (B)
+ EOF
+ git submodule $opts >actual.raw &&
+ normalize_status <actual.raw >actual &&
+ test_cmp expect actual
+ '
+done
+
+test_done
diff --git a/t/t7504-commit-msg-hook.sh b/t/t7504-commit-msg-hook.sh
index a39de8c112..07ca46fb0d 100755
--- a/t/t7504-commit-msg-hook.sh
+++ b/t/t7504-commit-msg-hook.sh
@@ -5,6 +5,7 @@ test_description='commit-msg hook'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'with no hook' '
diff --git a/t/t7506-status-submodule.sh b/t/t7506-status-submodule.sh
index d050091345..52a82b703f 100755
--- a/t/t7506-status-submodule.sh
+++ b/t/t7506-status-submodule.sh
@@ -412,4 +412,23 @@ test_expect_success 'status with added file in nested submodule (short)' '
EOF
'
+test_expect_success 'status in superproject with submodules respects parallel settings' '
+ test_when_finished "rm -f trace.out" &&
+ (
+ GIT_TRACE=$(pwd)/trace.out git status &&
+ grep "1 tasks" trace.out &&
+ >trace.out &&
+
+ git config submodule.diffJobs 8 &&
+ GIT_TRACE=$(pwd)/trace.out git status &&
+ grep "8 tasks" trace.out &&
+ >trace.out &&
+
+ GIT_TRACE=$(pwd)/trace.out git -c submodule.diffJobs=0 status &&
+ grep "preparing to run up to [0-9]* tasks" trace.out &&
+ ! grep "up to 0 tasks" trace.out &&
+ >trace.out
+ )
+'
+
test_done
diff --git a/t/t7517-per-repo-email.sh b/t/t7517-per-repo-email.sh
index 163ae80468..efc6496e2b 100755
--- a/t/t7517-per-repo-email.sh
+++ b/t/t7517-per-repo-email.sh
@@ -9,6 +9,7 @@ test_description='per-repo forced setting of email address'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup a likely user.useConfigOnly use case' '
diff --git a/t/t7520-ignored-hook-warning.sh b/t/t7520-ignored-hook-warning.sh
index dc57526e6f..184b258989 100755
--- a/t/t7520-ignored-hook-warning.sh
+++ b/t/t7520-ignored-hook-warning.sh
@@ -2,6 +2,7 @@
test_description='ignored hook warning'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
diff --git a/t/t7527-builtin-fsmonitor.sh b/t/t7527-builtin-fsmonitor.sh
index d419085379..4abc74db2b 100755
--- a/t/t7527-builtin-fsmonitor.sh
+++ b/t/t7527-builtin-fsmonitor.sh
@@ -943,9 +943,9 @@ test_expect_success CASE_INSENSITIVE_FS 'case insensitive+preserving' '
# directories and files that we touched. We may or may not get a
# trailing slash on modified directories.
#
- egrep "^event: abc/?$" ./insensitive.trace &&
- egrep "^event: abc/def/?$" ./insensitive.trace &&
- egrep "^event: abc/def/xyz$" ./insensitive.trace
+ grep -E "^event: abc/?$" ./insensitive.trace &&
+ grep -E "^event: abc/def/?$" ./insensitive.trace &&
+ grep -E "^event: abc/def/xyz$" ./insensitive.trace
'
# The variable "unicode_debug" is defined in the following library
@@ -987,20 +987,20 @@ test_expect_success !UNICODE_COMPOSITION_SENSITIVE 'Unicode nfc/nfd' '
then
# We should have seen NFC event from OS.
# We should not have synthesized an NFD event.
- egrep "^event: nfc/c_${utf8_nfc}/?$" ./unicode.trace &&
- egrep -v "^event: nfc/c_${utf8_nfd}/?$" ./unicode.trace
+ grep -E "^event: nfc/c_${utf8_nfc}/?$" ./unicode.trace &&
+ grep -E -v "^event: nfc/c_${utf8_nfd}/?$" ./unicode.trace
else
# We should have seen NFD event from OS.
# We should have synthesized an NFC event.
- egrep "^event: nfc/c_${utf8_nfd}/?$" ./unicode.trace &&
- egrep "^event: nfc/c_${utf8_nfc}/?$" ./unicode.trace
+ grep -E "^event: nfc/c_${utf8_nfd}/?$" ./unicode.trace &&
+ grep -E "^event: nfc/c_${utf8_nfc}/?$" ./unicode.trace
fi &&
# We assume UNICODE_NFD_PRESERVED.
# We should have seen explicit NFD from OS.
# We should have synthesized an NFC event.
- egrep "^event: nfd/d_${utf8_nfd}/?$" ./unicode.trace &&
- egrep "^event: nfd/d_${utf8_nfc}/?$" ./unicode.trace
+ grep -E "^event: nfd/d_${utf8_nfd}/?$" ./unicode.trace &&
+ grep -E "^event: nfd/d_${utf8_nfc}/?$" ./unicode.trace
'
test_done
diff --git a/t/t7605-merge-resolve.sh b/t/t7605-merge-resolve.sh
index 5d56c38546..62d935d31c 100755
--- a/t/t7605-merge-resolve.sh
+++ b/t/t7605-merge-resolve.sh
@@ -4,6 +4,7 @@ test_description='git merge
Testing the resolve strategy.'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t7609-mergetool--lib.sh b/t/t7609-mergetool--lib.sh
index 8b1c3bd39f..2090d12a48 100755
--- a/t/t7609-mergetool--lib.sh
+++ b/t/t7609-mergetool--lib.sh
@@ -8,7 +8,7 @@ TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'mergetool --tool=vimdiff creates the expected layout' '
- . "$GIT_BUILD_DIR"/mergetools/vimdiff &&
+ . "$GIT_SOURCE_DIR"/mergetools/vimdiff &&
run_unit_tests
'
diff --git a/t/t7610-mergetool.sh b/t/t7610-mergetool.sh
index 8cc64729ad..7b957022f1 100755
--- a/t/t7610-mergetool.sh
+++ b/t/t7610-mergetool.sh
@@ -33,7 +33,7 @@ test_expect_success 'setup' '
git add foo &&
git commit -m "Add foo"
) &&
- git submodule add git://example.com/submod submod &&
+ git submodule add file:///dev/null submod &&
git add file1 "spaced name" file1[1-4] subdir/file3 .gitmodules submod &&
git commit -m "add initial versions" &&
@@ -614,7 +614,7 @@ test_expect_success 'submodule in subdirectory' '
)
) &&
test_when_finished "rm -rf subdir/subdir_module" &&
- git submodule add git://example.com/subsubmodule subdir/subdir_module &&
+ git submodule add file:///dev/null subdir/subdir_module &&
git add subdir/subdir_module &&
git commit -m "add submodule in subdirectory" &&
diff --git a/t/t7614-merge-signoff.sh b/t/t7614-merge-signoff.sh
index fee258d4f0..cf96a35e8e 100755
--- a/t/t7614-merge-signoff.sh
+++ b/t/t7614-merge-signoff.sh
@@ -8,6 +8,7 @@ This test runs git merge --signoff and makes sure that it works.
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Setup test files
diff --git a/t/t7700-repack.sh b/t/t7700-repack.sh
index ca45c4cd2c..c630e0d52d 100755
--- a/t/t7700-repack.sh
+++ b/t/t7700-repack.sh
@@ -426,12 +426,73 @@ test_expect_success '--write-midx -b packs non-kept objects' '
)
'
+test_expect_success '--write-midx removes stale pack-based bitmaps' '
+ rm -fr repo &&
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+ test_commit base &&
+ GIT_TEST_MULTI_PACK_INDEX=0 git repack -Ab &&
+
+ pack_bitmap=$(ls $objdir/pack/pack-*.bitmap) &&
+ test_path_is_file "$pack_bitmap" &&
+
+ test_commit tip &&
+ GIT_TEST_MULTI_PACK_INDEX=0 git repack -bm &&
+
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
+ test_path_is_missing $pack_bitmap
+ )
+'
+
+test_expect_success '--write-midx with --pack-kept-objects' '
+ git init repo &&
+ test_when_finished "rm -fr repo" &&
+ (
+ cd repo &&
+
+ test_commit one &&
+ test_commit two &&
+
+ one="$(echo "one" | git pack-objects --revs $objdir/pack/pack)" &&
+ two="$(echo "one..two" | git pack-objects --revs $objdir/pack/pack)" &&
+
+ keep="$objdir/pack/pack-$one.keep" &&
+ touch "$keep" &&
+
+ git repack --write-midx --write-bitmap-index --geometric=2 -d \
+ --pack-kept-objects &&
+
+ test_path_is_file $keep &&
+ test_path_is_file $midx &&
+ test_path_is_file $midx-$(midx_checksum $objdir).bitmap
+ )
+'
+
test_expect_success TTY '--quiet disables progress' '
test_terminal env GIT_PROGRESS_DELAY=0 \
git -C midx repack -ad --quiet --write-midx 2>stderr &&
test_must_be_empty stderr
'
+test_expect_success 'clean up .tmp-* packs on error' '
+ test_must_fail ok=sigpipe git \
+ -c repack.cruftwindow=bogus \
+ repack -ad --cruft &&
+ find $objdir/pack -name '.tmp-*' >tmpfiles &&
+ test_must_be_empty tmpfiles
+'
+
+test_expect_success 'repack -ad cleans up old .tmp-* packs' '
+ git rev-parse HEAD >input &&
+ git pack-objects $objdir/pack/.tmp-1234 <input &&
+ git repack -ad &&
+ find $objdir/pack -name '.tmp-*' >tmpfiles &&
+ test_must_be_empty tmpfiles
+'
+
test_expect_success 'setup for update-server-info' '
git init update-server-info &&
test_commit -C update-server-info message
@@ -482,4 +543,125 @@ test_expect_success '-n overrides repack.updateServerInfo=true' '
test_server_info_missing
'
+test_expect_success '--expire-to stores pruned objects (now)' '
+ git init expire-to-now &&
+ (
+ cd expire-to-now &&
+
+ git branch -M main &&
+
+ test_commit base &&
+
+ git checkout -b cruft &&
+ test_commit --no-tag cruft &&
+
+ git rev-list --objects --no-object-names main..cruft >moved.raw &&
+ sort moved.raw >moved.want &&
+
+ git rev-list --all --objects --no-object-names >expect.raw &&
+ sort expect.raw >expect &&
+
+ git checkout main &&
+ git branch -D cruft &&
+ git reflog expire --all --expire=all &&
+
+ git init --bare expired.git &&
+ git repack -d \
+ --cruft --cruft-expiration="now" \
+ --expire-to="expired.git/objects/pack/pack" &&
+
+ expired="$(ls expired.git/objects/pack/pack-*.idx)" &&
+ test_path_is_file "${expired%.idx}.mtimes" &&
+
+ # Since the `--cruft-expiration` is "now", the effective
+ # behavior is to move _all_ unreachable objects out to
+ # the location in `--expire-to`.
+ git show-index <$expired >expired.raw &&
+ cut -d" " -f2 expired.raw | sort >expired.objects &&
+ git rev-list --all --objects --no-object-names \
+ >remaining.objects &&
+
+ # ...in other words, the combined contents of this
+ # repository and expired.git should be the same as the
+ # set of objects we started with.
+ cat expired.objects remaining.objects | sort >actual &&
+ test_cmp expect actual &&
+
+ # The "moved" objects (i.e., those in expired.git)
+ # should be the same as the cruft objects which were
+ # expired in the previous step.
+ test_cmp moved.want expired.objects
+ )
+'
+
+test_expect_success '--expire-to stores pruned objects (5.minutes.ago)' '
+ git init expire-to-5.minutes.ago &&
+ (
+ cd expire-to-5.minutes.ago &&
+
+ git branch -M main &&
+
+ test_commit base &&
+
+ # Create two classes of unreachable objects, one which
+ # is older than 5 minutes (stale), and another which is
+ # newer (recent).
+ for kind in stale recent
+ do
+ git checkout -b $kind main &&
+ test_commit --no-tag $kind || return 1
+ done &&
+
+ git rev-list --objects --no-object-names main..stale >in &&
+ stale="$(git pack-objects $objdir/pack/pack <in)" &&
+ mtime="$(test-tool chmtime --get =-600 $objdir/pack/pack-$stale.pack)" &&
+
+ # expect holds the set of objects we expect to find in
+ # this repository after repacking
+ git rev-list --objects --no-object-names recent >expect.raw &&
+ sort expect.raw >expect &&
+
+ # moved.want holds the set of objects we expect to find
+ # in expired.git
+ git rev-list --objects --no-object-names main..stale >out &&
+ sort out >moved.want &&
+
+ git checkout main &&
+ git branch -D stale recent &&
+ git reflog expire --all --expire=all &&
+ git prune-packed &&
+
+ git init --bare expired.git &&
+ git repack -d \
+ --cruft --cruft-expiration=5.minutes.ago \
+ --expire-to="expired.git/objects/pack/pack" &&
+
+ # Some of the remaining objects in this repository are
+ # unreachable, so use `cat-file --batch-all-objects`
+ # instead of `rev-list` to get their names
+ git cat-file --batch-all-objects --batch-check="%(objectname)" \
+ >remaining.objects &&
+ sort remaining.objects >actual &&
+ test_cmp expect actual &&
+
+ (
+ cd expired.git &&
+
+ expired="$(ls objects/pack/pack-*.mtimes)" &&
+ test-tool pack-mtimes $(basename $expired) >out &&
+ cut -d" " -f1 out | sort >../moved.got &&
+
+ # Ensure that there are as many objects with the
+ # expected mtime as were moved to expired.git.
+ #
+ # In other words, ensure that the recorded
+ # mtimes of any moved objects were written
+ # correctly.
+ grep " $mtime$" out >matching &&
+ test_line_count = $(wc -l <../moved.want) matching
+ ) &&
+ test_cmp moved.want moved.got
+ )
+'
+
test_done
diff --git a/t/t7701-repack-unpack-unreachable.sh b/t/t7701-repack-unpack-unreachable.sh
index 937f89ee8c..b7ac4f598a 100755
--- a/t/t7701-repack-unpack-unreachable.sh
+++ b/t/t7701-repack-unpack-unreachable.sh
@@ -35,7 +35,7 @@ test_expect_success '-A with -d option leaves unreachable objects unpacked' '
git repack -A -d -l &&
# verify objects are packed in repository
test 3 = $(git verify-pack -v -- .git/objects/pack/*.idx |
- egrep "^($fsha1|$csha1|$tsha1) " |
+ grep -E "^($fsha1|$csha1|$tsha1) " |
sort | uniq | wc -l) &&
git show $fsha1 &&
git show $csha1 &&
@@ -49,7 +49,7 @@ test_expect_success '-A with -d option leaves unreachable objects unpacked' '
git repack -A -d -l &&
# verify objects are retained unpacked
test 0 = $(git verify-pack -v -- .git/objects/pack/*.idx |
- egrep "^($fsha1|$csha1|$tsha1) " |
+ grep -E "^($fsha1|$csha1|$tsha1) " |
sort | uniq | wc -l) &&
git show $fsha1 &&
git show $csha1 &&
diff --git a/t/t7703-repack-geometric.sh b/t/t7703-repack-geometric.sh
index da87f8b2d8..8821fbd2dd 100755
--- a/t/t7703-repack-geometric.sh
+++ b/t/t7703-repack-geometric.sh
@@ -176,8 +176,12 @@ test_expect_success '--geometric ignores kept packs' '
# be repacked, too.
git repack --geometric 2 -d --pack-kept-objects &&
+ # After repacking, two packs remain: one new one (containing the
+ # objects in both the .keep and non-kept pack), and the .keep
+ # pack (since `--pack-kept-objects -d` does not actually delete
+ # the kept pack).
find $objdir/pack -name "*.pack" >after &&
- test_line_count = 1 after
+ test_line_count = 2 after
)
'
diff --git a/t/t7810-grep.sh b/t/t7810-grep.sh
index 0f937990a0..8eded6ab27 100755
--- a/t/t7810-grep.sh
+++ b/t/t7810-grep.sh
@@ -18,6 +18,9 @@ test_invalid_grep_expression() {
'
}
+LC_ALL=en_US.UTF-8 test-tool regex '^.$' '¿' &&
+ test_set_prereq MB_REGEX
+
cat >hello.c <<EOF
#include <assert.h>
#include <stdio.h>
@@ -88,6 +91,10 @@ test_expect_success setup '
echo unusual >"\"unusual\" pathname" &&
echo unusual >"t/nested \"unusual\" pathname"
fi &&
+ if test_have_prereq MB_REGEX
+ then
+ echo "¿" >reverse-question-mark
+ fi &&
git add . &&
test_tick &&
git commit -m initial
@@ -569,6 +576,14 @@ do
'
done
+test_expect_success MB_REGEX 'grep exactly one char in single-char multibyte file' '
+ LC_ALL=en_US.UTF-8 git grep "^.$" reverse-question-mark
+'
+
+test_expect_success MB_REGEX 'grep two chars in single-char multibyte file' '
+ LC_ALL=en_US.UTF-8 test_expect_code 1 git grep ".." reverse-question-mark
+'
+
cat >expected <<EOF
file
EOF
diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh
index 2724a44fe3..823331e44a 100755
--- a/t/t7900-maintenance.sh
+++ b/t/t7900-maintenance.sh
@@ -480,6 +480,11 @@ test_expect_success 'maintenance.strategy inheritance' '
test_expect_success 'register and unregister' '
test_when_finished git config --global --unset-all maintenance.repo &&
+
+ test_must_fail git maintenance unregister 2>err &&
+ grep "is not registered" err &&
+ git maintenance unregister --force &&
+
git config --global --add maintenance.repo /existing1 &&
git config --global --add maintenance.repo /existing2 &&
git config --global --get-all maintenance.repo >before &&
@@ -493,7 +498,30 @@ test_expect_success 'register and unregister' '
git maintenance unregister &&
git config --global --get-all maintenance.repo >actual &&
- test_cmp before actual
+ test_cmp before actual &&
+
+ git config --file ./other --add maintenance.repo /existing1 &&
+ git config --file ./other --add maintenance.repo /existing2 &&
+ git config --file ./other --get-all maintenance.repo >before &&
+
+ git maintenance register --config-file ./other &&
+ test_cmp_config false maintenance.auto &&
+ git config --file ./other --get-all maintenance.repo >between &&
+ cp before expect &&
+ pwd >>expect &&
+ test_cmp expect between &&
+
+ git maintenance unregister --config-file ./other &&
+ git config --file ./other --get-all maintenance.repo >actual &&
+ test_cmp before actual &&
+
+ test_must_fail git maintenance unregister 2>err &&
+ grep "is not registered" err &&
+ git maintenance unregister --force &&
+
+ test_must_fail git maintenance unregister --config-file ./other 2>err &&
+ grep "is not registered" err &&
+ git maintenance unregister --config-file ./other --force
'
test_expect_success !MINGW 'register and unregister with regex metacharacters' '
diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh
index 01c74b8b07..1130ef21b3 100755
--- a/t/t9001-send-email.sh
+++ b/t/t9001-send-email.sh
@@ -1519,7 +1519,7 @@ test_expect_success $PREREQ 'asks about and fixes 8bit encodings' '
grep "do not declare a Content-Transfer-Encoding" stdout &&
grep email-using-8bit stdout &&
grep "Which 8bit encoding" stdout &&
- egrep "Content|MIME" msgtxt1 >actual &&
+ grep -E "Content|MIME" msgtxt1 >actual &&
test_cmp content-type-decl actual
'
@@ -1530,7 +1530,7 @@ test_expect_success $PREREQ 'sendemail.8bitEncoding works' '
git send-email --from=author@example.com --to=nobody@example.com \
--smtp-server="$(pwd)/fake.sendmail" \
email-using-8bit >stdout &&
- egrep "Content|MIME" msgtxt1 >actual &&
+ grep -E "Content|MIME" msgtxt1 >actual &&
test_cmp content-type-decl actual
'
@@ -1545,7 +1545,7 @@ test_expect_success $PREREQ 'sendemail.8bitEncoding in .git/config overrides --g
git send-email --from=author@example.com --to=nobody@example.com \
--smtp-server="$(pwd)/fake.sendmail" \
email-using-8bit >stdout &&
- egrep "Content|MIME" msgtxt1 >actual &&
+ grep -E "Content|MIME" msgtxt1 >actual &&
test_cmp content-type-decl actual
'
@@ -1557,7 +1557,7 @@ test_expect_success $PREREQ '--8bit-encoding overrides sendemail.8bitEncoding' '
--smtp-server="$(pwd)/fake.sendmail" \
--8bit-encoding=UTF-8 \
email-using-8bit >stdout &&
- egrep "Content|MIME" msgtxt1 >actual &&
+ grep -E "Content|MIME" msgtxt1 >actual &&
test_cmp content-type-decl actual
'
diff --git a/t/t9003-help-autocorrect.sh b/t/t9003-help-autocorrect.sh
index f00deaf381..4b9cb4c942 100755
--- a/t/t9003-help-autocorrect.sh
+++ b/t/t9003-help-autocorrect.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='help.autocorrect finding a match'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t9115-git-svn-dcommit-funky-renames.sh b/t/t9115-git-svn-dcommit-funky-renames.sh
index 419f055721..743fbe1fe4 100755
--- a/t/t9115-git-svn-dcommit-funky-renames.sh
+++ b/t/t9115-git-svn-dcommit-funky-renames.sh
@@ -5,7 +5,6 @@
test_description='git svn dcommit can commit renames of files with ugly names'
-TEST_FAILS_SANITIZE_LEAK=true
. ./lib-git-svn.sh
test_expect_success 'load repository with strange names' '
diff --git a/t/t9133-git-svn-nested-git-repo.sh b/t/t9133-git-svn-nested-git-repo.sh
index f894860867..d8d536269c 100755
--- a/t/t9133-git-svn-nested-git-repo.sh
+++ b/t/t9133-git-svn-nested-git-repo.sh
@@ -35,7 +35,7 @@ test_expect_success 'SVN-side change outside of .git' '
echo b >> a &&
svn_cmd commit -m "SVN-side change outside of .git" &&
svn_cmd up &&
- svn_cmd log -v | fgrep "SVN-side change outside of .git"
+ svn_cmd log -v | grep -F "SVN-side change outside of .git"
)
'
@@ -59,7 +59,7 @@ test_expect_success 'SVN-side change inside of .git' '
svn_cmd add --force .git &&
svn_cmd commit -m "SVN-side change inside of .git" &&
svn_cmd up &&
- svn_cmd log -v | fgrep "SVN-side change inside of .git"
+ svn_cmd log -v | grep -F "SVN-side change inside of .git"
)
'
@@ -82,7 +82,7 @@ test_expect_success 'SVN-side change in and out of .git' '
git commit -m "add a inside an SVN repo" &&
svn_cmd commit -m "SVN-side change in and out of .git" &&
svn_cmd up &&
- svn_cmd log -v | fgrep "SVN-side change in and out of .git"
+ svn_cmd log -v | grep -F "SVN-side change in and out of .git"
)
'
diff --git a/t/t9134-git-svn-ignore-paths.sh b/t/t9134-git-svn-ignore-paths.sh
index 4a77eb9f60..3188400226 100755
--- a/t/t9134-git-svn-ignore-paths.sh
+++ b/t/t9134-git-svn-ignore-paths.sh
@@ -43,7 +43,7 @@ test_expect_success 'init+fetch an SVN repository with ignored www directory' '
test_expect_success 'verify ignore-paths config saved by clone' '
(
cd g &&
- git config --get svn-remote.svn.ignore-paths | fgrep "www"
+ git config --get svn-remote.svn.ignore-paths | grep www
)
'
@@ -53,7 +53,7 @@ test_expect_success 'SVN-side change outside of www' '
echo b >> qqq/test_qqq.txt &&
svn_cmd commit -m "SVN-side change outside of www" &&
svn_cmd up &&
- svn_cmd log -v | fgrep "SVN-side change outside of www"
+ svn_cmd log -v | grep "SVN-side change outside of www"
)
'
@@ -85,7 +85,7 @@ test_expect_success 'SVN-side change inside of ignored www' '
echo zaq >> www/test_www.txt &&
svn_cmd commit -m "SVN-side change inside of www/test_www.txt" &&
svn_cmd up &&
- svn_cmd log -v | fgrep "SVN-side change inside of www/test_www.txt"
+ svn_cmd log -v | grep -F "SVN-side change inside of www/test_www.txt"
)
'
@@ -118,7 +118,7 @@ test_expect_success 'SVN-side change in and out of ignored www' '
echo ygg >> qqq/test_qqq.txt &&
svn_cmd commit -m "SVN-side change in and out of ignored www" &&
svn_cmd up &&
- svn_cmd log -v | fgrep "SVN-side change in and out of ignored www"
+ svn_cmd log -v | grep "SVN-side change in and out of ignored www"
)
'
diff --git a/t/t9140-git-svn-reset.sh b/t/t9140-git-svn-reset.sh
index e855904629..a420b2a87a 100755
--- a/t/t9140-git-svn-reset.sh
+++ b/t/t9140-git-svn-reset.sh
@@ -43,7 +43,7 @@ test_expect_success 'fetch fails on modified hidden file' '
git svn find-rev refs/remotes/git-svn > ../expect &&
test_must_fail git svn fetch 2> ../errors &&
git svn find-rev refs/remotes/git-svn > ../expect2 ) &&
- fgrep "not found in commit" errors &&
+ grep "not found in commit" errors &&
test_cmp expect expect2
'
@@ -59,7 +59,7 @@ test_expect_success 'refetch succeeds not ignoring any files' '
( cd g &&
git svn fetch &&
git svn rebase &&
- fgrep "mod hidden" hid/hid.txt
+ grep "mod hidden" hid/hid.txt
)
'
diff --git a/t/t9146-git-svn-empty-dirs.sh b/t/t9146-git-svn-empty-dirs.sh
index 79c26ed69c..09606f1b3c 100755
--- a/t/t9146-git-svn-empty-dirs.sh
+++ b/t/t9146-git-svn-empty-dirs.sh
@@ -4,7 +4,6 @@
test_description='git svn creates empty directories'
-TEST_FAILS_SANITIZE_LEAK=true
. ./lib-git-svn.sh
test_expect_success 'initialize repo' '
diff --git a/t/t9147-git-svn-include-paths.sh b/t/t9147-git-svn-include-paths.sh
index 257fc8f2f8..63fa0b6732 100755
--- a/t/t9147-git-svn-include-paths.sh
+++ b/t/t9147-git-svn-include-paths.sh
@@ -45,7 +45,7 @@ test_expect_success 'init+fetch an SVN repository with included qqq directory' '
test_expect_success 'verify include-paths config saved by clone' '
(
cd g &&
- git config --get svn-remote.svn.include-paths | fgrep "qqq"
+ git config --get svn-remote.svn.include-paths | grep qqq
)
'
@@ -55,7 +55,7 @@ test_expect_success 'SVN-side change outside of www' '
echo b >> qqq/test_qqq.txt &&
svn_cmd commit -m "SVN-side change outside of www" &&
svn_cmd up &&
- svn_cmd log -v | fgrep "SVN-side change outside of www"
+ svn_cmd log -v | grep "SVN-side change outside of www"
)
'
@@ -87,7 +87,7 @@ test_expect_success 'SVN-side change inside of ignored www' '
echo zaq >> www/test_www.txt &&
svn_cmd commit -m "SVN-side change inside of www/test_www.txt" &&
svn_cmd up &&
- svn_cmd log -v | fgrep "SVN-side change inside of www/test_www.txt"
+ svn_cmd log -v | grep "SVN-side change inside of www/test_www.txt"
)
'
@@ -120,7 +120,7 @@ test_expect_success 'SVN-side change in and out of included qqq' '
echo ygg >> qqq/test_qqq.txt &&
svn_cmd commit -m "SVN-side change in and out of ignored www" &&
svn_cmd up &&
- svn_cmd log -v | fgrep "SVN-side change in and out of ignored www"
+ svn_cmd log -v | grep "SVN-side change in and out of ignored www"
)
'
diff --git a/t/t9148-git-svn-propset.sh b/t/t9148-git-svn-propset.sh
index 6cc76a07b3..aebb28995e 100755
--- a/t/t9148-git-svn-propset.sh
+++ b/t/t9148-git-svn-propset.sh
@@ -5,7 +5,6 @@
test_description='git svn propset tests'
-TEST_FAILS_SANITIZE_LEAK=true
. ./lib-git-svn.sh
test_expect_success 'setup propset via import' '
diff --git a/t/t9160-git-svn-preserve-empty-dirs.sh b/t/t9160-git-svn-preserve-empty-dirs.sh
index 9cf7a1427a..36c6b1a12f 100755
--- a/t/t9160-git-svn-preserve-empty-dirs.sh
+++ b/t/t9160-git-svn-preserve-empty-dirs.sh
@@ -9,7 +9,6 @@ This test uses git to clone a Subversion repository that contains empty
directories, and checks that corresponding directories are created in the
local Git repository with placeholder files.'
-TEST_FAILS_SANITIZE_LEAK=true
. ./lib-git-svn.sh
GIT_REPO=git-svn-repo
diff --git a/t/t9210-scalar.sh b/t/t9210-scalar.sh
index 14ca575a21..25f500cf68 100755
--- a/t/t9210-scalar.sh
+++ b/t/t9210-scalar.sh
@@ -116,7 +116,10 @@ test_expect_success 'scalar unregister' '
test_must_fail git config --get --global --fixed-value \
maintenance.repo "$(pwd)/vanish/src" &&
scalar list >scalar.repos &&
- ! grep -F "$(pwd)/vanish/src" scalar.repos
+ ! grep -F "$(pwd)/vanish/src" scalar.repos &&
+
+ # scalar unregister should be idempotent
+ scalar unregister vanish
'
test_expect_success 'set up repository to clone' '
@@ -163,6 +166,20 @@ test_expect_success 'scalar reconfigure' '
test true = "$(git -C one/src config core.preloadIndex)"
'
+test_expect_success '`reconfigure -a` removes stale config entries' '
+ git init stale/src &&
+ scalar register stale &&
+ scalar list >scalar.repos &&
+ grep stale scalar.repos &&
+
+ grep -v stale scalar.repos >expect &&
+
+ rm -rf stale &&
+ scalar reconfigure -a &&
+ scalar list >scalar.repos &&
+ test_cmp expect scalar.repos
+'
+
test_expect_success 'scalar delete without enlistment shows a usage' '
test_expect_code 129 scalar delete
'
diff --git a/t/t9700-perl-git.sh b/t/t9700-perl-git.sh
index 4aa5d90d32..b105d6d9d5 100755
--- a/t/t9700-perl-git.sh
+++ b/t/t9700-perl-git.sh
@@ -45,6 +45,10 @@ test_expect_success \
git config --add test.pathmulti bar
'
+test_expect_success 'set up bare repository' '
+ git init --bare bare.git
+'
+
test_expect_success 'use t9700/test.pl to test Git.pm' '
"$PERL_PATH" "$TEST_DIRECTORY"/t9700/test.pl 2>stderr &&
test_must_be_empty stderr
diff --git a/t/t9700/test.pl b/t/t9700/test.pl
index e046f7db76..6d753708d2 100755
--- a/t/t9700/test.pl
+++ b/t/t9700/test.pl
@@ -30,6 +30,18 @@ BEGIN { use_ok('Git') }
# set up
our $abs_repo_dir = cwd();
ok(our $r = Git->repository(Directory => "."), "open repository");
+{
+ local $ENV{GIT_TEST_ASSUME_DIFFERENT_OWNER} = 1;
+ my $failed;
+
+ $failed = eval { Git->repository(Directory => $abs_repo_dir) };
+ ok(!$failed, "reject unsafe non-bare repository");
+ like($@, qr/not a git repository/i, "unsafe error message");
+
+ $failed = eval { Git->repository(Directory => "$abs_repo_dir/bare.git") };
+ ok(!$failed, "reject unsafe bare repository");
+ like($@, qr/not a git repository/i, "unsafe error message");
+}
# config
is($r->config("test.string"), "value", "config scalar: string");
diff --git a/t/t9814-git-p4-rename.sh b/t/t9814-git-p4-rename.sh
index 468767cbf4..2a9838f37f 100755
--- a/t/t9814-git-p4-rename.sh
+++ b/t/t9814-git-p4-rename.sh
@@ -216,7 +216,7 @@ test_expect_success 'detect copies' '
# variable exists, which allows admins to disable the "p4 move" command.
test_lazy_prereq P4D_HAVE_CONFIGURABLE_RUN_MOVE_ALLOW '
p4 configure show run.move.allow >out &&
- egrep ^run.move.allow: out
+ grep -E ^run.move.allow: out
'
# If move can be disabled, turn it off and test p4 move handling
diff --git a/t/t9815-git-p4-submit-fail.sh b/t/t9815-git-p4-submit-fail.sh
index 9779dc0d11..0ca9937de6 100755
--- a/t/t9815-git-p4-submit-fail.sh
+++ b/t/t9815-git-p4-submit-fail.sh
@@ -417,8 +417,8 @@ test_expect_success 'cleanup chmod after submit cancel' '
! p4 fstat -T action text &&
test_path_is_file text+x &&
! p4 fstat -T action text+x &&
- ls -l text | egrep ^-r-- &&
- ls -l text+x | egrep ^-r-x
+ ls -l text | grep -E ^-r-- &&
+ ls -l text+x | grep -E ^-r-x
)
'
diff --git a/t/t9902-completion.sh b/t/t9902-completion.sh
index 43de868b80..27e3aa46c1 100755
--- a/t/t9902-completion.sh
+++ b/t/t9902-completion.sh
@@ -33,7 +33,7 @@ complete ()
GIT_TESTING_ALL_COMMAND_LIST='add checkout check-attr rebase ls-files'
GIT_TESTING_PORCELAIN_COMMAND_LIST='add checkout rebase'
-. "$GIT_BUILD_DIR/contrib/completion/git-completion.bash"
+. "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash"
# We don't need this function to actually join words or do anything special.
# Also, it's cleaner to avoid touching bash's internal completion variables.
@@ -2255,6 +2255,38 @@ test_expect_success 'checkout completes ref names' '
EOF
'
+test_expect_success 'checkout does not match ref names of a different case' '
+ test_completion "git checkout M" ""
+'
+
+test_expect_success 'checkout matches case insensitively with GIT_COMPLETION_IGNORE_CASE' '
+ (
+ . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ GIT_COMPLETION_IGNORE_CASE=1 && export GIT_COMPLETION_IGNORE_CASE &&
+ test_completion "git checkout M" <<-\EOF
+ main Z
+ mybranch Z
+ mytag Z
+ EOF
+ )
+'
+
+test_expect_success 'checkout completes pseudo refs' '
+ test_completion "git checkout H" <<-\EOF
+ HEAD Z
+ EOF
+'
+
+test_expect_success 'checkout completes pseudo refs case insensitively with GIT_COMPLETION_IGNORE_CASE' '
+ (
+ . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ GIT_COMPLETION_IGNORE_CASE=1 && export GIT_COMPLETION_IGNORE_CASE &&
+ test_completion "git checkout h" <<-\EOF
+ HEAD Z
+ EOF
+ )
+'
+
test_expect_success 'git -C <path> checkout uses the right repo' '
test_completion "git -C subdir -C subsubdir -C .. -C ../otherrepo checkout b" <<-\EOF
branch-in-other Z
@@ -2567,7 +2599,7 @@ test_expect_success 'sourcing the completion script clears cached commands' '
(
__git_compute_all_commands &&
verbose test -n "$__git_all_commands" &&
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
verbose test -z "$__git_all_commands"
)
'
@@ -2576,7 +2608,7 @@ test_expect_success 'sourcing the completion script clears cached merge strategi
(
__git_compute_merge_strategies &&
verbose test -n "$__git_merge_strategies" &&
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
verbose test -z "$__git_merge_strategies"
)
'
@@ -2587,7 +2619,7 @@ test_expect_success 'sourcing the completion script clears cached --options' '
verbose test -n "$__gitcomp_builtin_checkout" &&
__gitcomp_builtin notes_edit &&
verbose test -n "$__gitcomp_builtin_notes_edit" &&
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
verbose test -z "$__gitcomp_builtin_checkout" &&
verbose test -z "$__gitcomp_builtin_notes_edit"
)
@@ -2599,7 +2631,7 @@ test_expect_success 'option aliases are not shown by default' '
test_expect_success 'option aliases are shown with GIT_COMPLETION_SHOW_ALL' '
(
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
GIT_COMPLETION_SHOW_ALL=1 && export GIT_COMPLETION_SHOW_ALL &&
test_completion "git clone --recurs" <<-\EOF
--recurse-submodules Z
@@ -2610,7 +2642,7 @@ test_expect_success 'option aliases are shown with GIT_COMPLETION_SHOW_ALL' '
test_expect_success 'plumbing commands are excluded without GIT_COMPLETION_SHOW_ALL_COMMANDS' '
(
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
sane_unset GIT_TESTING_PORCELAIN_COMMAND_LIST &&
# Just mainporcelain, not plumbing commands
@@ -2622,7 +2654,7 @@ test_expect_success 'plumbing commands are excluded without GIT_COMPLETION_SHOW_
test_expect_success 'all commands are shown with GIT_COMPLETION_SHOW_ALL_COMMANDS (also main non-builtin)' '
(
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
GIT_COMPLETION_SHOW_ALL_COMMANDS=1 &&
export GIT_COMPLETION_SHOW_ALL_COMMANDS &&
sane_unset GIT_TESTING_PORCELAIN_COMMAND_LIST &&
diff --git a/t/t9903-bash-prompt.sh b/t/t9903-bash-prompt.sh
index d459fae655..06f0abfc29 100755
--- a/t/t9903-bash-prompt.sh
+++ b/t/t9903-bash-prompt.sh
@@ -10,7 +10,7 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./lib-bash.sh
-. "$GIT_BUILD_DIR/contrib/completion/git-prompt.sh"
+. "$GIT_SOURCE_DIR/contrib/completion/git-prompt.sh"
actual="$TRASH_DIRECTORY/actual"
c_red='\\[\\e[31m\\]'
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index c6479f24eb..796093a7b3 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -273,13 +273,13 @@ debug () {
# <file>, <contents>, and <tag> all default to <message>.
test_commit () {
- notick= &&
- echo=echo &&
- append= &&
- author= &&
- signoff= &&
- indir= &&
- tag=light &&
+ local notick= &&
+ local echo=echo &&
+ local append= &&
+ local author= &&
+ local signoff= &&
+ local indir= &&
+ local tag=light &&
while test $# != 0
do
case "$1" in
@@ -322,7 +322,7 @@ test_commit () {
shift
done &&
indir=${indir:+"$indir"/} &&
- file=${2:-"$1.t"} &&
+ local file=${2:-"$1.t"} &&
if test -n "$append"
then
$echo "${3-$1}" >>"$indir$file"
@@ -897,7 +897,7 @@ test_path_is_symlink () {
test_dir_is_empty () {
test "$#" -ne 1 && BUG "1 param"
test_path_is_dir "$1" &&
- if test -n "$(ls -a1 "$1" | egrep -v '^\.\.?$')"
+ if test -n "$(ls -a1 "$1" | grep -E -v '^\.\.?$')"
then
echo "Directory '$1' is not empty, it contains:"
ls -la "$1"
@@ -921,10 +921,6 @@ test_path_is_missing () {
then
echo "Path exists:"
ls -ld "$1"
- if test $# -ge 1
- then
- echo "$*"
- fi
false
fi
}
@@ -1868,3 +1864,14 @@ test_is_magic_mtime () {
rm -f .git/test-mtime-actual
return $ret
}
+
+# Given two filenames, parse both using 'git config --list --file'
+# and compare the sorted output of those commands. Useful when
+# wanting to ignore whitespace differences and sorting concerns.
+test_cmp_config_output () {
+ git config --list --file="$1" >config-expect &&
+ git config --list --file="$2" >config-actual &&
+ sort config-expect >sorted-expect &&
+ sort config-actual >sorted-actual &&
+ test_cmp sorted-expect sorted-actual
+}
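A minimal usage sketch for the new helper (the test title and file names are illustrative, not taken from the patch):

test_expect_success 'rewritten config matches the original' '
	test_cmp_config_output original.conf rewritten.conf
'
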
diff --git a/t/test-lib.sh b/t/test-lib.sh
index a65df2fd22..090cfb9595 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -48,6 +48,28 @@ then
exit 1
fi
+# For CMake the top-level source directory is different from our build
+# directory. With the top-level Makefile they're the same.
+GIT_SOURCE_DIR="$GIT_BUILD_DIR"
+
+GIT_AUTO_CONTRIB_BUILDSYSTEMS_OUT=
+if test -n "$GIT_TEST_BUILD_DIR"
+then
+ GIT_BUILD_DIR="$GIT_TEST_BUILD_DIR"
+elif ! test -x "$GIT_BUILD_DIR/git" &&
+ test -x "$GIT_BUILD_DIR/contrib/buildsystems/out/git"
+then
+ GIT_BUILD_DIR="$GIT_SOURCE_DIR/contrib/buildsystems/out"
+ GIT_AUTO_CONTRIB_BUILDSYSTEMS_OUT=t
+
+ # On Windows, we must convert Windows paths lest they contain a colon
+ case "$(uname -s)" in
+ *MINGW*)
+ GIT_BUILD_DIR="$(cygpath -au "$GIT_BUILD_DIR")"
+ ;;
+ esac
+fi
+
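A rough sketch of how this might be driven explicitly rather than auto-detected (the invocation is an assumption, not taken from the patch):

# Hypothetical invocation from the t/ directory; the path matches the
# location the auto-detection above falls back to:
GIT_TEST_BUILD_DIR="$PWD/../contrib/buildsystems/out" ./t0001-init.sh -v
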
# Prepend a string to a VAR using an arbitrary ":" delimiter, not
# adding the delimiter if VAR or VALUE is empty. I.e. a generalized:
#
@@ -563,9 +585,11 @@ case $GIT_TEST_FSYNC in
esac
# Add libc MALLOC and MALLOC_PERTURB test only if we are not executing
-# the test with valgrind and have not compiled with SANITIZE=address.
+# the test with valgrind and have not compiled with conflicting SANITIZE
+# options.
if test -n "$valgrind" ||
test -n "$SANITIZE_ADDRESS" ||
+ test -n "$SANITIZE_LEAK" ||
test -n "$TEST_NO_MALLOC_CHECK"
then
setup_malloc_check () {
@@ -1435,7 +1459,7 @@ then
make_valgrind_symlink $file
done
# special-case the mergetools loadables
- make_symlink "$GIT_BUILD_DIR"/mergetools "$GIT_VALGRIND/bin/mergetools"
+ make_symlink "$GIT_SOURCE_DIR"/mergetools "$GIT_VALGRIND/bin/mergetools"
OLDIFS=$IFS
IFS=:
for path in $PATH
@@ -1488,6 +1512,8 @@ GIT_CONFIG_NOSYSTEM=1
GIT_ATTR_NOSYSTEM=1
GIT_CEILING_DIRECTORIES="$TRASH_DIRECTORY/.."
export PATH GIT_EXEC_PATH GIT_TEMPLATE_DIR GIT_CONFIG_NOSYSTEM GIT_ATTR_NOSYSTEM GIT_CEILING_DIRECTORIES
+MERGE_TOOLS_DIR="$GIT_SOURCE_DIR/mergetools"
+export MERGE_TOOLS_DIR
if test -z "$GIT_TEST_CMP"
then
@@ -1616,6 +1642,13 @@ remove_trash_directory "$TRASH_DIRECTORY" || {
BAIL_OUT 'cannot prepare test area'
}
+# Emit this only now, because earlier we didn't have "say", and don't
+# emit it in anything using lib-subtest.sh
+if test -n "$GIT_AUTO_CONTRIB_BUILDSYSTEMS_OUT" && test -t 1
+then
+ say "setup: had no ../git, but found & used cmake built git in ../contrib/buildsystems/out/git"
+fi
+
remove_trash=t
if test -z "$TEST_NO_CREATE_REPO"
then
@@ -1942,3 +1975,7 @@ test_lazy_prereq FSMONITOR_DAEMON '
git version --build-options >output &&
grep "feature: fsmonitor--daemon" output
'
+
+test_lazy_prereq PACKED_REFS_V1 '
+ test "$GIT_TEST_PACKED_REFS_VERSION" -ne "2"
+'
\ No newline at end of file
diff --git a/tmp-objdir.c b/tmp-objdir.c
index adf6033549..2a2012eb6d 100644
--- a/tmp-objdir.c
+++ b/tmp-objdir.c
@@ -18,7 +18,7 @@ struct tmp_objdir {
/*
* Allow only one tmp_objdir at a time in a running process, which simplifies
- * our signal/atexit cleanup routines. It's doubtful callers will ever need
+ * our atexit cleanup routines. It's doubtful callers will ever need
* more than one, and we can expand later if so. You can have many such
* tmp_objdirs simultaneously in many processes, of course.
*/
@@ -31,7 +31,7 @@ static void tmp_objdir_free(struct tmp_objdir *t)
free(t);
}
-static int tmp_objdir_destroy_1(struct tmp_objdir *t, int on_signal)
+int tmp_objdir_destroy(struct tmp_objdir *t)
{
int err;
@@ -41,44 +41,21 @@ static int tmp_objdir_destroy_1(struct tmp_objdir *t, int on_signal)
if (t == the_tmp_objdir)
the_tmp_objdir = NULL;
- if (!on_signal && t->prev_odb)
+ if (t->prev_odb)
restore_primary_odb(t->prev_odb, t->path.buf);
- /*
- * This may use malloc via strbuf_grow(), but we should
- * have pre-grown t->path sufficiently so that this
- * doesn't happen in practice.
- */
err = remove_dir_recursively(&t->path, 0);
- /*
- * When we are cleaning up due to a signal, we won't bother
- * freeing memory; it may cause a deadlock if the signal
- * arrived while libc's allocator lock is held.
- */
- if (!on_signal)
- tmp_objdir_free(t);
+ tmp_objdir_free(t);
return err;
}
-int tmp_objdir_destroy(struct tmp_objdir *t)
-{
- return tmp_objdir_destroy_1(t, 0);
-}
-
static void remove_tmp_objdir(void)
{
tmp_objdir_destroy(the_tmp_objdir);
}
-static void remove_tmp_objdir_on_signal(int signo)
-{
- tmp_objdir_destroy_1(the_tmp_objdir, 1);
- sigchain_pop(signo);
- raise(signo);
-}
-
void tmp_objdir_discard_objects(struct tmp_objdir *t)
{
remove_dir_recursively(&t->path, REMOVE_DIR_KEEP_TOPLEVEL);
@@ -152,14 +129,6 @@ struct tmp_objdir *tmp_objdir_create(const char *prefix)
*/
strbuf_addf(&t->path, "%s/tmp_objdir-%s-XXXXXX", get_object_directory(), prefix);
- /*
- * Grow the strbuf beyond any filename we expect to be placed in it.
- * If tmp_objdir_destroy() is called by a signal handler, then
- * we should be able to use the strbuf to remove files without
- * having to call malloc.
- */
- strbuf_grow(&t->path, 1024);
-
if (!mkdtemp(t->path.buf)) {
/* free, not destroy, as we never touched the filesystem */
tmp_objdir_free(t);
@@ -169,7 +138,6 @@ struct tmp_objdir *tmp_objdir_create(const char *prefix)
the_tmp_objdir = t;
if (!installed_handlers) {
atexit(remove_tmp_objdir);
- sigchain_push_common(remove_tmp_objdir_on_signal);
installed_handlers++;
}
diff --git a/tmp-objdir.h b/tmp-objdir.h
index 76efc7edee..237d96b660 100644
--- a/tmp-objdir.h
+++ b/tmp-objdir.h
@@ -10,9 +10,11 @@
*
* Example:
*
+ * struct child_process child = CHILD_PROCESS_INIT;
* struct tmp_objdir *t = tmp_objdir_create("incoming");
- * if (!run_command_v_opt_cd_env(cmd, 0, NULL, tmp_objdir_env(t)) &&
- * !tmp_objdir_migrate(t))
+ * strvec_push(&child.args, cmd);
+ * strvec_pushv(&child.env, tmp_objdir_env(t));
+ * if (!run_command(&child) && !tmp_objdir_migrate(t))
* printf("success!\n");
* else
* die("failed...tmp_objdir will clean up for us");
diff --git a/trace2.c b/trace2.c
index 0c0a11e07d..279bddf53b 100644
--- a/trace2.c
+++ b/trace2.c
@@ -8,11 +8,13 @@
#include "version.h"
#include "trace2/tr2_cfg.h"
#include "trace2/tr2_cmd_name.h"
+#include "trace2/tr2_ctr.h"
#include "trace2/tr2_dst.h"
#include "trace2/tr2_sid.h"
#include "trace2/tr2_sysenv.h"
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
+#include "trace2/tr2_tmr.h"
static int trace2_enabled;
@@ -52,7 +54,7 @@ static struct tr2_tgt *tr2_tgt_builtins[] =
* Force (rather than lazily) initialize any of the requested
* builtin TRACE2 targets at startup (and before we've seen an
* actual TRACE2 event call) so we can see if we need to setup
- * the TR2 and TLS machinery.
+ * private data structures and thread-local storage.
*
* Return the number of builtin targets enabled.
*/
@@ -83,6 +85,39 @@ static void tr2_tgt_disable_builtins(void)
tgt_j->pfn_term();
}
+/*
+ * The signature of this function must match the pfn_timer
+ * method in the targets. (Think of this as an apply operation
+ * across the set of active targets.)
+ */
+static void tr2_tgt_emit_a_timer(const struct tr2_timer_metadata *meta,
+ const struct tr2_timer *timer,
+ int is_final_data)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_timer)
+ tgt_j->pfn_timer(meta, timer, is_final_data);
+}
+
+/*
+ * The signature of this function must match the pfn_counter
+ * method in the targets.
+ */
+static void tr2_tgt_emit_a_counter(const struct tr2_counter_metadata *meta,
+ const struct tr2_counter *counter,
+ int is_final_data)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_counter)
+ tgt_j->pfn_counter(meta, counter, is_final_data);
+}
+
static int tr2main_exit_code;
/*
@@ -110,6 +145,32 @@ static void tr2main_atexit_handler(void)
*/
tr2tls_pop_unwind_self();
+ /*
+ * Some timers want per-thread details. If the main thread
+ * used one of those timers, emit the details now (before
+ * we emit the aggregate timer values).
+ *
+ * Likewise for counters.
+ */
+ tr2_emit_per_thread_timers(tr2_tgt_emit_a_timer);
+ tr2_emit_per_thread_counters(tr2_tgt_emit_a_counter);
+
+ /*
+ * Add stopwatch timer and counter data for the main thread to
+ * the final totals. And then emit the final values.
+ *
+ * Technically, we shouldn't need to hold the lock to update
+ * and output the final_timer_block and final_counter_block
+ * (since all other threads should be dead by now), but it
+ * doesn't hurt anything.
+ */
+ tr2tls_lock();
+ tr2_update_final_timers();
+ tr2_update_final_counters();
+ tr2_emit_final_timers(tr2_tgt_emit_a_timer);
+ tr2_emit_final_counters(tr2_tgt_emit_a_counter);
+ tr2tls_unlock();
+
for_each_wanted_builtin (j, tgt_j)
if (tgt_j->pfn_atexit)
tgt_j->pfn_atexit(us_elapsed_absolute,
@@ -466,7 +527,7 @@ void trace2_exec_result_fl(const char *file, int line, int exec_id, int code)
file, line, us_elapsed_absolute, exec_id, code);
}
-void trace2_thread_start_fl(const char *file, int line, const char *thread_name)
+void trace2_thread_start_fl(const char *file, int line, const char *thread_base_name)
{
struct tr2_tgt *tgt_j;
int j;
@@ -488,14 +549,14 @@ void trace2_thread_start_fl(const char *file, int line, const char *thread_name)
*/
trace2_region_enter_printf_fl(file, line, NULL, NULL, NULL,
"thread-proc on main: %s",
- thread_name);
+ thread_base_name);
return;
}
us_now = getnanotime() / 1000;
us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
- tr2tls_create_self(thread_name, us_now);
+ tr2tls_create_self(thread_base_name, us_now);
for_each_wanted_builtin (j, tgt_j)
if (tgt_j->pfn_thread_start_fl)
@@ -541,6 +602,25 @@ void trace2_thread_exit_fl(const char *file, int line)
tr2tls_pop_unwind_self();
us_elapsed_thread = tr2tls_region_elasped_self(us_now);
+ /*
+ * Some timers want per-thread details. If this thread used
+ * one of those timers, emit the details now.
+ *
+ * Likewise for counters.
+ */
+ tr2_emit_per_thread_timers(tr2_tgt_emit_a_timer);
+ tr2_emit_per_thread_counters(tr2_tgt_emit_a_counter);
+
+ /*
+ * Add stopwatch timer and counter data from the current
+ * (non-main) thread to the final totals. (We'll accumulate
+ * data for the main thread later during "atexit".)
+ */
+ tr2tls_lock();
+ tr2_update_final_timers();
+ tr2_update_final_counters();
+ tr2tls_unlock();
+
for_each_wanted_builtin (j, tgt_j)
if (tgt_j->pfn_thread_exit_fl)
tgt_j->pfn_thread_exit_fl(file, line,
@@ -795,6 +875,39 @@ void trace2_printf_fl(const char *file, int line, const char *fmt, ...)
va_end(ap);
}
+void trace2_timer_start(enum trace2_timer_id tid)
+{
+ if (!trace2_enabled)
+ return;
+
+ if (tid < 0 || tid >= TRACE2_NUMBER_OF_TIMERS)
+ BUG("trace2_timer_start: invalid timer id: %d", tid);
+
+ tr2_start_timer(tid);
+}
+
+void trace2_timer_stop(enum trace2_timer_id tid)
+{
+ if (!trace2_enabled)
+ return;
+
+ if (tid < 0 || tid >= TRACE2_NUMBER_OF_TIMERS)
+ BUG("trace2_timer_stop: invalid timer id: %d", tid);
+
+ tr2_stop_timer(tid);
+}
+
+void trace2_counter_add(enum trace2_counter_id cid, uint64_t value)
+{
+ if (!trace2_enabled)
+ return;
+
+ if (cid < 0 || cid >= TRACE2_NUMBER_OF_COUNTERS)
+ BUG("trace2_counter_add: invalid counter id: %d", cid);
+
+ tr2_counter_increment(cid, value);
+}
+
const char *trace2_session_id(void)
{
return tr2_sid_get();
diff --git a/trace2.h b/trace2.h
index 88d906ea83..4ced30c0db 100644
--- a/trace2.h
+++ b/trace2.h
@@ -51,6 +51,8 @@ struct json_writer;
* [] trace2_region* -- emit region nesting messages.
* [] trace2_data* -- emit region/thread/repo data messages.
* [] trace2_printf* -- legacy trace[1] messages.
+ * [] trace2_timer* -- stopwatch timers (messages are deferred).
+ * [] trace2_counter* -- global counters (messages are deferred).
*/
/*
@@ -73,8 +75,7 @@ void trace2_initialize_clock(void);
/*
* Initialize TRACE2 tracing facility if any of the builtin TRACE2
* targets are enabled in the system config or the environment.
- * This includes setting up the Trace2 thread local storage (TLS).
- * Emits a 'version' message containing the version of git
+ * This emits a 'version' message containing the version of git
* and the Trace2 protocol.
*
* This function should be called from `main()` as early as possible in
@@ -302,21 +303,23 @@ void trace2_exec_result_fl(const char *file, int line, int exec_id, int code);
/*
* Emit a 'thread_start' event. This must be called from inside the
- * thread-proc to set up the trace2 TLS data for the thread.
+ * thread-proc to allow the thread to create its own thread-local
+ * storage.
*
- * Thread names should be descriptive, like "preload_index".
- * Thread names will be decorated with an instance number automatically.
+ * The thread base name should be descriptive, like "preload_index" or
+ * taken from the thread-proc function. A unique thread name will be
+ * created from the given base name and the thread id automatically.
*/
void trace2_thread_start_fl(const char *file, int line,
- const char *thread_name);
+ const char *thread_base_name);
-#define trace2_thread_start(thread_name) \
- trace2_thread_start_fl(__FILE__, __LINE__, (thread_name))
+#define trace2_thread_start(thread_base_name) \
+ trace2_thread_start_fl(__FILE__, __LINE__, (thread_base_name))
/*
* Emit a 'thread_exit' event. This must be called from inside the
- * thread-proc to report thread-specific data and cleanup TLS data
- * for the thread.
+ * thread-proc so that the thread can access and clean up its
+ * thread-local storage.
*/
void trace2_thread_exit_fl(const char *file, int line);
@@ -485,6 +488,84 @@ void trace2_printf_fl(const char *file, int line, const char *fmt, ...);
#define trace2_printf(...) trace2_printf_fl(__FILE__, __LINE__, __VA_ARGS__)
/*
+ * Define the set of stopwatch timers.
+ *
+ * We can add more at any time, but they must be defined at compile
+ * time (to avoid the need to dynamically allocate and synchronize
+ * them between different threads).
+ *
+ * These must start at 0 and be contiguous (because we use them
+ * elsewhere as array indexes).
+ *
+ * Any values added to this enum must also be added to the
+ * `tr2_timer_metadata[]` in `trace2/tr2_tmr.c`.
+ */
+enum trace2_timer_id {
+ /*
+ * Define two timers for testing. See `t/helper/test-trace2.c`.
+ * These can be used for ad hoc testing, but should not be used
+ * for permanent analysis code.
+ */
+ TRACE2_TIMER_ID_TEST1 = 0, /* emits summary event only */
+ TRACE2_TIMER_ID_TEST2, /* emits summary and thread events */
+
+ /* Add additional timer definitions before here. */
+ TRACE2_NUMBER_OF_TIMERS
+};
+
+/*
+ * Start/Stop the indicated stopwatch timer in the current thread.
+ *
+ * The time spent by the current thread between the _start and _stop
+ * calls will be added to the thread's partial sum for this timer.
+ *
+ * Timer events are emitted at thread and program exit.
+ *
+ * Note: Since the stopwatch API routines do not generate individual
+ * events, they do not take (file, line) arguments. Similarly, the
+ * category and timer name values are defined at compile-time in the
+ * timer definitions array, so they are not needed here in the API.
+ */
+void trace2_timer_start(enum trace2_timer_id tid);
+void trace2_timer_stop(enum trace2_timer_id tid);
+
+/*
+ * Define the set of global counters.
+ *
+ * We can add more at any time, but they must be defined at compile
+ * time (to avoid the need to dynamically allocate and synchronize
+ * them between different threads).
+ *
+ * These must start at 0 and be contiguous (because we use them
+ * elsewhere as array indexes).
+ *
+ * Any values added to this enum must also be added to the
+ * `tr2_counter_metadata[]` in `trace2/tr2_ctr.c`.
+ */
+enum trace2_counter_id {
+ /*
+ * Define two counters for testing. See `t/helper/test-trace2.c`.
+ * These can be used for ad hoc testing, but should not be used
+ * for permanent analysis code.
+ */
+ TRACE2_COUNTER_ID_TEST1 = 0, /* emits summary event only */
+ TRACE2_COUNTER_ID_TEST2, /* emits summary and thread events */
+
+ /* Add additional counter definitions before here. */
+ TRACE2_NUMBER_OF_COUNTERS
+};
+
+/*
+ * Increase the named global counter by value.
+ *
+ * Note that this adds `value` to the current thread's partial sum for
+ * this counter (without locking) and that the complete sum is not
+ * available until all threads have exited, so it does not return the
+ * new value of the counter.
+ */
+void trace2_counter_add(enum trace2_counter_id cid, uint64_t value);
+
+/*
* Optional platform-specific code to dump information about the
* current and any parent process(es). This is intended to allow
* post-processors to know who spawned this git instance and anything
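As a usage sketch of the stopwatch timer and counter API above (the helper is hypothetical and reuses the built-in TEST ids; a real caller would add its own enum values plus matching metadata entries):

#include "cache.h"
#include "trace2.h"

/* Hypothetical helper: time a batch of lstat() calls and count the hits. */
static void lstat_paths(const char **paths, size_t nr)
{
	struct stat st;
	size_t i;

	trace2_timer_start(TRACE2_TIMER_ID_TEST1);
	for (i = 0; i < nr; i++)
		if (!lstat(paths[i], &st))
			trace2_counter_add(TRACE2_COUNTER_ID_TEST1, 1);
	trace2_timer_stop(TRACE2_TIMER_ID_TEST1);
}

The accumulated totals are emitted as "timer"/"counter" events between the "exit" and "atexit" events, as the tr2_tmr.h and tr2_ctr.h comments below describe.
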
diff --git a/trace2/tr2_ctr.c b/trace2/tr2_ctr.c
new file mode 100644
index 0000000000..483ca7c308
--- /dev/null
+++ b/trace2/tr2_ctr.c
@@ -0,0 +1,101 @@
+#include "cache.h"
+#include "thread-utils.h"
+#include "trace2/tr2_tgt.h"
+#include "trace2/tr2_tls.h"
+#include "trace2/tr2_ctr.h"
+
+/*
+ * A global counter block to aggregate values from the partial sums
+ * from each thread.
+ */
+static struct tr2_counter_block final_counter_block; /* access under tr2tls_mutex */
+
+/*
+ * Define metadata for each global counter.
+ *
+ * This array must match the "enum trace2_counter_id" and the values
+ * in "struct tr2_counter_block.counter[*]".
+ */
+static struct tr2_counter_metadata tr2_counter_metadata[TRACE2_NUMBER_OF_COUNTERS] = {
+ [TRACE2_COUNTER_ID_TEST1] = {
+ .category = "test",
+ .name = "test1",
+ .want_per_thread_events = 0,
+ },
+ [TRACE2_COUNTER_ID_TEST2] = {
+ .category = "test",
+ .name = "test2",
+ .want_per_thread_events = 1,
+ },
+
+ /* Add additional metadata before here. */
+};
+
+void tr2_counter_increment(enum trace2_counter_id cid, uint64_t value)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ struct tr2_counter *c = &ctx->counter_block.counter[cid];
+
+ c->value += value;
+
+ ctx->used_any_counter = 1;
+ if (tr2_counter_metadata[cid].want_per_thread_events)
+ ctx->used_any_per_thread_counter = 1;
+}
+
+void tr2_update_final_counters(void)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ enum trace2_counter_id cid;
+
+ if (!ctx->used_any_counter)
+ return;
+
+ /*
+ * Accessing `final_counter_block` requires holding `tr2tls_mutex`.
+ * We assume that our caller is holding the lock.
+ */
+
+ for (cid = 0; cid < TRACE2_NUMBER_OF_COUNTERS; cid++) {
+ struct tr2_counter *c_final = &final_counter_block.counter[cid];
+ const struct tr2_counter *c = &ctx->counter_block.counter[cid];
+
+ c_final->value += c->value;
+ }
+}
+
+void tr2_emit_per_thread_counters(tr2_tgt_evt_counter_t *fn_apply)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ enum trace2_counter_id cid;
+
+ if (!ctx->used_any_per_thread_counter)
+ return;
+
+ /*
+ * For each counter, if the counter wants per-thread events
+ * and this thread used it (the value is non-zero), emit it.
+ */
+ for (cid = 0; cid < TRACE2_NUMBER_OF_COUNTERS; cid++)
+ if (tr2_counter_metadata[cid].want_per_thread_events &&
+ ctx->counter_block.counter[cid].value)
+ fn_apply(&tr2_counter_metadata[cid],
+ &ctx->counter_block.counter[cid],
+ 0);
+}
+
+void tr2_emit_final_counters(tr2_tgt_evt_counter_t *fn_apply)
+{
+ enum trace2_counter_id cid;
+
+ /*
+ * Accessing `final_counter_block` requires holding `tr2tls_mutex`.
+ * We assume that our caller is holding the lock.
+ */
+
+ for (cid = 0; cid < TRACE2_NUMBER_OF_COUNTERS; cid++)
+ if (final_counter_block.counter[cid].value)
+ fn_apply(&tr2_counter_metadata[cid],
+ &final_counter_block.counter[cid],
+ 1);
+}
diff --git a/trace2/tr2_ctr.h b/trace2/tr2_ctr.h
new file mode 100644
index 0000000000..a2267ee990
--- /dev/null
+++ b/trace2/tr2_ctr.h
@@ -0,0 +1,104 @@
+#ifndef TR2_CTR_H
+#define TR2_CTR_H
+
+#include "trace2.h"
+#include "trace2/tr2_tgt.h"
+
+/*
+ * Define a mechanism to allow global "counters".
+ *
+ * Counters can be used to count interesting activity that does not
+ * fit the "region and data" model, such as code called from many
+ * different regions and/or where you want to count a number of items
+ * but don't control when the last item will be processed, such as
+ * counting the number of calls to `lstat()`.
+ *
+ * Counters differ from Trace2 "data" events. Data events are emitted
+ * immediately and are appropriate for documenting loop counters at
+ * the end of a region, for example. Counter values are accumulated
+ * during the program and final counter values are emitted at program
+ * exit.
+ *
+ * To make this model efficient, we define a compile-time fixed set of
+ * counters and counter ids using a fixed size "counter block" array
+ * in thread-local storage. This gives us constant time, lock-free
+ * access to each counter within each thread. This lets us avoid the
+ * complexities of dynamically allocating a counter and sharing that
+ * definition with other threads.
+ *
+ * Each thread uses the counter block in its thread-local storage to
+ * increment partial sums for each counter (without locking). When a
+ * thread exits, those partial sums are (under lock) added to the
+ * global final sum.
+ *
+ * Partial sums for each counter are optionally emitted when a thread
+ * exits.
+ *
+ * Final sums for each counter are emitted between the "exit" and
+ * "atexit" events.
+ *
+ * A parallel "counter metadata" table contains the "category" and
+ * "name" fields for each counter. This eliminates the need to
+ * include those args in the various counter APIs.
+ */
+
+/*
+ * The definition of an individual counter as used by an individual
+ * thread (and later in aggregation).
+ */
+struct tr2_counter {
+ uint64_t value;
+};
+
+/*
+ * Metadata for a counter.
+ */
+struct tr2_counter_metadata {
+ const char *category;
+ const char *name;
+
+ /*
+ * True if we should emit per-thread events for this counter
+ * when individual threads exit.
+ */
+ unsigned int want_per_thread_events:1;
+};
+
+/*
+ * A compile-time fixed block of counters to insert into thread-local
+ * storage. This wrapper is used to avoid quirks of C and the usual
+ * need to pass an array size argument.
+ */
+struct tr2_counter_block {
+ struct tr2_counter counter[TRACE2_NUMBER_OF_COUNTERS];
+};
+
+/*
+ * Private routines used by trace2.c to increment a counter for the
+ * current thread.
+ */
+void tr2_counter_increment(enum trace2_counter_id cid, uint64_t value);
+
+/*
+ * Add the current thread's counter data to the global totals.
+ * This is called during thread-exit.
+ *
+ * Caller must be holding the tr2tls_mutex.
+ */
+void tr2_update_final_counters(void);
+
+/*
+ * Emit per-thread counter data for the current thread.
+ * This is called during thread-exit.
+ */
+void tr2_emit_per_thread_counters(tr2_tgt_evt_counter_t *fn_apply);
+
+/*
+ * Emit global counter values.
+ * This is called during atexit handling.
+ *
+ * Caller must be holding the tr2tls_mutex.
+ */
+void tr2_emit_final_counters(tr2_tgt_evt_counter_t *fn_apply);
+
+#endif /* TR2_CTR_H */
diff --git a/trace2/tr2_tgt.h b/trace2/tr2_tgt.h
index 65f94e1574..bf8745c4f0 100644
--- a/trace2/tr2_tgt.h
+++ b/trace2/tr2_tgt.h
@@ -4,6 +4,12 @@
struct child_process;
struct repository;
struct json_writer;
+struct tr2_timer_metadata;
+struct tr2_timer;
+struct tr2_counter_metadata;
+struct tr2_counter;
+
+#define NS_TO_SEC(ns) ((double)(ns) / 1.0e9)
/*
* Function prototypes for a TRACE2 "target" vtable.
@@ -96,6 +102,14 @@ typedef void(tr2_tgt_evt_printf_va_fl_t)(const char *file, int line,
uint64_t us_elapsed_absolute,
const char *fmt, va_list ap);
+typedef void(tr2_tgt_evt_timer_t)(const struct tr2_timer_metadata *meta,
+ const struct tr2_timer *timer,
+ int is_final_data);
+
+typedef void(tr2_tgt_evt_counter_t)(const struct tr2_counter_metadata *meta,
+ const struct tr2_counter *counter,
+ int is_final_data);
+
/*
* "vtable" for a TRACE2 target. Use NULL if a target does not want
* to emit that message.
@@ -132,6 +146,8 @@ struct tr2_tgt {
tr2_tgt_evt_data_fl_t *pfn_data_fl;
tr2_tgt_evt_data_json_fl_t *pfn_data_json_fl;
tr2_tgt_evt_printf_va_fl_t *pfn_printf_va_fl;
+ tr2_tgt_evt_timer_t *pfn_timer;
+ tr2_tgt_evt_counter_t *pfn_counter;
};
/* clang-format on */
diff --git a/trace2/tr2_tgt_event.c b/trace2/tr2_tgt_event.c
index 37a3163be1..16f6332755 100644
--- a/trace2/tr2_tgt_event.c
+++ b/trace2/tr2_tgt_event.c
@@ -9,6 +9,7 @@
#include "trace2/tr2_sysenv.h"
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
+#include "trace2/tr2_tmr.h"
static struct tr2_dst tr2dst_event = {
.sysenv_var = TR2_SYSENV_EVENT,
@@ -90,7 +91,7 @@ static void event_fmt_prepare(const char *event_name, const char *file,
jw_object_string(jw, "event", event_name);
jw_object_string(jw, "sid", tr2_sid_get());
- jw_object_string(jw, "thread", ctx->thread_name.buf);
+ jw_object_string(jw, "thread", ctx->thread_name);
/*
* In brief mode, only emit <time> on these 2 event types.
@@ -617,6 +618,48 @@ static void fn_data_json_fl(const char *file, int line,
}
}
+static void fn_timer(const struct tr2_timer_metadata *meta,
+ const struct tr2_timer *timer,
+ int is_final_data)
+{
+ const char *event_name = is_final_data ? "timer" : "th_timer";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_total = NS_TO_SEC(timer->total_ns);
+ double t_min = NS_TO_SEC(timer->min_ns);
+ double t_max = NS_TO_SEC(timer->max_ns);
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, __FILE__, __LINE__, NULL, &jw);
+ jw_object_string(&jw, "category", meta->category);
+ jw_object_string(&jw, "name", meta->name);
+ jw_object_intmax(&jw, "intervals", timer->interval_count);
+ jw_object_double(&jw, "t_total", 6, t_total);
+ jw_object_double(&jw, "t_min", 6, t_min);
+ jw_object_double(&jw, "t_max", 6, t_max);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_counter(const struct tr2_counter_metadata *meta,
+ const struct tr2_counter *counter,
+ int is_final_data)
+{
+ const char *event_name = is_final_data ? "counter" : "th_counter";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, __FILE__, __LINE__, NULL, &jw);
+ jw_object_string(&jw, "category", meta->category);
+ jw_object_string(&jw, "name", meta->name);
+ jw_object_intmax(&jw, "count", counter->value);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
struct tr2_tgt tr2_tgt_event = {
.pdst = &tr2dst_event,
@@ -648,4 +691,6 @@ struct tr2_tgt tr2_tgt_event = {
.pfn_data_fl = fn_data_fl,
.pfn_data_json_fl = fn_data_json_fl,
.pfn_printf_va_fl = NULL,
+ .pfn_timer = fn_timer,
+ .pfn_counter = fn_counter,
};
diff --git a/trace2/tr2_tgt_normal.c b/trace2/tr2_tgt_normal.c
index 69f8033077..fbbef68dfc 100644
--- a/trace2/tr2_tgt_normal.c
+++ b/trace2/tr2_tgt_normal.c
@@ -8,6 +8,7 @@
#include "trace2/tr2_tbuf.h"
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
+#include "trace2/tr2_tmr.h"
static struct tr2_dst tr2dst_normal = {
.sysenv_var = TR2_SYSENV_NORMAL,
@@ -329,6 +330,42 @@ static void fn_printf_va_fl(const char *file, int line,
strbuf_release(&buf_payload);
}
+static void fn_timer(const struct tr2_timer_metadata *meta,
+ const struct tr2_timer *timer,
+ int is_final_data)
+{
+ const char *event_name = is_final_data ? "timer" : "th_timer";
+ struct strbuf buf_payload = STRBUF_INIT;
+ double t_total = NS_TO_SEC(timer->total_ns);
+ double t_min = NS_TO_SEC(timer->min_ns);
+ double t_max = NS_TO_SEC(timer->max_ns);
+
+ strbuf_addf(&buf_payload, ("%s %s/%s"
+ " intervals:%"PRIu64
+ " total:%8.6f min:%8.6f max:%8.6f"),
+ event_name, meta->category, meta->name,
+ timer->interval_count,
+ t_total, t_min, t_max);
+
+ normal_io_write_fl(__FILE__, __LINE__, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_counter(const struct tr2_counter_metadata *meta,
+ const struct tr2_counter *counter,
+ int is_final_data)
+{
+ const char *event_name = is_final_data ? "counter" : "th_counter";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "%s %s/%s value:%"PRIu64,
+ event_name, meta->category, meta->name,
+ counter->value);
+
+ normal_io_write_fl(__FILE__, __LINE__, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
struct tr2_tgt tr2_tgt_normal = {
.pdst = &tr2dst_normal,
@@ -360,4 +397,6 @@ struct tr2_tgt tr2_tgt_normal = {
.pfn_data_fl = NULL,
.pfn_data_json_fl = NULL,
.pfn_printf_va_fl = fn_printf_va_fl,
+ .pfn_timer = fn_timer,
+ .pfn_counter = fn_counter,
};
diff --git a/trace2/tr2_tgt_perf.c b/trace2/tr2_tgt_perf.c
index 8cb792488c..adae803263 100644
--- a/trace2/tr2_tgt_perf.c
+++ b/trace2/tr2_tgt_perf.c
@@ -10,6 +10,7 @@
#include "trace2/tr2_tbuf.h"
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
+#include "trace2/tr2_tmr.h"
static struct tr2_dst tr2dst_perf = {
.sysenv_var = TR2_SYSENV_PERF,
@@ -108,7 +109,7 @@ static void perf_fmt_prepare(const char *event_name,
strbuf_addf(buf, "d%d | ", tr2_sid_depth());
strbuf_addf(buf, "%-*s | %-*s | ", TR2_MAX_THREAD_NAME,
- ctx->thread_name.buf, TR2FMT_PERF_MAX_EVENT_NAME,
+ ctx->thread_name, TR2FMT_PERF_MAX_EVENT_NAME,
event_name);
len = buf->len + TR2FMT_PERF_REPO_WIDTH;
@@ -555,6 +556,44 @@ static void fn_printf_va_fl(const char *file, int line,
strbuf_release(&buf_payload);
}
+static void fn_timer(const struct tr2_timer_metadata *meta,
+ const struct tr2_timer *timer,
+ int is_final_data)
+{
+ const char *event_name = is_final_data ? "timer" : "th_timer";
+ struct strbuf buf_payload = STRBUF_INIT;
+ double t_total = NS_TO_SEC(timer->total_ns);
+ double t_min = NS_TO_SEC(timer->min_ns);
+ double t_max = NS_TO_SEC(timer->max_ns);
+
+ strbuf_addf(&buf_payload, ("name:%s"
+ " intervals:%"PRIu64
+ " total:%8.6f min:%8.6f max:%8.6f"),
+ meta->name,
+ timer->interval_count,
+ t_total, t_min, t_max);
+
+ perf_io_write_fl(__FILE__, __LINE__, event_name, NULL, NULL, NULL,
+ meta->category, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_counter(const struct tr2_counter_metadata *meta,
+ const struct tr2_counter *counter,
+ int is_final_data)
+{
+ const char *event_name = is_final_data ? "counter" : "th_counter";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "name:%s value:%"PRIu64,
+ meta->name,
+ counter->value);
+
+ perf_io_write_fl(__FILE__, __LINE__, event_name, NULL, NULL, NULL,
+ meta->category, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
struct tr2_tgt tr2_tgt_perf = {
.pdst = &tr2dst_perf,
@@ -586,4 +625,6 @@ struct tr2_tgt tr2_tgt_perf = {
.pfn_data_fl = fn_data_fl,
.pfn_data_json_fl = fn_data_json_fl,
.pfn_printf_va_fl = fn_printf_va_fl,
+ .pfn_timer = fn_timer,
+ .pfn_counter = fn_counter,
};
diff --git a/trace2/tr2_tls.c b/trace2/tr2_tls.c
index 7da94aba52..04900bb4c3 100644
--- a/trace2/tr2_tls.c
+++ b/trace2/tr2_tls.c
@@ -31,10 +31,11 @@ void tr2tls_start_process_clock(void)
tr2tls_us_start_process = getnanotime() / 1000;
}
-struct tr2tls_thread_ctx *tr2tls_create_self(const char *thread_name,
+struct tr2tls_thread_ctx *tr2tls_create_self(const char *thread_base_name,
uint64_t us_thread_start)
{
struct tr2tls_thread_ctx *ctx = xcalloc(1, sizeof(*ctx));
+ struct strbuf buf = STRBUF_INIT;
/*
* Implicitly "tr2tls_push_self()" to capture the thread's start
@@ -47,12 +48,13 @@ struct tr2tls_thread_ctx *tr2tls_create_self(const char *thread_name,
ctx->thread_id = tr2tls_locked_increment(&tr2_next_thread_id);
- strbuf_init(&ctx->thread_name, 0);
+ strbuf_init(&buf, 0);
if (ctx->thread_id)
- strbuf_addf(&ctx->thread_name, "th%02d:", ctx->thread_id);
- strbuf_addstr(&ctx->thread_name, thread_name);
- if (ctx->thread_name.len > TR2_MAX_THREAD_NAME)
- strbuf_setlen(&ctx->thread_name, TR2_MAX_THREAD_NAME);
+ strbuf_addf(&buf, "th%02d:", ctx->thread_id);
+ strbuf_addstr(&buf, thread_base_name);
+ if (buf.len > TR2_MAX_THREAD_NAME)
+ strbuf_setlen(&buf, TR2_MAX_THREAD_NAME);
+ ctx->thread_name = strbuf_detach(&buf, NULL);
pthread_setspecific(tr2tls_key, ctx);
@@ -69,9 +71,9 @@ struct tr2tls_thread_ctx *tr2tls_get_self(void)
ctx = pthread_getspecific(tr2tls_key);
/*
- * If the thread-proc did not call trace2_thread_start(), we won't
- * have any TLS data associated with the current thread. Fix it
- * here and silently continue.
+ * If the current thread's thread-proc did not call
+ * trace2_thread_start(), then the thread will not have any
+ * thread-local storage. Create it now and silently continue.
*/
if (!ctx)
ctx = tr2tls_create_self("unknown", getnanotime() / 1000);
@@ -95,7 +97,7 @@ void tr2tls_unset_self(void)
pthread_setspecific(tr2tls_key, NULL);
- strbuf_release(&ctx->thread_name);
+ free((char *)ctx->thread_name);
free(ctx->array_us_start);
free(ctx);
}
@@ -113,7 +115,7 @@ void tr2tls_pop_self(void)
struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
if (!ctx->nr_open_regions)
- BUG("no open regions in thread '%s'", ctx->thread_name.buf);
+ BUG("no open regions in thread '%s'", ctx->thread_name);
ctx->nr_open_regions--;
}
@@ -179,3 +181,13 @@ int tr2tls_locked_increment(int *p)
return current_value;
}
+
+void tr2tls_lock(void)
+{
+ pthread_mutex_lock(&tr2tls_mutex);
+}
+
+void tr2tls_unlock(void)
+{
+ pthread_mutex_unlock(&tr2tls_mutex);
+}
diff --git a/trace2/tr2_tls.h b/trace2/tr2_tls.h
index b1e327a928..f9049805d4 100644
--- a/trace2/tr2_tls.h
+++ b/trace2/tr2_tls.h
@@ -2,6 +2,14 @@
#define TR2_TLS_H
#include "strbuf.h"
+#include "trace2/tr2_ctr.h"
+#include "trace2/tr2_tmr.h"
+
+/*
+ * Notice: the term "TLS" refers to "thread-local storage" in the
+ * Trace2 source files. This usage is borrowed from GCC and Windows.
+ * There is NO relation to "transport layer security".
+ */
/*
* Arbitrary limit for thread names for column alignment.
@@ -9,33 +17,40 @@
#define TR2_MAX_THREAD_NAME (24)
struct tr2tls_thread_ctx {
- struct strbuf thread_name;
+ const char *thread_name;
uint64_t *array_us_start;
- int alloc;
- int nr_open_regions; /* plays role of "nr" in ALLOC_GROW */
+ size_t alloc;
+ size_t nr_open_regions; /* plays role of "nr" in ALLOC_GROW */
int thread_id;
+ struct tr2_timer_block timer_block;
+ struct tr2_counter_block counter_block;
+ unsigned int used_any_timer:1;
+ unsigned int used_any_per_thread_timer:1;
+ unsigned int used_any_counter:1;
+ unsigned int used_any_per_thread_counter:1;
};
/*
- * Create TLS data for the current thread. This gives us a place to
- * put per-thread data, such as thread start time, function nesting
- * and a per-thread label for our messages.
- *
- * We assume the first thread is "main". Other threads are given
- * non-zero thread-ids to help distinguish messages from concurrent
- * threads.
+ * Create thread-local storage for the current thread.
*
- * Truncate the thread name if necessary to help with column alignment
- * in printf-style messages.
+ * The first thread in the process will have:
+ * { .thread_id=0, .thread_name="main" }
+ * Subsequent threads are given a non-zero thread_id and a thread_name
+ * constructed from the id and a thread base name (which is usually just
+ * the name of the thread-proc function). For example:
+ * { .thread_id=10, .thread_name="th10:fsm-listen" }
+ * This helps to identify and distinguish messages from concurrent threads.
+ * The ctx.thread_name field is truncated if necessary to help with column
+ * alignment in printf-style messages.
*
* In this and all following functions the term "self" refers to the
* current thread.
*/
-struct tr2tls_thread_ctx *tr2tls_create_self(const char *thread_name,
+struct tr2tls_thread_ctx *tr2tls_create_self(const char *thread_base_name,
uint64_t us_thread_start);
/*
- * Get our TLS data.
+ * Get the thread-local storage pointer of the current thread.
*/
struct tr2tls_thread_ctx *tr2tls_get_self(void);
@@ -45,7 +60,7 @@ struct tr2tls_thread_ctx *tr2tls_get_self(void);
int tr2tls_is_main_thread(void);
/*
- * Free our TLS data.
+ * Free the current thread's thread-local storage.
*/
void tr2tls_unset_self(void);
@@ -81,12 +96,12 @@ uint64_t tr2tls_region_elasped_self(uint64_t us);
uint64_t tr2tls_absolute_elapsed(uint64_t us);
/*
- * Initialize the tr2 TLS system.
+ * Initialize thread-local storage for Trace2.
*/
void tr2tls_init(void);
/*
- * Free all tr2 TLS resources.
+ * Free all Trace2 thread-local storage resources.
*/
void tr2tls_release(void);
@@ -100,4 +115,10 @@ int tr2tls_locked_increment(int *p);
*/
void tr2tls_start_process_clock(void);
+/*
+ * Explicitly lock/unlock our mutex.
+ */
+void tr2tls_lock(void);
+void tr2tls_unlock(void);
+
#endif /* TR2_TLS_H */
diff --git a/trace2/tr2_tmr.c b/trace2/tr2_tmr.c
new file mode 100644
index 0000000000..786762dfd2
--- /dev/null
+++ b/trace2/tr2_tmr.c
@@ -0,0 +1,182 @@
+#include "cache.h"
+#include "thread-utils.h"
+#include "trace2/tr2_tgt.h"
+#include "trace2/tr2_tls.h"
+#include "trace2/tr2_tmr.h"
+
+#define MY_MAX(a, b) ((a) > (b) ? (a) : (b))
+#define MY_MIN(a, b) ((a) < (b) ? (a) : (b))
+
+/*
+ * A global timer block to aggregate values from the partial sums from
+ * each thread.
+ */
+static struct tr2_timer_block final_timer_block; /* access under tr2tls_mutex */
+
+/*
+ * Define metadata for each stopwatch timer.
+ *
+ * This array must match "enum trace2_timer_id" and the values
+ * in "struct tr2_timer_block.timer[*]".
+ */
+static struct tr2_timer_metadata tr2_timer_metadata[TRACE2_NUMBER_OF_TIMERS] = {
+ [TRACE2_TIMER_ID_TEST1] = {
+ .category = "test",
+ .name = "test1",
+ .want_per_thread_events = 0,
+ },
+ [TRACE2_TIMER_ID_TEST2] = {
+ .category = "test",
+ .name = "test2",
+ .want_per_thread_events = 1,
+ },
+
+ /* Add additional metadata before here. */
+};
+
+void tr2_start_timer(enum trace2_timer_id tid)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ struct tr2_timer *t = &ctx->timer_block.timer[tid];
+
+ t->recursion_count++;
+ if (t->recursion_count > 1)
+ return; /* ignore recursive starts */
+
+ t->start_ns = getnanotime();
+}
+
+void tr2_stop_timer(enum trace2_timer_id tid)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ struct tr2_timer *t = &ctx->timer_block.timer[tid];
+ uint64_t ns_now;
+ uint64_t ns_interval;
+
+ assert(t->recursion_count > 0);
+
+ t->recursion_count--;
+ if (t->recursion_count)
+ return; /* still in recursive call(s) */
+
+ ns_now = getnanotime();
+ ns_interval = ns_now - t->start_ns;
+
+ t->total_ns += ns_interval;
+
+ /*
+ * min_ns was initialized to zero (in the xcalloc()) rather
+ * than UINT_MAX when the block of timers was allocated,
+ * so we should always set both the min_ns and max_ns values
+ * the first time that the timer is used.
+ */
+ if (!t->interval_count) {
+ t->min_ns = ns_interval;
+ t->max_ns = ns_interval;
+ } else {
+ t->min_ns = MY_MIN(ns_interval, t->min_ns);
+ t->max_ns = MY_MAX(ns_interval, t->max_ns);
+ }
+
+ t->interval_count++;
+
+ ctx->used_any_timer = 1;
+ if (tr2_timer_metadata[tid].want_per_thread_events)
+ ctx->used_any_per_thread_timer = 1;
+}
+
+void tr2_update_final_timers(void)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ enum trace2_timer_id tid;
+
+ if (!ctx->used_any_timer)
+ return;
+
+ /*
+ * Accessing `final_timer_block` requires holding `tr2tls_mutex`.
+ * We assume that our caller is holding the lock.
+ */
+
+ for (tid = 0; tid < TRACE2_NUMBER_OF_TIMERS; tid++) {
+ struct tr2_timer *t_final = &final_timer_block.timer[tid];
+ struct tr2_timer *t = &ctx->timer_block.timer[tid];
+
+ if (t->recursion_count) {
+ /*
+ * The current thread is exiting with
+ * timer[tid] still running.
+ *
+ * Technically, this is a bug, but I'm going
+ * to ignore it.
+ *
+ * I don't think it is worth calling die()
+ * for. I don't think it is worth killing the
+ * process for this bookkeeping error. We
+ * might want to call warning(), but I'm going
+ * to wait on that.
+ *
+ * The downside here is that total_ns won't
+ * include the current open interval (now -
+ * start_ns). I can live with that.
+ */
+ }
+
+ if (!t->interval_count)
+ continue; /* this timer was not used by this thread */
+
+ t_final->total_ns += t->total_ns;
+
+ /*
+ * final_timer_block.timer[tid].min_ns was initialized to zero
+ * rather than UINT_MAX, so we should
+ * always set both the min_ns and max_ns values the first time
+ * that we add a partial sum into it.
+ */
+ if (!t_final->interval_count) {
+ t_final->min_ns = t->min_ns;
+ t_final->max_ns = t->max_ns;
+ } else {
+ t_final->min_ns = MY_MIN(t_final->min_ns, t->min_ns);
+ t_final->max_ns = MY_MAX(t_final->max_ns, t->max_ns);
+ }
+
+ t_final->interval_count += t->interval_count;
+ }
+}
+
+void tr2_emit_per_thread_timers(tr2_tgt_evt_timer_t *fn_apply)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ enum trace2_timer_id tid;
+
+ if (!ctx->used_any_per_thread_timer)
+ return;
+
+ /*
+ * For each timer, if the timer wants per-thread events and
+ * this thread used it, emit it.
+ */
+ for (tid = 0; tid < TRACE2_NUMBER_OF_TIMERS; tid++)
+ if (tr2_timer_metadata[tid].want_per_thread_events &&
+ ctx->timer_block.timer[tid].interval_count)
+ fn_apply(&tr2_timer_metadata[tid],
+ &ctx->timer_block.timer[tid],
+ 0);
+}
+
+void tr2_emit_final_timers(tr2_tgt_evt_timer_t *fn_apply)
+{
+ enum trace2_timer_id tid;
+
+ /*
+ * Accessing `final_timer_block` requires holding `tr2tls_mutex`.
+ * We assume that our caller is holding the lock.
+ */
+
+ for (tid = 0; tid < TRACE2_NUMBER_OF_TIMERS; tid++)
+ if (final_timer_block.timer[tid].interval_count)
+ fn_apply(&tr2_timer_metadata[tid],
+ &final_timer_block.timer[tid],
+ 1);
+}
diff --git a/trace2/tr2_tmr.h b/trace2/tr2_tmr.h
new file mode 100644
index 0000000000..d575357613
--- /dev/null
+++ b/trace2/tr2_tmr.h
@@ -0,0 +1,140 @@
+#ifndef TR2_TMR_H
+#define TR2_TMR_H
+
+#include "trace2.h"
+#include "trace2/tr2_tgt.h"
+
+/*
+ * Define a mechanism to allow "stopwatch" timers.
+ *
+ * Timers can be used to measure "interesting" activity that does not
+ * fit the "region" model, such as code called from many different
+ * regions (like zlib) and/or where data for individual calls are not
+ * interesting or are too numerous to be efficiently logged.
+ *
+ * Timer values are accumulated during program execution and emitted
+ * to the Trace2 logs at program exit.
+ *
+ * To make this model efficient, we define a compile-time fixed set of
+ * timers and timer ids using a "timer block" array in thread-local
+ * storage. This gives us constant time access to each timer within
+ * each thread, since we want start/stop operations to be as fast as
+ * possible. This lets us avoid the complexities of dynamically
+ * allocating a timer on the first use by a thread and/or possibly
+ * sharing that timer definition with other concurrent threads.
+ * However, this does require that we define the set of timers at
+ * compile time.
+ *
+ * Each thread uses the timer block in its thread-local storage to
+ * compute partial sums for each timer (without locking). When a
+ * thread exits, those partial sums are (under lock) added to the
+ * global final sum.
+ *
+ * Using this "timer block" model costs ~48 bytes per timer per thread
+ * (we have about six uint64 fields per timer). This does increase
+ * the size of the thread-local storage block, but it is allocated (at
+ * thread create time) and not on the thread stack, so I'm not worried
+ * about the size.
+ *
+ * Partial sums for each timer are optionally emitted when a thread
+ * exits.
+ *
+ * Final sums for each timer are emitted between the "exit" and
+ * "atexit" events.
+ *
+ * A parallel "timer metadata" table contains the "category" and "name"
+ * fields for each timer. This eliminates the need to include those
+ * args in the various timer APIs.
+ */
+
+/*
+ * The definition of an individual timer as used by an individual
+ * thread.
+ */
+struct tr2_timer {
+ /*
+ * Total elapsed time for this timer in this thread in nanoseconds.
+ */
+ uint64_t total_ns;
+
+ /*
+ * The maximum and minimum interval values observed for this
+ * timer in this thread.
+ */
+ uint64_t min_ns;
+ uint64_t max_ns;
+
+ /*
+ * The value of the clock when this timer was started in this
+ * thread. (Undefined when the timer is not active in this
+ * thread.)
+ */
+ uint64_t start_ns;
+
+ /*
+ * Number of times that this timer has been started and stopped
+ * in this thread. (Recursive starts are ignored.)
+ */
+ uint64_t interval_count;
+
+ /*
+ * Number of nested starts on the stack in this thread. (We
+ * ignore recursive starts and use this to track the recursive
+ * calls.)
+ */
+ unsigned int recursion_count;
+};
+
+/*
+ * Metadata for a timer.
+ */
+struct tr2_timer_metadata {
+ const char *category;
+ const char *name;
+
+ /*
+ * True if we should emit per-thread events for this timer
+ * when individual threads exit.
+ */
+ unsigned int want_per_thread_events:1;
+};
+
+/*
+ * A compile-time fixed-size block of timers to insert into
+ * thread-local storage. This wrapper is used to avoid quirks
+ * of C and the usual need to pass an array size argument.
+ */
+struct tr2_timer_block {
+ struct tr2_timer timer[TRACE2_NUMBER_OF_TIMERS];
+};
+
+/*
+ * Private routines used by trace2.c to actually start/stop an
+ * individual timer in the current thread.
+ */
+void tr2_start_timer(enum trace2_timer_id tid);
+void tr2_stop_timer(enum trace2_timer_id tid);
+
+/*
+ * Add the current thread's timer data to the global totals.
+ * This is called during thread-exit.
+ *
+ * Caller must be holding the tr2tls_mutex.
+ */
+void tr2_update_final_timers(void);
+
+/*
+ * Emit per-thread timer data for the current thread.
+ * This is called during thread-exit.
+ */
+void tr2_emit_per_thread_timers(tr2_tgt_evt_timer_t *fn_apply);
+
+/*
+ * Emit global total timer values.
+ * This is called during atexit handling.
+ *
+ * Caller must be holding the tr2tls_mutex.
+ */
+void tr2_emit_final_timers(tr2_tgt_evt_timer_t *fn_apply);
+
+#endif /* TR2_TMR_H */
diff --git a/transport.c b/transport.c
index 70e9c188a3..e7b97194c1 100644
--- a/transport.c
+++ b/transport.c
@@ -178,7 +178,7 @@ static int fetch_refs_from_bundle(struct transport *transport,
if (!data->get_refs_from_bundle_called)
get_refs_from_bundle_inner(transport);
ret = unbundle(the_repository, &data->header, data->fd,
- &extra_index_pack_args);
+ &extra_index_pack_args, 0);
transport->hash_algo = data->header.hash_algo;
return ret;
}
diff --git a/unpack-trees.c b/unpack-trees.c
index bae812156c..56b71d37ce 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -2043,7 +2043,8 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
if (!ret) {
if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
cache_tree_verify(the_repository, &o->result);
- if (!cache_tree_fully_valid(o->result.cache_tree))
+ if (!o->skip_cache_tree_update &&
+ !cache_tree_fully_valid(o->result.cache_tree))
cache_tree_update(&o->result,
WRITE_TREE_SILENT |
WRITE_TREE_REPAIR);
@@ -2288,7 +2289,8 @@ static int verify_clean_subdirectory(const struct cache_entry *ce,
if (S_ISGITLINK(ce->ce_mode)) {
struct object_id oid;
- int sub_head = resolve_gitlink_ref(ce->name, "HEAD", &oid);
+ int sub_head =
+ resolve_gitlink_ref(ce->name, "HEAD", &oid, NULL);
/*
* If we are not going to update the submodule, then
* we don't care.
diff --git a/unpack-trees.h b/unpack-trees.h
index efb9edfbb2..6ab0d74c84 100644
--- a/unpack-trees.h
+++ b/unpack-trees.h
@@ -71,7 +71,8 @@ struct unpack_trees_options {
quiet,
exiting_early,
show_all_errors,
- dry_run;
+ dry_run,
+ skip_cache_tree_update;
enum unpack_trees_reset_type reset;
const char *prefix;
int cache_bottom;
diff --git a/upload-pack.c b/upload-pack.c
index 0b8311bd68..551f22ffa5 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -62,6 +62,7 @@ struct upload_pack_data {
struct object_array have_obj;
struct oid_array haves; /* v2 only */
struct string_list wanted_refs; /* v2 only */
+ struct string_list hidden_refs;
struct object_array shallows;
struct string_list deepen_not;
@@ -118,6 +119,7 @@ static void upload_pack_data_init(struct upload_pack_data *data)
{
struct string_list symref = STRING_LIST_INIT_DUP;
struct string_list wanted_refs = STRING_LIST_INIT_DUP;
+ struct string_list hidden_refs = STRING_LIST_INIT_DUP;
struct object_array want_obj = OBJECT_ARRAY_INIT;
struct object_array have_obj = OBJECT_ARRAY_INIT;
struct oid_array haves = OID_ARRAY_INIT;
@@ -130,6 +132,7 @@ static void upload_pack_data_init(struct upload_pack_data *data)
memset(data, 0, sizeof(*data));
data->symref = symref;
data->wanted_refs = wanted_refs;
+ data->hidden_refs = hidden_refs;
data->want_obj = want_obj;
data->have_obj = have_obj;
data->haves = haves;
@@ -151,6 +154,7 @@ static void upload_pack_data_clear(struct upload_pack_data *data)
{
string_list_clear(&data->symref, 1);
string_list_clear(&data->wanted_refs, 1);
+ string_list_clear(&data->hidden_refs, 0);
object_array_clear(&data->want_obj);
object_array_clear(&data->have_obj);
oid_array_clear(&data->haves);
@@ -842,8 +846,8 @@ static void deepen(struct upload_pack_data *data, int depth)
* Checking for reachable shallows requires that our refs be
* marked with OUR_REF.
*/
- head_ref_namespaced(check_ref, NULL);
- for_each_namespaced_ref(check_ref, NULL);
+ head_ref_namespaced(check_ref, data);
+ for_each_namespaced_ref(check_ref, data);
get_reachable_list(data, &reachable_shallows);
result = get_shallow_commits(&reachable_shallows,
@@ -1158,11 +1162,11 @@ static void receive_needs(struct upload_pack_data *data,
/* return non-zero if the ref is hidden, otherwise 0 */
static int mark_our_ref(const char *refname, const char *refname_full,
- const struct object_id *oid)
+ const struct object_id *oid, const struct string_list *hidden_refs)
{
struct object *o = lookup_unknown_object(the_repository, oid);
- if (ref_is_hidden(refname, refname_full)) {
+ if (ref_is_hidden(refname, refname_full, hidden_refs)) {
o->flags |= HIDDEN_REF;
return 1;
}
@@ -1171,11 +1175,12 @@ static int mark_our_ref(const char *refname, const char *refname_full,
}
static int check_ref(const char *refname_full, const struct object_id *oid,
- int flag UNUSED, void *cb_data UNUSED)
+ int flag UNUSED, void *cb_data)
{
const char *refname = strip_namespace(refname_full);
+ struct upload_pack_data *data = cb_data;
- mark_our_ref(refname, refname_full, oid);
+ mark_our_ref(refname, refname_full, oid, &data->hidden_refs);
return 0;
}
@@ -1204,7 +1209,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
struct object_id peeled;
struct upload_pack_data *data = cb_data;
- if (mark_our_ref(refname_nons, refname, oid))
+ if (mark_our_ref(refname_nons, refname, oid, &data->hidden_refs))
return 0;
if (capabilities) {
@@ -1327,7 +1332,7 @@ static int upload_pack_config(const char *var, const char *value, void *cb_data)
if (parse_object_filter_config(var, value, data) < 0)
return -1;
- return parse_hide_refs_config(var, value, "uploadpack");
+ return parse_hide_refs_config(var, value, "uploadpack", &data->hidden_refs);
}
static int upload_pack_protected_config(const char *var, const char *value, void *cb_data)
@@ -1375,8 +1380,8 @@ void upload_pack(const int advertise_refs, const int stateless_rpc,
advertise_shallow_grafts(1);
packet_flush(1);
} else {
- head_ref_namespaced(check_ref, NULL);
- for_each_namespaced_ref(check_ref, NULL);
+ head_ref_namespaced(check_ref, &data);
+ for_each_namespaced_ref(check_ref, &data);
}
if (!advertise_refs) {
@@ -1441,6 +1446,7 @@ static int parse_want(struct packet_writer *writer, const char *line,
static int parse_want_ref(struct packet_writer *writer, const char *line,
struct string_list *wanted_refs,
+ struct string_list *hidden_refs,
struct object_array *want_obj)
{
const char *refname_nons;
@@ -1451,7 +1457,7 @@ static int parse_want_ref(struct packet_writer *writer, const char *line,
struct strbuf refname = STRBUF_INIT;
strbuf_addf(&refname, "%s%s", get_git_namespace(), refname_nons);
- if (ref_is_hidden(refname_nons, refname.buf) ||
+ if (ref_is_hidden(refname_nons, refname.buf, hidden_refs) ||
read_ref(refname.buf, &oid)) {
packet_writer_error(writer, "unknown ref %s", refname_nons);
die("unknown ref %s", refname_nons);
@@ -1508,7 +1514,7 @@ static void process_args(struct packet_reader *request,
continue;
if (data->allow_ref_in_want &&
parse_want_ref(&data->writer, arg, &data->wanted_refs,
- &data->want_obj))
+ &data->hidden_refs, &data->want_obj))
continue;
/* process have line */
if (parse_have(arg, &data->haves))
diff --git a/worktree.c b/worktree.c
index 257ba4cf1e..aa43c64119 100644
--- a/worktree.c
+++ b/worktree.c
@@ -489,62 +489,17 @@ int submodule_uses_worktrees(const char *path)
return ret;
}
-int parse_worktree_ref(const char *worktree_ref, const char **name,
- int *name_length, const char **ref)
-{
- if (skip_prefix(worktree_ref, "main-worktree/", &worktree_ref)) {
- if (!*worktree_ref)
- return -1;
- if (name)
- *name = NULL;
- if (name_length)
- *name_length = 0;
- if (ref)
- *ref = worktree_ref;
- return 0;
- }
- if (skip_prefix(worktree_ref, "worktrees/", &worktree_ref)) {
- const char *slash = strchr(worktree_ref, '/');
-
- if (!slash || slash == worktree_ref || !slash[1])
- return -1;
- if (name)
- *name = worktree_ref;
- if (name_length)
- *name_length = slash - worktree_ref;
- if (ref)
- *ref = slash + 1;
- return 0;
- }
- return -1;
-}
-
void strbuf_worktree_ref(const struct worktree *wt,
struct strbuf *sb,
const char *refname)
{
- switch (ref_type(refname)) {
- case REF_TYPE_PSEUDOREF:
- case REF_TYPE_PER_WORKTREE:
- if (wt && !wt->is_current) {
- if (is_main_worktree(wt))
- strbuf_addstr(sb, "main-worktree/");
- else
- strbuf_addf(sb, "worktrees/%s/", wt->id);
- }
- break;
-
- case REF_TYPE_MAIN_PSEUDOREF:
- case REF_TYPE_OTHER_PSEUDOREF:
- break;
-
- case REF_TYPE_NORMAL:
- /*
- * For shared refs, don't prefix worktrees/ or
- * main-worktree/. It's not necessary and
- * files-backend.c can't handle it anyway.
- */
- break;
+ if (parse_worktree_ref(refname, NULL, NULL, NULL) ==
+ REF_WORKTREE_CURRENT &&
+ wt && !wt->is_current) {
+ if (is_main_worktree(wt))
+ strbuf_addstr(sb, "main-worktree/");
+ else
+ strbuf_addf(sb, "worktrees/%s/", wt->id);
}
strbuf_addstr(sb, refname);
}
diff --git a/worktree.h b/worktree.h
index e9e839926b..9dcea6fc8c 100644
--- a/worktree.h
+++ b/worktree.h
@@ -167,16 +167,6 @@ const char *worktree_git_path(const struct worktree *wt,
__attribute__((format (printf, 2, 3)));
/*
- * Parse a worktree ref (i.e. with prefix main-worktree/ or
- * worktrees/) and return the position of the worktree's name and
- * length (or NULL and zero if it's main worktree), and ref.
- *
- * All name, name_length and ref arguments could be NULL.
- */
-int parse_worktree_ref(const char *worktree_ref, const char **name,
- int *name_length, const char **ref);
-
-/*
* Return a refname suitable for access from the current ref store.
*/
void strbuf_worktree_ref(const struct worktree *wt,
diff --git a/write-or-die.c b/write-or-die.c
index c4fd91b5b4..aaa0318e82 100644
--- a/write-or-die.c
+++ b/write-or-die.c
@@ -23,6 +23,7 @@ void maybe_flush_or_die(FILE *f, const char *desc)
if (f == stdout) {
if (skip_stdout_flush < 0) {
+ /* NEEDSWORK: make this a normal Boolean */
cp = getenv("GIT_FLUSH");
if (cp)
skip_stdout_flush = (atoi(cp) == 0);
diff --git a/wt-status.c b/wt-status.c
index 5813174896..1f6d64e759 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -18,8 +18,10 @@
#include "worktree.h"
#include "lockfile.h"
#include "sequencer.h"
+#include "fsmonitor-settings.h"
#define AB_DELAY_WARNING_IN_MS (2 * 1000)
+#define UF_DELAY_WARNING_IN_MS (2 * 1000)
static const char cut_line[] =
"------------------------ >8 ------------------------\n";
@@ -1205,6 +1207,13 @@ static void wt_longstatus_print_tracking(struct wt_status *s)
strbuf_release(&sb);
}
+static int uf_was_slow(uint32_t untracked_in_ms)
+{
+ if (getenv("GIT_TEST_UF_DELAY_WARNING"))
+ untracked_in_ms += UF_DELAY_WARNING_IN_MS + 1;
+ return UF_DELAY_WARNING_IN_MS < untracked_in_ms;
+}
+
static void show_merge_in_progress(struct wt_status *s,
const char *color)
{
@@ -1814,6 +1823,7 @@ static void wt_longstatus_print(struct wt_status *s)
{
const char *branch_color = color(WT_STATUS_ONBRANCH, s);
const char *branch_status_color = color(WT_STATUS_HEADER, s);
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(s->repo);
if (s->branch) {
const char *on_what = _("On branch ");
@@ -1870,13 +1880,21 @@ static void wt_longstatus_print(struct wt_status *s)
wt_longstatus_print_other(s, &s->untracked, _("Untracked files"), "add");
if (s->show_ignored_mode)
wt_longstatus_print_other(s, &s->ignored, _("Ignored files"), "add -f");
- if (advice_enabled(ADVICE_STATUS_U_OPTION) && 2000 < s->untracked_in_ms) {
+ if (advice_enabled(ADVICE_STATUS_U_OPTION) && uf_was_slow(s->untracked_in_ms)) {
status_printf_ln(s, GIT_COLOR_NORMAL, "%s", "");
+ if (fsm_mode > FSMONITOR_MODE_DISABLED) {
+ status_printf_ln(s, GIT_COLOR_NORMAL,
+ _("It took %.2f seconds to enumerate untracked files,\n"
+ "but the results were cached, and subsequent runs may be faster."),
+ s->untracked_in_ms / 1000.0);
+ } else {
+ status_printf_ln(s, GIT_COLOR_NORMAL,
+ _("It took %.2f seconds to enumerate untracked files."),
+ s->untracked_in_ms / 1000.0);
+ }
status_printf_ln(s, GIT_COLOR_NORMAL,
- _("It took %.2f seconds to enumerate untracked files. 'status -uno'\n"
- "may speed it up, but you have to be careful not to forget to add\n"
- "new files yourself (see 'git help status')."),
- s->untracked_in_ms / 1000.0);
+ _("See 'git help status' for information on how to improve this."));
+ status_printf_ln(s, GIT_COLOR_NORMAL, "%s", "");
}
} else if (s->committable)
status_printf_ln(s, GIT_COLOR_NORMAL, _("Untracked files not listed%s"),