-rw-r--r--  .github/workflows/main.yml | 9
-rw-r--r--  .gitignore | 3
-rw-r--r--  Documentation/MyFirstContribution.txt | 2
-rw-r--r--  Documentation/RelNotes/2.39.0.txt | 32
-rw-r--r--  Documentation/config.txt | 2
-rw-r--r--  Documentation/config/extensions.txt | 76
-rw-r--r--  Documentation/config/index.txt | 8
-rw-r--r--  Documentation/config/push.txt | 14
-rw-r--r--  Documentation/config/refs.txt | 13
-rw-r--r--  Documentation/config/submodule.txt | 12
-rw-r--r--  Documentation/git-clone.txt | 8
-rw-r--r--  Documentation/git-credential-cache.txt | 4
-rw-r--r--  Documentation/git-credential.txt | 2
-rw-r--r--  Documentation/git-maintenance.txt | 14
-rw-r--r--  Documentation/git-merge-tree.txt | 16
-rw-r--r--  Documentation/git-notes.txt | 10
-rw-r--r--  Documentation/git-push.txt | 6
-rw-r--r--  Documentation/git-repack.txt | 6
-rw-r--r--  Documentation/git-rev-parse.txt | 7
-rw-r--r--  Documentation/git-status.txt | 59
-rw-r--r--  Documentation/git-worktree.txt | 14
-rw-r--r--  Documentation/gitcredentials.txt | 16
-rw-r--r--  Documentation/gitformat-chunk.txt | 26
-rw-r--r--  Documentation/howto/maintain-git.txt | 9
-rw-r--r--  Documentation/rev-list-options.txt | 7
-rw-r--r--  Documentation/technical/repository-version.txt | 4
-rw-r--r--  Documentation/technical/sparse-checkout.txt | 1103
-rw-r--r--  INSTALL | 4
-rw-r--r--  Makefile | 434
-rw-r--r--  builtin.h | 2
-rw-r--r--  builtin/add.c | 2
-rw-r--r--  builtin/bisect.c (renamed from builtin/bisect--helper.c) | 291
-rw-r--r--  builtin/branch.c | 20
-rw-r--r--  builtin/bugreport.c | 9
-rw-r--r--  builtin/checkout.c | 2
-rw-r--r--  builtin/clone.c | 12
-rw-r--r--  builtin/commit.c | 13
-rw-r--r--  builtin/config.c | 42
-rw-r--r--  builtin/diff.c | 2
-rw-r--r--  builtin/for-each-repo.c | 5
-rw-r--r--  builtin/gc.c | 45
-rw-r--r--  builtin/ls-files.c | 1
-rw-r--r--  builtin/merge-tree.c | 65
-rw-r--r--  builtin/merge.c | 1
-rw-r--r--  builtin/notes.c | 22
-rw-r--r--  builtin/pack-objects.c | 27
-rw-r--r--  builtin/push.c | 12
-rw-r--r--  builtin/read-tree.c | 4
-rw-r--r--  builtin/rebase.c | 31
-rw-r--r--  builtin/receive-pack.c | 10
-rw-r--r--  builtin/repack.c | 73
-rw-r--r--  builtin/reset.c | 4
-rw-r--r--  builtin/rev-list.c | 1
-rw-r--r--  builtin/rev-parse.c | 19
-rw-r--r--  builtin/revert.c | 4
-rw-r--r--  builtin/rm.c | 3
-rw-r--r--  builtin/stash.c | 2
-rw-r--r--  builtin/submodule--helper.c | 248
-rw-r--r--  builtin/unpack-file.c | 1
-rw-r--r--  builtin/update-index.c | 4
-rw-r--r--  builtin/worktree.c | 71
-rw-r--r--  cache.h | 3
-rw-r--r--  chunk-format.c | 109
-rw-r--r--  chunk-format.h | 18
-rwxr-xr-x  ci/lib.sh | 2
-rwxr-xr-x  ci/run-build-and-tests.sh | 14
-rw-r--r--  combine-diff.c | 3
-rw-r--r--  commit-graph.c | 2
-rw-r--r--  compat/fsmonitor/fsm-listen-darwin.c | 2
-rw-r--r--  config.c | 24
-rw-r--r--  connected.c | 9
-rw-r--r--  connected.h | 7
-rw-r--r--  contrib/buildsystems/CMakeLists.txt | 185
-rw-r--r--  contrib/coccinelle/.gitignore | 2
-rw-r--r--  contrib/coccinelle/README | 49
-rw-r--r--  contrib/coccinelle/hashmap.cocci | 2
-rw-r--r--  contrib/coccinelle/preincr.cocci | 2
-rwxr-xr-x  contrib/coccinelle/spatchcache | 304
-rw-r--r--  contrib/coccinelle/strbuf.cocci | 2
-rw-r--r--  contrib/coccinelle/swap.cocci | 2
-rw-r--r--  contrib/coccinelle/the_repository.pending.cocci | 1
-rw-r--r--  contrib/completion/git-completion.bash | 51
-rw-r--r--  csum-file.c | 14
-rw-r--r--  csum-file.h | 7
-rw-r--r--  delta-islands.c | 71
-rw-r--r--  diff-lib.c | 105
-rw-r--r--  dir.c | 12
-rw-r--r--  dir.h | 1
-rwxr-xr-x  git-bisect.sh | 84
-rwxr-xr-x  git-submodule.sh | 3
-rw-r--r--  git.c | 4
-rw-r--r--  http.c | 47
-rw-r--r--  list-objects-filter-options.c | 4
-rw-r--r--  list-objects-filter-options.h | 18
-rw-r--r--  ls-refs.c | 13
-rw-r--r--  midx.c | 2
-rw-r--r--  object-file.c | 2
-rw-r--r--  object.c | 5
-rw-r--r--  pack-bitmap.c | 13
-rw-r--r--  read-cache.c | 31
-rw-r--r--  ref-filter.c | 1
-rw-r--r--  refs.c | 50
-rw-r--r--  refs.h | 10
-rw-r--r--  refs/files-backend.c | 8
-rw-r--r--  refs/packed-backend.c | 880
-rw-r--r--  refs/packed-backend.h | 281
-rw-r--r--  refs/packed-format-v1.c | 456
-rw-r--r--  refs/packed-format-v2.c | 624
-rw-r--r--  refs/refs-internal.h | 9
-rw-r--r--  repo-settings.c | 13
-rw-r--r--  repository.c | 2
-rw-r--r--  repository.h | 8
-rw-r--r--  reset.c | 1
-rw-r--r--  revision.c | 149
-rw-r--r--  revision.h | 61
-rw-r--r--  run-command.c | 13
-rw-r--r--  run-command.h | 24
-rw-r--r--  scalar.c | 54
-rw-r--r--  sequencer.c | 109
-rw-r--r--  sequencer.h | 6
-rw-r--r--  setup.c | 26
-rw-r--r--  sha1dc_git.h | 1
-rw-r--r--  shared.mak | 8
-rw-r--r--  sparse-index.c | 30
-rw-r--r--  submodule.c | 286
-rw-r--r--  submodule.h | 13
-rw-r--r--  t/Makefile | 2
-rw-r--r--  t/README | 3
-rwxr-xr-x  t/chainlint.pl | 171
-rw-r--r--  t/chainlint/block-comment.expect | 2
-rw-r--r--  t/chainlint/case-comment.expect | 3
-rw-r--r--  t/chainlint/close-subshell.expect | 3
-rw-r--r--  t/chainlint/comment.expect | 4
-rw-r--r--  t/chainlint/double-here-doc.expect | 14
-rw-r--r--  t/chainlint/empty-here-doc.expect | 3
-rw-r--r--  t/chainlint/for-loop.expect | 4
-rw-r--r--  t/chainlint/here-doc-close-subshell.expect | 4
-rw-r--r--  t/chainlint/here-doc-indent-operator.expect | 10
-rw-r--r--  t/chainlint/here-doc-multi-line-command-subst.expect | 5
-rw-r--r--  t/chainlint/here-doc-multi-line-string.expect | 4
-rw-r--r--  t/chainlint/here-doc.expect | 24
-rw-r--r--  t/chainlint/if-then-else.expect | 4
-rw-r--r--  t/chainlint/incomplete-line.expect | 10
-rw-r--r--  t/chainlint/inline-comment.expect | 4
-rw-r--r--  t/chainlint/loop-detect-status.expect | 2
-rw-r--r--  t/chainlint/nested-here-doc.expect | 27
-rw-r--r--  t/chainlint/nested-subshell-comment.expect | 2
-rw-r--r--  t/chainlint/subshell-here-doc.expect | 28
-rw-r--r--  t/chainlint/t7900-subtree.expect | 4
-rw-r--r--  t/chainlint/while-loop.expect | 4
-rw-r--r--  t/helper/test-cache-tree.c | 64
-rw-r--r--  t/helper/test-fake-ssh.c | 1
-rw-r--r--  t/helper/test-run-command.c | 21
-rw-r--r--  t/helper/test-sha1.c | 8
-rw-r--r--  t/helper/test-submodule.c | 84
-rw-r--r--  t/helper/test-tool.c | 2
-rw-r--r--  t/helper/test-tool.h | 2
-rw-r--r--  t/lib-gettext.sh | 2
-rw-r--r--  t/lib-gitweb.sh | 2
-rw-r--r--  t/lib-httpd.sh | 5
-rw-r--r--  t/lib-httpd/apache.conf | 19
-rwxr-xr-x  t/perf/p0006-read-tree-checkout.sh | 8
-rwxr-xr-x  t/perf/p0090-cache-tree.sh | 36
-rwxr-xr-x  t/perf/p1401-ref-operations.sh | 52
-rwxr-xr-x  t/perf/p7102-reset.sh | 21
-rwxr-xr-x  t/t0013-sha1dc.sh | 6
-rwxr-xr-x  t/t0021-conversion.sh | 4
-rwxr-xr-x  t/t0040-parse-options.sh | 12
-rwxr-xr-x  t/t0061-run-command.sh | 54
-rwxr-xr-x  t/t0068-for-each-repo.sh | 7
-rwxr-xr-x  t/t0070-fundamental.sh | 1
-rwxr-xr-x  t/t1011-read-tree-sparse-checkout.sh | 1
-rwxr-xr-x  t/t1022-read-tree-partial-clone.sh | 4
-rwxr-xr-x  t/t1050-large.sh | 6
-rwxr-xr-x  t/t1300-config.sh | 6
-rwxr-xr-x  t/t1404-update-ref-errors.sh | 2
-rwxr-xr-x  t/t1409-avoid-packing-refs.sh | 23
-rwxr-xr-x  t/t1413-reflog-detach.sh | 1
-rwxr-xr-x  t/t1501-work-tree.sh | 2
-rwxr-xr-x  t/t1600-index.sh | 8
-rwxr-xr-x  t/t1800-hook.sh | 2
-rwxr-xr-x  t/t2012-checkout-last.sh | 1
-rwxr-xr-x  t/t2018-checkout-branch.sh | 1
-rwxr-xr-x  t/t2025-checkout-no-overlay.sh | 1
-rwxr-xr-x  t/t2400-worktree-add.sh | 45
-rwxr-xr-x  t/t3009-ls-files-others-nonsubmodule.sh | 1
-rwxr-xr-x  t/t3010-ls-files-killed-modified.sh | 2
-rwxr-xr-x  t/t3050-subprojects-fetch.sh | 1
-rwxr-xr-x  t/t3060-ls-files-with-tree.sh | 2
-rwxr-xr-x  t/t3200-branch.sh | 36
-rwxr-xr-x  t/t3210-pack-refs.sh | 8
-rwxr-xr-x  t/t3212-ref-formats.sh | 100
-rwxr-xr-x  t/t3301-notes.sh | 18
-rwxr-xr-x  t/t3404-rebase-interactive.sh | 107
-rwxr-xr-x  t/t3409-rebase-environ.sh | 1
-rwxr-xr-x  t/t3413-rebase-hook.sh | 1
-rwxr-xr-x  t/t3428-rebase-signoff.sh | 1
-rwxr-xr-x  t/t3429-rebase-edit-todo.sh | 1
-rwxr-xr-x  t/t3430-rebase-merges.sh | 17
-rwxr-xr-x  t/t3433-rebase-across-mode-change.sh | 1
-rwxr-xr-x  t/t4015-diff-whitespace.sh | 4
-rwxr-xr-x  t/t4027-diff-submodule.sh | 19
-rwxr-xr-x  t/t4045-diff-relative.sh | 2
-rwxr-xr-x  t/t4052-stat-output.sh | 1
-rwxr-xr-x  t/t4053-diff-no-index.sh | 1
-rwxr-xr-x  t/t4067-diff-partial-clone.sh | 1
-rwxr-xr-x  t/t4111-apply-subdir.sh | 1
-rwxr-xr-x  t/t4135-apply-weird-filenames.sh | 1
-rwxr-xr-x  t/t4213-log-tabexpand.sh | 1
-rwxr-xr-x  t/t4301-merge-tree-write-tree.sh | 62
-rwxr-xr-x  t/t5310-pack-bitmaps.sh | 5
-rwxr-xr-x  t/t5317-pack-objects-filter-objects.sh | 19
-rwxr-xr-x  t/t5502-quickfetch.sh | 2
-rwxr-xr-x  t/t5526-fetch-submodules.sh | 8
-rwxr-xr-x  t/t5531-deep-submodule-push.sh | 50
-rwxr-xr-x  t/t5539-fetch-http-shallow.sh | 7
-rwxr-xr-x  t/t5541-http-push-smart.sh | 7
-rwxr-xr-x  t/t5542-push-http-shallow.sh | 7
-rwxr-xr-x  t/t5544-pack-objects-hook.sh | 2
-rwxr-xr-x  t/t5551-http-fetch-smart.sh | 20
-rwxr-xr-x  t/t5554-noop-fetch-negotiator.sh | 2
-rwxr-xr-x  t/t5558-clone-bundle-uri.sh | 7
-rwxr-xr-x  t/t5559-http-fetch-smart-http2.sh | 4
-rwxr-xr-x  t/t5601-clone.sh | 22
-rwxr-xr-x  t/t5610-clone-detached.sh | 1
-rwxr-xr-x  t/t5611-clone-config.sh | 1
-rwxr-xr-x  t/t5614-clone-submodules-shallow.sh | 1
-rwxr-xr-x  t/t5617-clone-submodules.sh (renamed from t/t5617-clone-submodules-remote.sh) | 41
-rwxr-xr-x  t/t5618-alternate-refs.sh | 2
-rwxr-xr-x  t/t6018-rev-list-glob.sh | 40
-rwxr-xr-x  t/t6021-rev-list-exclude-hidden.sh | 163
-rwxr-xr-x  t/t6030-bisect-porcelain.sh | 158
-rwxr-xr-x  t/t6060-merge-index.sh | 2
-rwxr-xr-x  t/t6102-rev-list-unexpected-objects.sh | 4
-rwxr-xr-x  t/t6301-for-each-ref-errors.sh | 1
-rwxr-xr-x  t/t6401-merge-criss-cross.sh | 2
-rwxr-xr-x  t/t6406-merge-attr.sh | 1
-rwxr-xr-x  t/t6407-merge-binary.sh | 1
-rwxr-xr-x  t/t6415-merge-dir-to-symlink.sh | 1
-rwxr-xr-x  t/t6435-merge-sparse.sh | 1
-rwxr-xr-x  t/t7001-mv.sh | 62
-rwxr-xr-x  t/t7065-wtstatus-slow.sh | 70
-rwxr-xr-x  t/t7103-reset-bare.sh | 2
-rwxr-xr-x  t/t7400-submodule-basic.sh | 10
-rwxr-xr-x  t/t7406-submodule-update.sh | 156
-rwxr-xr-x  t/t7407-submodule-foreach.sh | 5
-rwxr-xr-x  t/t7411-submodule-config.sh | 36
-rwxr-xr-x  t/t7412-submodule-absorbgitdirs.sh | 36
-rwxr-xr-x  t/t7418-submodule-sparse-gitmodules.sh | 4
-rwxr-xr-x  t/t7422-submodule-output.sh | 170
-rwxr-xr-x  t/t7504-commit-msg-hook.sh | 1
-rwxr-xr-x  t/t7506-status-submodule.sh | 19
-rwxr-xr-x  t/t7517-per-repo-email.sh | 1
-rwxr-xr-x  t/t7520-ignored-hook-warning.sh | 1
-rwxr-xr-x  t/t7605-merge-resolve.sh | 1
-rwxr-xr-x  t/t7609-mergetool--lib.sh | 2
-rwxr-xr-x  t/t7610-mergetool.sh | 4
-rwxr-xr-x  t/t7614-merge-signoff.sh | 1
-rwxr-xr-x  t/t7700-repack.sh | 121
-rwxr-xr-x  t/t7900-maintenance.sh | 21
-rwxr-xr-x  t/t9003-help-autocorrect.sh | 2
-rwxr-xr-x  t/t9115-git-svn-dcommit-funky-renames.sh | 1
-rwxr-xr-x  t/t9146-git-svn-empty-dirs.sh | 1
-rwxr-xr-x  t/t9148-git-svn-propset.sh | 1
-rwxr-xr-x  t/t9160-git-svn-preserve-empty-dirs.sh | 1
-rwxr-xr-x  t/t9210-scalar.sh | 14
-rwxr-xr-x  t/t9902-completion.sh | 46
-rwxr-xr-x  t/t9903-bash-prompt.sh | 2
-rw-r--r--  t/test-lib-functions.sh | 66
-rw-r--r--  t/test-lib.sh | 33
-rw-r--r--  unpack-trees.c | 6
-rw-r--r--  unpack-trees.h | 3
-rw-r--r--  upload-pack.c | 30
-rw-r--r--  wt-status.c | 28

274 files changed, 8111 insertions(+), 1993 deletions(-)
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index bd6f75b8e0..4928cab0f1 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -37,14 +37,14 @@ jobs:
echo "::set-output name=enabled::$enabled"
- name: skip if the commit or tree was already tested
id: skip-if-redundant
- uses: actions/github-script@v3
+ uses: actions/github-script@v6
if: steps.check-ref.outputs.enabled == 'yes'
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
try {
// Figure out workflow ID, commit and tree
- const { data: run } = await github.actions.getWorkflowRun({
+ const { data: run } = await github.rest.actions.getWorkflowRun({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.runId,
@@ -54,7 +54,7 @@ jobs:
const tree_id = run.head_commit.tree_id;
// See whether there is a successful run for that commit or tree
- const { data: runs } = await github.actions.listWorkflowRuns({
+ const { data: runs } = await github.rest.actions.listWorkflowRuns({
owner: context.repo.owner,
repo: context.repo.repo,
per_page: 500,
@@ -238,6 +238,9 @@ jobs:
os: ubuntu
cc_package: gcc-8
pool: ubuntu-latest
+ - jobname: linux-cmake-ctest
+ cc: gcc
+ pool: ubuntu-latest
- jobname: osx-clang
cc: clang
pool: macos-latest
diff --git a/.gitignore b/.gitignore
index cb0231fb40..b9d91eaed1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,4 @@
/fuzz_corpora
-/GIT-BUILD-DIR
/GIT-BUILD-OPTIONS
/GIT-CFLAGS
/GIT-LDFLAGS
@@ -8,6 +7,7 @@
/GIT-PERL-HEADER
/GIT-PYTHON-VARS
/GIT-SCRIPT-DEFINES
+/GIT-SPATCH-DEFINES
/GIT-USER-AGENT
/GIT-VERSION-FILE
/bin-wrappers/
@@ -20,7 +20,6 @@
/git-archimport
/git-archive
/git-bisect
-/git-bisect--helper
/git-blame
/git-branch
/git-bugreport
diff --git a/Documentation/MyFirstContribution.txt b/Documentation/MyFirstContribution.txt
index 1a4be8ee0a..ccfd0cb5f3 100644
--- a/Documentation/MyFirstContribution.txt
+++ b/Documentation/MyFirstContribution.txt
@@ -736,7 +736,7 @@ the {lore}[Git mailing list archive]:
2022-02-21 1:43 ` John Cai
2022-02-21 1:50 ` Taylor Blau
2022-02-23 19:50 ` John Cai
-2022-02-18 20:00 ` // other replies ellided
+2022-02-18 20:00 ` // other replies elided
2022-02-18 18:40 ` [PATCH 2/3] reflog: call reflog_delete from reflog.c John Cai via GitGitGadget
2022-02-18 19:15 ` Ævar Arnfjörð Bjarmason
2022-02-18 20:26 ` Junio C Hamano
diff --git a/Documentation/RelNotes/2.39.0.txt b/Documentation/RelNotes/2.39.0.txt
index f21f949475..153bf6d89b 100644
--- a/Documentation/RelNotes/2.39.0.txt
+++ b/Documentation/RelNotes/2.39.0.txt
@@ -32,6 +32,9 @@ UI, Workflows & Features
* Enable gc.cruftpacks by default for those who opt into
feature.experimental setting.
+ * "git repack" learns to send cruft objects out of the way into
+ packfiles outside the repository.
+
Performance, Internal Implementation, Development Support etc.
--------------------------------------------------------------
@@ -98,6 +101,29 @@ Performance, Internal Implementation, Development Support etc.
* Simplify the run-command API.
+ * Update the actions/github-script dependency in CI to avoid a
+ deprecation warning.
+
+ * Progress on being able to initialize a rev_info struct with a
+ macro.
+
+ * Add trace2 counters to the region to clear skip worktree bits in a
+ sparse checkout.
+
+ * Modernize test script to avoid "test -f" and friends.
+
+ * Avoid calling 'cache_tree_update()' when doing so would be
+ redundant.
+
+ * Update the credential-cache documentation to provide a more
+ realistic example.
+
+ * Makefile comments updates and reordering to clarify knobs used to
+ choose SHA implementations.
+
+ * A design document for sparse-checkout's future directions has been
+ added.
+
Fixes since v2.38
-----------------
@@ -239,6 +265,12 @@ Fixes since v2.38
* "git archive" mistakenly complained twice about a missing
executable, which has been corrected.
+ * Fix a bug where `git branch -d` did not work on an orphaned HEAD.
+
+ * `git rebase --update-refs` would delete references when all
+ `update-ref` commands in the sequencer were removed, which has been
+ corrected.
+
* Other code cleanup, docfix, build fix, etc.
(merge 413bc6d20a ds/cmd-main-reorder later to maint).
(merge 8d2863e4ed nw/t1002-cleanup later to maint).
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 0e93aef862..e480f99c3e 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -493,6 +493,8 @@ include::config/rebase.txt[]
include::config/receive.txt[]
+include::config/refs.txt[]
+
include::config/remote.txt[]
include::config/remotes.txt[]
diff --git a/Documentation/config/extensions.txt b/Documentation/config/extensions.txt
index bccaec7a96..05abb821e0 100644
--- a/Documentation/config/extensions.txt
+++ b/Documentation/config/extensions.txt
@@ -7,6 +7,69 @@ Note that this setting should only be set by linkgit:git-init[1] or
linkgit:git-clone[1]. Trying to change it after initialization will not
work and will produce hard-to-diagnose issues.
+extensions.refFormat::
+ Specify the reference storage mechanisms used by the repository as a
+ multi-valued list. The acceptable values are `files` and `packed`.
+ If not specified, the list of `files` and `packed` is assumed. It
+ is an error to specify this key unless `core.repositoryFormatVersion`
+ is 1.
++
+As new ref formats are added, Git commands may modify this list before and
+after upgrading the on-disk reference storage files. The specific values
+indicate the existence of different layers:
++
+--
+`files`;;
+ When present, references may be stored as "loose" reference files
+ in the `$GIT_DIR/refs/` directory. The name of the reference
+ corresponds to the filename after `$GIT_DIR` and the file contains
+ an object ID as a hexadecimal string. If a loose reference file
+ exists, then its value takes precedence over all other formats.
+
+`packed`;;
+ When present, references may be stored as a group in a
+ `packed-refs` file in its version 1 format. When grouped with
+ `"files"` or provided on its own, this file is located at
+ `$GIT_DIR/packed-refs`. This file contains a list of distinct
+ reference names, paired with their object IDs. When combined with
+ `files`, the `packed` format will only be used to group multiple
+ loose reference files upon request via the `git pack-refs` command or
+ via the `pack-refs` maintenance task.
+
+`packed-v2`;;
+ When present, references may be stored as a group in a
+ `packed-refs` file in its version 2 format. This file is in the
+ same position and interacts with loose refs the same way as when the
+ `packed` value exists. Both `packed` and `packed-v2` must exist to
+ upgrade an existing `packed-refs` file from version 1 to version 2
+ or to downgrade from version 2 to version 1. When both are
+ present, the `refs.packedRefsVersion` config value indicates which
+ file format version is used during writes, but both versions are
+ understood when reading the file.
+--
++
+The following combinations are supported by this version of Git:
++
+--
+`files` and (`packed` and/or `packed-v2`);;
+ This set of values indicates that references are stored both as
+ loose reference files and in the `packed-refs` file. Loose
+ references are preferred, and the `packed-refs` file is updated
+ only when deleting a reference that is stored in the `packed-refs`
+ file or during a `git pack-refs` command.
++
+The presence of `packed` and `packed-v2` specifies whether the `packed-refs`
+file is allowed to be in its v1 or v2 formats, respectively. When only one
+is present, Git will refuse to read a `packed-refs` file that does not
+match the expected format. When both are present, the `refs.packedRefsVersion`
+config option indicates which file format is used during writes.
+
+`files`;;
+ When only this value is present, Git will ignore the `packed-refs`
+ file and refuse to write one during `git pack-refs`. All references
+ will be read from and written to loose reference files.
+--
+
extensions.worktreeConfig::
If enabled, then worktrees will load config settings from the
`$GIT_DIR/config.worktree` file in addition to the
@@ -21,10 +84,15 @@ When enabling `extensions.worktreeConfig`, you must be careful to move
certain values from the common config file to the main working tree's
`config.worktree` file, if present:
+
-* `core.worktree` must be moved from `$GIT_COMMON_DIR/config` to
- `$GIT_COMMON_DIR/config.worktree`.
-* If `core.bare` is true, then it must be moved from `$GIT_COMMON_DIR/config`
- to `$GIT_COMMON_DIR/config.worktree`.
+--
+`core.worktree`;;
+ This config value must be moved from `$GIT_COMMON_DIR/config` to
+ `$GIT_COMMON_DIR/config.worktree`.
+
+`core.bare`;;
+ If true, then this value must be moved from
+ `$GIT_COMMON_DIR/config` to `$GIT_COMMON_DIR/config.worktree`.
+--
+
It may also be beneficial to adjust the locations of `core.sparseCheckout`
and `core.sparseCheckoutCone` depending on your desire for customizable
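
To make the `extensions.refFormat` values above concrete, here is a rough
sketch of how a repository opted into all three layers might look in
`.git/config`; the repository itself is hypothetical and the snippet only
restates the settings documented in the hunk above:

------------
[core]
	repositoryFormatVersion = 1
[extensions]
	refFormat = files
	refFormat = packed
	refFormat = packed-v2
[refs]
	packedRefsVersion = 2
------------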
diff --git a/Documentation/config/index.txt b/Documentation/config/index.txt
index 75f3a2d105..709ba72f62 100644
--- a/Documentation/config/index.txt
+++ b/Documentation/config/index.txt
@@ -30,3 +30,11 @@ index.version::
Specify the version with which new index files should be
initialized. This does not affect existing repositories.
If `feature.manyFiles` is enabled, then the default is 4.
+
+index.computeHash::
+ When enabled, compute the hash of the index file as it is written
+ and store the hash at the end of the content. This is enabled by
+ default.
++
+If you disable `index.computeHash`, then older Git clients may report that
+your index is corrupt during `git fsck`.
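
As a minimal sketch of the `index.computeHash` knob documented above (run in
any test repository; disabling it is only advisable if the `git fsck` caveat
is acceptable to you):

------------
$ git config index.computeHash false   # skip hashing newly written index files
$ git config index.computeHash true    # restore the default
------------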
diff --git a/Documentation/config/push.txt b/Documentation/config/push.txt
index 7386fea225..43338b65e8 100644
--- a/Documentation/config/push.txt
+++ b/Documentation/config/push.txt
@@ -110,18 +110,8 @@ This will result in only b (a and c are cleared).
----
push.recurseSubmodules::
- Make sure all submodule commits used by the revisions to be pushed
- are available on a remote-tracking branch. If the value is 'check'
- then Git will verify that all submodule commits that changed in the
- revisions to be pushed are available on at least one remote of the
- submodule. If any commits are missing, the push will be aborted and
- exit with non-zero status. If the value is 'on-demand' then all
- submodules that changed in the revisions to be pushed will be
- pushed. If on-demand was not able to push all necessary revisions
- it will also be aborted and exit with non-zero status. If the value
- is 'no' then default behavior of ignoring submodules when pushing
- is retained. You may override this configuration at time of push by
- specifying '--recurse-submodules=check|on-demand|no'.
+ May be "check", "on-demand", "only", or "no", with the same behavior
+ as that of "push --recurse-submodules".
If not set, 'no' is used by default, unless 'submodule.recurse' is
set (in which case a 'true' value means 'on-demand').
diff --git a/Documentation/config/refs.txt b/Documentation/config/refs.txt
new file mode 100644
index 0000000000..b2fdb2923f
--- /dev/null
+++ b/Documentation/config/refs.txt
@@ -0,0 +1,13 @@
+refs.packedRefsVersion::
+ Specifies the file format version to use when writing a `packed-refs`
+ file. Defaults to `1`.
++
+The only other value currently allowed is `2`, which uses a structured file
+format to result in a smaller `packed-refs` file. In order to write this
+file format version, the repository must also have the `packed-v2` extension
+enabled. The most typical setup will include the
+`core.repositoryFormatVersion=1` config value and the `extensions.refFormat`
+key will have three values: `files`, `packed`, and `packed-v2`.
++
+If `extensions.refFormat` has the value `packed-v2` and not `packed`, then
+`refs.packedRefsVersion` defaults to `2`.
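
A possible opt-in sequence for an existing repository, assuming the
`extensions.refFormat` values described in extensions.txt above; this is only
an illustration of the documented settings, not a prescribed migration path:

------------
$ git config core.repositoryFormatVersion 1
$ git config --add extensions.refFormat files
$ git config --add extensions.refFormat packed
$ git config --add extensions.refFormat packed-v2
$ git config refs.packedRefsVersion 2
$ git pack-refs --all    # rewrite packed-refs using the version 2 format
------------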
diff --git a/Documentation/config/submodule.txt b/Documentation/config/submodule.txt
index 6490527b45..1144a5ad74 100644
--- a/Documentation/config/submodule.txt
+++ b/Documentation/config/submodule.txt
@@ -93,6 +93,18 @@ submodule.fetchJobs::
in parallel. A value of 0 will give some reasonable default.
If unset, it defaults to 1.
+submodule.diffJobs::
+ Specifies how many submodules are diffed at the same time. A
+ positive integer allows up to that number of submodules to be
+ diffed in parallel. A value of 0 uses the number of logical cores.
+ If unset, it defaults to 1. The diff operation is used by many
+ other Git commands such as add, merge, diff, status, stash, and
+ more. Note that the expensive part of the diff operation is
+ reading the index from cache or memory. Therefore multiple jobs
+ may be detrimental to performance if your hardware does not
+ support parallel reads or if the number of jobs greatly exceeds
+ the amount of supported reads.
+
submodule.alternateLocation::
Specifies how the submodules obtain alternates when submodules are
cloned. Possible values are `no`, `superproject`.
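
A brief example of the new `submodule.diffJobs` setting; the values are
arbitrary and should be chosen to match your hardware:

------------
$ git config submodule.diffJobs 4    # diff up to four submodules in parallel
$ git config submodule.diffJobs 0    # use one job per logical core
------------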
diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt
index d6434d262d..6a4e5d31b4 100644
--- a/Documentation/git-clone.txt
+++ b/Documentation/git-clone.txt
@@ -16,7 +16,7 @@ SYNOPSIS
[--depth <depth>] [--[no-]single-branch] [--no-tags]
[--recurse-submodules[=<pathspec>]] [--[no-]shallow-submodules]
[--[no-]remote-submodules] [--jobs <n>] [--sparse] [--[no-]reject-shallow]
- [--filter=<filter> [--also-filter-submodules]] [--] <repository>
+ [--filter=<filter> [--also-filter-submodules] [--detach]] [--] <repository>
[<directory>]
DESCRIPTION
@@ -210,6 +210,12 @@ objects from the source repository into a pack in the cloned repository.
`--branch` can also take tags and detaches the HEAD at that commit
in the resulting repository.
+--detach::
+ If the cloned repository's HEAD points to a branch, point the newly
+ created HEAD to the branch's commit instead of the branch itself.
+ Additionally, in a non-bare repository, the corresponding local branch
+ will not be created.
+
-u <upload-pack>::
--upload-pack <upload-pack>::
When given, and the repository to clone from is accessed
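
A short illustration of the new `--detach` option (the URL is a placeholder):

------------
$ git clone --detach https://example.com/repo.git
$ git -C repo status --branch --short    # reports a detached HEAD; no local branch was created
------------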
diff --git a/Documentation/git-credential-cache.txt b/Documentation/git-credential-cache.txt
index 0216c18ef8..432e159d95 100644
--- a/Documentation/git-credential-cache.txt
+++ b/Documentation/git-credential-cache.txt
@@ -69,10 +69,10 @@ $ git push http://example.com/repo.git
------------------------------------
You can provide options via the credential.helper configuration
-variable (this example drops the cache time to 5 minutes):
+variable (this example increases the cache time to 1 hour):
-------------------------------------------------------
-$ git config credential.helper 'cache --timeout=300'
+$ git config credential.helper 'cache --timeout=3600'
-------------------------------------------------------
GIT
diff --git a/Documentation/git-credential.txt b/Documentation/git-credential.txt
index f18673017f..ac2818b9f6 100644
--- a/Documentation/git-credential.txt
+++ b/Documentation/git-credential.txt
@@ -160,6 +160,8 @@ empty string.
Components which are missing from the URL (e.g., there is no
username in the example above) will be left unset.
+Unrecognised attributes are silently discarded.
+
GIT
---
Part of the linkgit:git[1] suite
diff --git a/Documentation/git-maintenance.txt b/Documentation/git-maintenance.txt
index bb888690e4..805e5a2e3a 100644
--- a/Documentation/git-maintenance.txt
+++ b/Documentation/git-maintenance.txt
@@ -50,13 +50,13 @@ stop::
the background maintenance is restarted later.
register::
- Initialize Git config values so any scheduled maintenance will
- start running on this repository. This adds the repository to the
- `maintenance.repo` config variable in the current user's global
- config and enables some recommended configuration values for
- `maintenance.<task>.schedule`. The tasks that are enabled are safe
- for running in the background without disrupting foreground
- processes.
+ Initialize Git config values so any scheduled maintenance will start
+ running on this repository. This adds the repository to the
+ `maintenance.repo` config variable in the current user's global config,
+ or the config specified by --config-file option, and enables some
+ recommended configuration values for `maintenance.<task>.schedule`. The
+ tasks that are enabled are safe for running in the background without
+ disrupting foreground processes.
+
The `register` subcommand will also set the `maintenance.strategy` config
value to `incremental`, if this value is not previously set. The
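
For example, assuming a hypothetical separate config file dedicated to
maintenance settings, registration could look like this:

------------
$ git maintenance register --config-file "$HOME/.config/git/maintenance"
------------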
diff --git a/Documentation/git-merge-tree.txt b/Documentation/git-merge-tree.txt
index 04bcc416e6..298c133fdb 100644
--- a/Documentation/git-merge-tree.txt
+++ b/Documentation/git-merge-tree.txt
@@ -64,6 +64,11 @@ OPTIONS
share no common history. This flag can be given to override that
check and make the merge proceed anyway.
+--merge-base=<commit>::
+ Instead of finding the merge-bases for <branch1> and <branch2>,
+ specify a merge-base for the merge. This option is incompatible
+ with `--stdin`.
+
[[OUTPUT]]
OUTPUT
------
@@ -216,6 +221,17 @@ with linkgit:git-merge[1]:
* any messages that would have been printed to stdout (the
<<IM,Informational messages>>)
+INPUT FORMAT
+------------
+'git merge-tree --stdin' input format is fully text based. Each line
+has this format:
+
+ [<base-commit> -- ]<branch1> <branch2>
+
+If a line contains a `--` separator, the string before the separator
+specifies the merge-base to be used for that merge and the string after
+the separator names the two branches to be merged.
+
MISTAKES TO AVOID
-----------------
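
A small sketch of the `--stdin` input format described above, using the
hypothetical branches `topic1` and `topic2`, with an explicit merge-base
given on the second input line:

------------
$ printf '%s\n' 'topic1 topic2' 'mybase -- topic1 topic2' | git merge-tree --stdin
------------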
diff --git a/Documentation/git-notes.txt b/Documentation/git-notes.txt
index efbc10f0f5..50b198c2b2 100644
--- a/Documentation/git-notes.txt
+++ b/Documentation/git-notes.txt
@@ -11,7 +11,7 @@ SYNOPSIS
'git notes' [list [<object>]]
'git notes' add [-f] [--allow-empty] [-F <file> | -m <msg> | (-c | -C) <object>] [<object>]
'git notes' copy [-f] ( --stdin | <from-object> [<to-object>] )
-'git notes' append [--allow-empty] [-F <file> | -m <msg> | (-c | -C) <object>] [<object>]
+'git notes' append [--allow-empty] [--no-blank-line] [-F <file> | -m <msg> | (-c | -C) <object>] [<object>]
'git notes' edit [--allow-empty] [<object>]
'git notes' show [<object>]
'git notes' merge [-v | -q] [-s <strategy> ] <notes-ref>
@@ -86,7 +86,9 @@ the command can read the input given to the `post-rewrite` hook.)
append::
Append to the notes of an existing object (defaults to HEAD).
- Creates a new notes object if needed.
+ Creates a new notes object if needed. If the note of the given
+ object and the note to be appended are not empty, a blank line
+ will be inserted between them.
edit::
Edit the notes for a given object (defaults to HEAD).
@@ -159,6 +161,10 @@ OPTIONS
Allow an empty note object to be stored. The default behavior is
to automatically remove empty notes.
+--no-blank-line::
+ Do not insert a blank line before the inserted notes (insert
+ a blank line is the default).
+
--ref <ref>::
Manipulate the notes tree in <ref>. This overrides
`GIT_NOTES_REF` and the "core.notesRef" configuration. The ref
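
A quick illustration of `--no-blank-line` in action (assumes HEAD already
exists in a test repository):

------------
$ git notes add -m 'first line'
$ git notes append --no-blank-line -m 'second line'
$ git notes show    # the two messages follow each other with no blank line between
------------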
diff --git a/Documentation/git-push.txt b/Documentation/git-push.txt
index def7657ef9..5bb1d5aae2 100644
--- a/Documentation/git-push.txt
+++ b/Documentation/git-push.txt
@@ -409,10 +409,14 @@ Specifying `--no-force-if-includes` disables this behavior.
all submodules that changed in the revisions to be pushed will be
pushed. If on-demand was not able to push all necessary revisions it will
also be aborted and exit with non-zero status. If 'only' is used all
- submodules will be recursively pushed while the superproject is left
+ submodules will be pushed while the superproject is left
unpushed. A value of 'no' or using `--no-recurse-submodules` can be used
to override the push.recurseSubmodules configuration variable when no
submodule recursion is required.
++
+When using 'on-demand' or 'only', if a submodule has a
+"push.recurseSubmodules={on-demand,only}" or "submodule.recurse" configuration,
+further recursion will occur. In this case, "only" is treated as "on-demand".
--[no-]verify::
Toggle the pre-push hook (see linkgit:githooks[5]). The
diff --git a/Documentation/git-repack.txt b/Documentation/git-repack.txt
index 0bf13893d8..4017157949 100644
--- a/Documentation/git-repack.txt
+++ b/Documentation/git-repack.txt
@@ -74,6 +74,12 @@ to the new separate pack will be written.
immediately instead of waiting for the next `git gc` invocation.
Only useful with `--cruft -d`.
+--expire-to=<dir>::
+ Write a cruft pack containing pruned objects (if any) to the
+ directory `<dir>`. This option is useful for keeping a copy of
+ any pruned objects in a separate directory as a backup. Only
+ useful with `--cruft -d`.
+
-l::
Pass the `--local` option to 'git pack-objects'. See
linkgit:git-pack-objects[1].
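
For example, to keep a backup of pruned objects while repacking; the
destination directory here is arbitrary:

------------
$ git repack --cruft -d --expire-to=../backup-cruft
------------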
diff --git a/Documentation/git-rev-parse.txt b/Documentation/git-rev-parse.txt
index 6b8ca085aa..bcd8069287 100644
--- a/Documentation/git-rev-parse.txt
+++ b/Documentation/git-rev-parse.txt
@@ -197,6 +197,13 @@ respectively, and they must begin with `refs/` when applied to `--glob`
or `--all`. If a trailing '/{asterisk}' is intended, it must be given
explicitly.
+--exclude-hidden=[receive|uploadpack]::
+ Do not include refs that would be hidden by `git-receive-pack` or
+ `git-upload-pack` by consulting the appropriate `receive.hideRefs` or
+ `uploadpack.hideRefs` configuration along with `transfer.hideRefs` (see
+ linkgit:git-config[1]). This option affects the next pseudo-ref option
+ `--all` or `--glob` and is cleared after processing them.
+
--disambiguate=<prefix>::
Show every object whose name begins with the given prefix.
The <prefix> must be at least 4 hexadecimal digits long to
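
For instance, to enumerate refs the way `git upload-pack` would advertise
them, excluding anything hidden by the usual hideRefs configuration (the
hidden namespace here is just an example):

------------
$ git config --add transfer.hideRefs refs/hidden/
$ git rev-parse --exclude-hidden=uploadpack --all
------------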
diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt
index 5e438a7fdc..570c36e07c 100644
--- a/Documentation/git-status.txt
+++ b/Documentation/git-status.txt
@@ -457,6 +457,65 @@ during the write may conflict with other simultaneous processes, causing
them to fail. Scripts running `status` in the background should consider
using `git --no-optional-locks status` (see linkgit:git[1] for details).
+UNTRACKED FILES AND STATUS SPEED
+--------------------------------
+
+`git status` can be very slow in large worktrees if/when it
+needs to search for untracked files and directories. There are
+many configuration options available to speed this up by either
+avoiding the work or making use of cached results from previous
+Git commands. There is no single optimum set of settings right
+for everyone. Here is a brief summary of the relevant options
+to help you choose which is right for you.
+
+* First, you may want to run `git status` again. Your current
+ configuration may already be caching `git status` results,
+ so it could be faster on subsequent runs.
+
+* The `--untracked-files=no` flag or the
+ `status.showUntrackedFiles=false` config (see above for both) :
+ indicate that `git status` should not report untracked
+ files. This is the fastest option. `git status` will not list
+ the untracked files, so you need to be careful to remember if
+ you create any new files and manually `git add` them.
+
+* `advice.statusUoption=false` (see linkgit:git-config[1]) :
+ this config option disables a warning message when the search
+ for untracked files takes longer than desired. In some large
+ repositories, this message may appear frequently and not be a
+ helpful signal.
+
+* `core.untrackedCache=true` (see linkgit:git-update-index[1]) :
+ enable the untracked cache feature and only search directories
+ that have been modified since the previous `git status` command.
+ Git remembers the set of untracked files within each directory
+ and assumes that if a directory has not been modified, then
+ the set of untracked files within has not changed. This is much
+ faster than enumerating the contents of every directory, but still
+ not without cost, because Git still has to search for the set of
+ modified directories. The untracked cache is stored in the
+ `.git/index` file. The reduced cost of searching for untracked
+ files is offset slightly by the increased size of the index and
+ the cost of keeping it up-to-date. That reduced search time is
+ usually worth the additional size.
+
+* `core.untrackedCache=true` and `core.fsmonitor=true` or
+ `core.fsmonitor=<hook_command_pathname>` (see
+ linkgit:git-update-index[1]) : enable both the untracked cache
+ and FSMonitor features and only search directories that have
+ been modified since the previous `git status` command. This
+ is faster than using just the untracked cache alone because
+ Git can also avoid searching for modified directories. Git
+ only has to enumerate the exact set of directories that have
+ changed recently. While the FSMonitor feature can be enabled
+ without the untracked cache, the benefits are greatly reduced
+ in that case.
+
+Note that after you turn on the untracked cache and/or FSMonitor
+features it may take a few `git status` commands for the various
+caches to warm up before you see improved command times. This is
+normal.
+
SEE ALSO
--------
linkgit:gitignore[5]
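
Putting the advice above together, a typical way to speed up `git status` in
a large worktree might look like this; both settings are the ones discussed
above, and the second run is where the warmed caches pay off:

------------
$ git config core.untrackedCache true
$ git config core.fsmonitor true
$ git status    # first run primes the untracked cache and FSMonitor data
$ git status    # later runs only visit directories that actually changed
------------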
diff --git a/Documentation/git-worktree.txt b/Documentation/git-worktree.txt
index 063d6eeb99..1310bfb564 100644
--- a/Documentation/git-worktree.txt
+++ b/Documentation/git-worktree.txt
@@ -10,7 +10,7 @@ SYNOPSIS
--------
[verse]
'git worktree add' [-f] [--detach] [--checkout] [--lock [--reason <string>]]
- [-b <new-branch>] <path> [<commit-ish>]
+ [[-b | -B | --orphan] <new-branch>] <path> [<commit-ish>]
'git worktree list' [-v | --porcelain [-z]]
'git worktree lock' [--reason <string>] <worktree>
'git worktree move' <worktree> <new-path>
@@ -95,6 +95,14 @@ exist, a new branch based on `HEAD` is automatically created as if
`-b <branch>` was given. If `<branch>` does exist, it will be checked out
in the new worktree, if it's not checked out anywhere else, otherwise the
command will refuse to create the worktree (unless `--force` is used).
++
+------------
+$ git worktree add --orphan <branch> <path>
+------------
++
+Create a worktree containing an orphan branch named `<branch>` with a
+clean working directory. See `--orphan` in linkgit:git-switch[1] for
+more details.
list::
@@ -222,6 +230,10 @@ This can also be set up as the default behaviour by using the
With `prune`, do not remove anything; just report what it would
remove.
+--orphan <new-branch>::
+ With `add`, create a new orphan branch named `<new-branch>` in the new
+ worktree. See `--orphan` in linkgit:git-switch[1] for details.
+
--porcelain::
With `list`, output in an easy-to-parse format for scripts.
This format will remain stable across Git versions and regardless of user
diff --git a/Documentation/gitcredentials.txt b/Documentation/gitcredentials.txt
index 80517b4eb2..4522471c33 100644
--- a/Documentation/gitcredentials.txt
+++ b/Documentation/gitcredentials.txt
@@ -17,9 +17,10 @@ DESCRIPTION
Git will sometimes need credentials from the user in order to perform
operations; for example, it may need to ask for a username and password
-in order to access a remote repository over HTTP. This manual describes
-the mechanisms Git uses to request these credentials, as well as some
-features to avoid inputting these credentials repeatedly.
+in order to access a remote repository over HTTP. Some remotes accept
+a personal access token or OAuth access token as a password. This
+manual describes the mechanisms Git uses to request these credentials,
+as well as some features to avoid inputting these credentials repeatedly.
REQUESTING CREDENTIALS
----------------------
@@ -61,7 +62,9 @@ for a password. It is generally configured by adding this to your config:
Credential helpers, on the other hand, are external programs from which Git can
request both usernames and passwords; they typically interface with secure
-storage provided by the OS or other programs.
+storage provided by the OS or other programs. Alternatively, a
+credential-generating helper might generate credentials for certain servers via
+some API.
To use a helper, you must first select one to use. Git currently
includes the following helpers:
@@ -269,6 +272,7 @@ stdout in the same format (see linkgit:git-credential[1] for common
attributes). A helper is free to produce a subset, or even no values at
all if it has nothing useful to provide. Any provided attributes will
overwrite those already known about by Git's credential subsystem.
+Unrecognised attributes are silently discarded.
While it is possible to override all attributes, well behaving helpers
should refrain from doing so for any attribute other than username and
@@ -286,8 +290,8 @@ For a `store` or `erase` operation, the helper's output is ignored.
If a helper fails to perform the requested operation or needs to notify
the user of a potential issue, it may write to stderr.
-If it does not support the requested operation (e.g., a read-only store),
-it should silently ignore the request.
+If it does not support the requested operation (e.g., a read-only store
+or generator), it should silently ignore the request.
If a helper receives any other operation, it should silently ignore the
request. This leaves room for future operations to be added (older
diff --git a/Documentation/gitformat-chunk.txt b/Documentation/gitformat-chunk.txt
index 57202ede27..ee3718c430 100644
--- a/Documentation/gitformat-chunk.txt
+++ b/Documentation/gitformat-chunk.txt
@@ -24,8 +24,9 @@ how they use the chunks to describe structured data.
A chunk-based file format begins with some header information custom to
that format. That header should include enough information to identify
-the file type, format version, and number of chunks in the file. From this
-information, that file can determine the start of the chunk-based region.
+the file type, format version, and (optionally) the number of chunks in
+the file. From this information, that file can determine the start of the
+chunk-based region.
The chunk-based region starts with a table of contents describing where
each chunk starts and ends. This consists of (C+1) rows of 12 bytes each,
@@ -51,8 +52,27 @@ The final entry in the table of contents must be four zero bytes. This
confirms that the table of contents is ending and provides the offset for
the end of the chunk-based data.
+The default chunk format assumes the table of contents appears at the
+beginning of the file (after the header information) and the chunks are
+ordered by increasing offset. Alternatively, the chunk format allows a
+table of contents that is placed at the end of the file (before the
+trailing hash) and the offsets are in descending order. In this trailing
+table of contents case, the data in order looks instead like the following
+table:
+
+ | Chunk ID (4 bytes) | Chunk Offset (8 bytes) |
+ |--------------------|------------------------|
+ | 0x0000 | OFFSET[C+1] |
+ | ID[C] | OFFSET[C] |
+ | ... | ... |
+ | ID[0] | OFFSET[0] |
+
+A concrete file format that uses the chunk format will state explicitly
+whether it uses a trailing table of contents. By default, the table of
+contents appears in ascending order before all chunk data.
+
Note: The chunk-based format expects that the file contains _at least_ a
-trailing hash after `OFFSET[C+1]`.
+trailing hash after either `OFFSET[C+1]` or the trailing table of contents.
Functions for working with chunk-based file formats are declared in
`chunk-format.h`. Using these methods provide extra checks that assist
diff --git a/Documentation/howto/maintain-git.txt b/Documentation/howto/maintain-git.txt
index 215e2edb0f..d07c6d44e5 100644
--- a/Documentation/howto/maintain-git.txt
+++ b/Documentation/howto/maintain-git.txt
@@ -231,7 +231,7 @@ by doing the following:
- Prepare 'jch' branch, which is used to represent somewhere
between 'master' and 'seen' and often is slightly ahead of 'next'.
- $ Meta/Reintegrate master..seen >Meta/redo-jch.sh
+ $ Meta/Reintegrate master..jch >Meta/redo-jch.sh
The result is a script that lists topics to be merged in order to
rebuild 'seen' as the input to Meta/Reintegrate script. Remove
@@ -283,6 +283,11 @@ by doing the following:
$ git diff jch next
+ Then build the rest of 'jch':
+
+ $ git checkout jch
+ $ sh Meta/redo-jch.sh
+
When all is well, clean up the redo-jch.sh script with
$ sh Meta/redo-jch.sh -u
@@ -293,7 +298,7 @@ by doing the following:
- Rebuild 'seen'.
- $ Meta/Reintegrate master..seen >Meta/redo-seen.sh
+ $ Meta/Reintegrate jch..seen >Meta/redo-seen.sh
Edit the result by adding new topics that are not still in 'seen'
in the script. Then
diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt
index 1837509566..ff68e48406 100644
--- a/Documentation/rev-list-options.txt
+++ b/Documentation/rev-list-options.txt
@@ -195,6 +195,13 @@ respectively, and they must begin with `refs/` when applied to `--glob`
or `--all`. If a trailing '/{asterisk}' is intended, it must be given
explicitly.
+--exclude-hidden=[receive|uploadpack]::
+ Do not include refs that would be hidden by `git-receive-pack` or
+ `git-upload-pack` by consulting the appropriate `receive.hideRefs` or
+ `uploadpack.hideRefs` configuration along with `transfer.hideRefs` (see
+ linkgit:git-config[1]). This option affects the next pseudo-ref option
+ `--all` or `--glob` and is cleared after processing them.
+
--reflog::
Pretend as if all objects mentioned by reflogs are listed on the
command line as `<commit>`.
diff --git a/Documentation/technical/repository-version.txt b/Documentation/technical/repository-version.txt
index 7844ef30ff..8ef664b0b9 100644
--- a/Documentation/technical/repository-version.txt
+++ b/Documentation/technical/repository-version.txt
@@ -82,9 +82,9 @@ When the config key `extensions.preciousObjects` is set to `true`,
objects in the repository MUST NOT be deleted (e.g., by `git-prune` or
`git repack -d`).
-==== `partialclone`
+==== `partialClone`
-When the config key `extensions.partialclone` is set, it indicates
+When the config key `extensions.partialClone` is set, it indicates
that the repo was created with a partial clone (or later performed
a partial fetch) and that the remote may have omitted sending
certain unwanted objects. Such a remote is called a "promisor remote"
diff --git a/Documentation/technical/sparse-checkout.txt b/Documentation/technical/sparse-checkout.txt
new file mode 100644
index 0000000000..fa0d01cbda
--- /dev/null
+++ b/Documentation/technical/sparse-checkout.txt
@@ -0,0 +1,1103 @@
+Table of contents:
+
+ * Terminology
+ * Purpose of sparse-checkouts
+ * Usecases of primary concern
+ * Oversimplified mental models ("Cliff Notes" for this document!)
+ * Desired behavior
+ * Behavior classes
+ * Subcommand-dependent defaults
+ * Sparse specification vs. sparsity patterns
+ * Implementation Questions
+ * Implementation Goals/Plans
+ * Known bugs
+ * Reference Emails
+
+
+=== Terminology ===
+
+cone mode: one of two modes for specifying the desired subset of files
+ in a sparse-checkout. In cone-mode, the user specifies
+ directories (getting both everything under that directory as
+ well as everything in leading directories), while in non-cone
+ mode, the user specifies gitignore-style patterns. Controlled
+ by the --[no-]cone option to sparse-checkout init|set.
+
+SKIP_WORKTREE: When tracked files do not match the sparse specification and
+ are removed from the working tree, the file in the index is marked
+ with a SKIP_WORKTREE bit. Note that if a tracked file has the
+ SKIP_WORKTREE bit set but the file is later written by the user to
+ the working tree anyway, the SKIP_WORKTREE bit will be cleared at
+ the beginning of any subsequent Git operation.
+
+ Most sparse checkout users are unaware of this implementation
+ detail, and the term should generally be avoided in user-facing
+ descriptions and command flags. Unfortunately, prior to the
+ `sparse-checkout` subcommand this low-level detail was exposed,
+ and, as of the time of writing, is still exposed in various places.
+
+sparse-checkout: a subcommand in git used to reduce the files present in
+ the working tree to a subset of all tracked files. Also, the
+ name of the file in the $GIT_DIR/info directory used to track
+ the sparsity patterns corresponding to the user's desired
+ subset.
+
+sparse cone: see cone mode
+
+sparse directory: An entry in the index corresponding to a directory, which
+ appears in the index instead of all the files under that directory
+ that would normally appear. See also sparse-index. Something that
+ can cause confusion is that the "sparse directory" does NOT match
+ the sparse specification, i.e. the directory is NOT present in the
+ working tree. May be renamed in the future (e.g. to "skipped
+ directory").
+
+sparse index: A special mode for sparse-checkout that also makes the
+ index sparse by recording a directory entry in lieu of all the
+ files underneath that directory (thus making that a "skipped
+ directory" which unfortunately has also been called a "sparse
+ directory"), and does this for potentially multiple
+ directories. Controlled by the --[no-]sparse-index option to
+ init|set|reapply.
+
+sparsity patterns: patterns from $GIT_DIR/info/sparse-checkout used to
+ define the set of files of interest. A warning: It is easy to
+ over-use this term (or the shortened "patterns" term), for two
+ reasons: (1) users in cone mode specify directories rather than
+ patterns (their directories are transformed into patterns, but
+ users may think you are talking about non-cone mode if you use the
+ word "patterns"), and (b) the sparse specification might
+ transiently differ in the working tree or index from the sparsity
+ patterns (see "Sparse specification vs. sparsity patterns").
+
+sparse specification: The set of paths in the user's area of focus. This
+ is typically just the tracked files that match the sparsity
+ patterns, but the sparse specification can temporarily differ and
+ include additional files. (See also "Sparse specification
+ vs. sparsity patterns")
+
+ * When working with history, the sparse specification is exactly
+ the set of files matching the sparsity patterns.
+ * When interacting with the working tree, the sparse specification
+ is the set of tracked files with a clear SKIP_WORKTREE bit or
+ tracked files present in the working copy.
+ * When modifying or showing results from the index, the sparse
+ specification is the set of files with a clear SKIP_WORKTREE bit
+ or that differ in the index from HEAD.
+ * If working with the index and the working copy, the sparse
+ specification is the union of the paths from above.
+
+vivifying: When a command restores a tracked file to the working tree (and
+ hopefully also clears the SKIP_WORKTREE bit in the index for that
+ file), this is referred to as "vivifying" the file.
+
+
+=== Purpose of sparse-checkouts ===
+
+sparse-checkouts exist to allow users to work with a subset of their
+files.
+
+You can think of sparse-checkouts as subdividing "tracked" files into two
+categories -- a sparse subset, and all the rest. Implementationally, we
+mark "all the rest" in the index with a SKIP_WORKTREE bit and leave them
+out of the working tree. The SKIP_WORKTREE files are still tracked, just
+not present in the working tree.
+
+In the past, sparse-checkouts were defined by "SKIP_WORKTREE means the file
+is missing from the working tree but pretend the file contents match HEAD".
+That was not only bogus (it actually meant the file missing from the
+working tree matched the index rather than HEAD), but it was also a
+low-level detail which only provided decent behavior for a few commands.
+There were a surprising number of ways in which that guiding principle gave
+command results that violated user expectations, and as such was a bad
+mental model. However, it persisted for many years and may still be found
+in some corners of the code base.
+
+Anyway, the idea of "working with a subset of files" is simple enough, but
+there are multiple different high-level usecases which affect how some Git
+subcommands should behave. Further, even if we only considered one of
+those usecases, sparse-checkouts can modify different subcommands in over a
+half dozen different ways. Let's start by considering the high level
+usecases:
+
+ A) Users are _only_ interested in the sparse portion of the repo
+
+ A*) Users are _only_ interested in the sparse portion of the repo
+ that they have downloaded so far
+
+ B) Users want a sparse working tree, but are working in a larger whole
+
+ C) sparse-checkout is a behind-the-scenes implementation detail allowing
+ Git to work with a specially crafted in-house virtual file system;
+ users are actually working with a "full" working tree that is
+ lazily populated, and sparse-checkout helps with the lazy population
+ piece.
+
+It may be worth explaining each of these in a bit more detail:
+
+
+ (Behavior A) Users are _only_ interested in the sparse portion of the repo
+
+These folks might know there are other things in the repository, but
+don't care. They are uninterested in other parts of the repository, and
+only want to know about changes within their area of interest. Showing
+them other files from history (e.g. from diff/log/grep/etc.) is a
+usability annoyance, potentially a huge one since other changes in
+history may dwarf the changes they are interested in.
+
+Some of these users also arrive at this usecase from wanting to use partial
+clones together with sparse checkouts (in a way where they have downloaded
+blobs within the sparse specification) and do disconnected development.
+Not only do these users generally not care about other parts of the
+repository, but consider it a blocker for Git commands to try to operate on
+those. If commands attempt to access paths in history outside the sparsity
+specification, then the partial clone will attempt to download additional
+blobs on demand, fail, and then fail the user's command. (This may be
+unavoidable in some cases, e.g. when `git merge` has non-trivial changes to
+reconcile outside the sparse specification, but we should limit how often
+users are forced to connect to the network.)
+
+Also, even for users using partial clones that do not mind being
+always connected to the network, the need to download blobs as
+side-effects of various other commands (such as the printed diffstat
+after a merge or pull) can lead to worries about local repository size
+growing unnecessarily[10].
+
+ (Behavior A*) Users are _only_ interested in the sparse portion of the repo
+ that they have downloaded so far (a variant on the first usecase)
+
+This variant is driven by folks who are using partial clones together with
+sparse checkouts and do disconnected development (so far sounding like a
+subset of behavior A users) and doing so on very large repositories. The
+reason for yet another variant is that downloading even just the blobs
+through history within their sparse specification may be too much, so they
+only download some. They would still like operations to succeed without
+network connectivity, though, so things like `git log -S${SEARCH_TERM} -p`
+or `git grep ${SEARCH_TERM} OLDREV` would need to be prepared to provide
+partial results that depend on what happens to have been downloaded.
+
+This variant could be viewed as Behavior A with the sparse specification
+for history querying operations modified from "sparsity patterns" to
+"sparsity patterns limited to the blobs we have already downloaded".
+
+ (Behavior B) Users want a sparse working tree, but are working in a
+ larger whole
+
+Stolee described this usecase this way[11]:
+
+"I'm also focused on users that know that they are a part of a larger
+whole. They know they are operating on a large repository but focus on
+what they need to contribute their part. I expect multiple "roles" to
+use very different, almost disjoint parts of the codebase. Some other
+"architect" users operate across the entire tree or hop between different
+sections of the codebase as necessary. In this situation, I'm wary of
+scoping too many features to the sparse-checkout definition, especially
+"git log," as it can be too confusing to have their view of the codebase
+depend on your "point of view."
+
+People might also end up wanting behavior B due to complex inter-project
+dependencies. The initial attempts to use sparse-checkouts usually involve
+the directories you are directly interested in plus what those directories
+depend upon within your repository. But there's a monkey wrench here: if
+you have integration tests, they invert the hierarchy: to run integration
+tests, you need not only what you are interested in and its in-tree
+dependencies, you also need everything that depends upon what you are
+interested in or that depends upon one of your dependencies...AND you need
+all the in-tree dependencies of that expanded group. That can easily
+change your sparse-checkout into a nearly dense one.
+
+Naturally, that tends to kill the benefits of sparse-checkouts. There are
+a couple solutions to this conundrum: either avoid grabbing in-repo
+dependencies (maybe have built versions of your in-repo dependencies pulled
+from a CI cache somewhere), or say that users shouldn't run integration
+tests directly and instead do it on the CI server when they submit a code
+review. Or do both. Regardless of whether you stub out your in-repo
+dependencies or stub out the things that depend upon you, there is
+certainly a reason to want to query and be aware of those other stubbed-out
+parts of the repository, particularly when the dependencies are complex or
+change relatively frequently. Thus, for such uses, sparse-checkouts can be
+used to limit what you directly build and modify, but these users do not
+necessarily want their sparse checkout paths to limit their queries of
+versions in history.
+
+Some people may also be interested in behavior B over behavior A simply as
+a performance workaround: if they are using non-cone mode, then they have
+to deal with its inherent quadratic performance problems. In that mode,
+every operation that checks whether paths match the sparsity specification
+can be expensive. As such, these users may only be willing to pay for
+those expensive checks when interacting with the working copy, and may
+prefer getting "unrelated" results from their history queries over having
+slow commands.
+
+ (Behavior C) sparse-checkout is an implementation detail supporting a
+ special VFS.
+
+This usecase goes slightly against the traditional definition of
+sparse-checkout in that it actually tries to present a full or dense
+checkout to the user. However, this usecase utilizes the same underlying
+technical underpinnings in a new way which does provide some performance
+advantages to users. The basic idea is that a company can have an in-house
+Git-aware Virtual File System which pretends all files are present in the
+working tree, by intercepting all file system accesses and using those to
+fetch and write accessed files on demand via partial clones. The VFS uses
+sparse-checkout to prevent Git from writing or paying attention to many
+files, and manually updates the sparse checkout patterns itself based on
+user access and modification of files in the working tree. See commit
+ecc7c8841d ("repo_read_index: add config to expect files outside sparse
+patterns", 2022-02-25) and the link at [17] for a more detailed description
+of such a VFS.
+
+The biggest difference here is that users are completely unaware that the
+sparse-checkout machinery is even in use. The sparse patterns are not
+specified by the user but rather are under the complete control of the VFS
+(and the patterns are updated frequently and dynamically by it). The user
+will perceive the checkout as dense, and commands should thus behave as if
+all files are present.
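+
+As a rough sketch (the exact settings naturally depend on the VFS in
+question), the kind of configuration such a VFS might manage on the
+user's behalf would look something like:
+
+    git config core.sparseCheckout true
+    git config core.sparseCheckoutCone true
+    # the option added by ecc7c8841d; files outside the sparse patterns
+    # may legitimately be present in the working tree:
+    git config sparse.expectFilesOutsideOfPatterns true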
+
+
+=== Usecases of primary concern ===
+
+Most of the rest of this document will focus on Behavior A and Behavior
+B. Some notes about the other two cases and why we are not focusing on
+them:
+
+ (Behavior A*)
+
+Supporting this usecase is estimated to be difficult and a lot of work.
+There are no plans to implement it currently, but it may be a potential
+future alternative. Knowing about the existence of additional alternatives
+may affect our choice of command line flags (e.g. if we need tri-state or
+quad-state flags rather than just binary flags), so it was still important
+to at least note.
+
+Further, I believe the descriptions below for Behavior A are probably still
+valid for this usecase, with the only exception being that it redefines the
+sparse specification to restrict it to already-downloaded blobs. The hard
+part is in making commands capable of respecting that modified definition.
+
+ (Behavior C)
+
+This usecase violates some of the early sparse-checkout documented
+assumptions (since files marked as SKIP_WORKTREE will be displayed to users
+as present in the working tree). That violation may mean various
+sparse-checkout related behaviors are not well suited to this usecase and
+we may need tweaks -- to both documentation and code -- to handle it.
+However, this usecase is also perhaps the simplest model to support in that
+everything behaves like a dense checkout with a few exceptions (e.g. branch
+checkouts and switches write fewer things, knowing the VFS will lazily
+write the rest on an as-needed basis).
+
+Since there is no publicly available VFS-related code for folks to try,
+the number of folks who can test such a usecase is limited.
+
+The primary reason to note the Behavior C usecase is that as we fix things
+to better support Behaviors A and B, there may be additional places where
+we need to make tweaks allowing folks in this usecase to get the original
+non-sparse treatment. For an example, see ecc7c8841d ("repo_read_index:
+add config to expect files outside sparse patterns", 2022-02-25). The
+secondary reason to note Behavior C is so that folks taking advantage of
+Behavior C do not assume they are part of the Behavior B camp and propose
+patches that break things for the real Behavior B folks.
+
+
+=== Oversimplified mental models ===
+
+An oversimplification of the differences in the above behaviors is:
+
+ Behavior A: Restrict worktree and history operations to sparse specification
+ Behavior B: Restrict worktree operations to sparse specification; have any
+ history operations work across all files
+ Behavior C: Do not restrict either worktree or history operations to the
+ sparse specification...with the exception of branch checkouts or
+ switches which avoid writing files that will match the index so
+ they can later lazily be populated instead.
+
+
+=== Desired behavior ===
+
+As noted previously, despite the simple idea of just working with a subset
+of files, there are a range of different behavioral changes that need to be
+made to different subcommands to work well with such a feature. See
+[1,2,3,4,5,6,7,8,9,10] for various examples. In particular, at [2], we saw
+that mere composition of other commands that individually worked correctly
+in a sparse-checkout context did not imply that the higher level command
+would work correctly; it sometimes requires further tweaks. So,
+understanding these differences can be beneficial.
+
+* Commands behaving the same regardless of high-level use-case
+
+ * commands that only look at files within the sparsity specification
+
+ * diff (without --cached or REVISION arguments)
+ * grep (without --cached or REVISION arguments)
+ * diff-files
+
+ * commands that restore files to the working tree that match sparsity
+ patterns, and remove unmodified files that don't match those
+ patterns:
+
+ * switch
+ * checkout (the switch-like half)
+ * read-tree
+ * reset --hard
+
+ * commands that write conflicted files to the working tree, but otherwise
+ will omit writing files to the working tree that do not match the
+ sparsity patterns:
+
+ * merge
+ * rebase
+ * cherry-pick
+ * revert
+
+ * `am` and `apply --cached` should probably be in this section but
+ are buggy (see the "Known bugs" section below)
+
+ The behavior for these commands somewhat depends upon the merge
+ strategy being used:
+ * `ort` behaves as described above
+ * `recursive` tries to not vivify files unnecessarily, but does sometimes
+ vivify files without conflicts.
+ * `octopus` and `resolve` will always vivify any file changed in the merge
+ relative to the first parent, which is rather suboptimal.
+
+ It is also important to note that these commands WILL update the index
+ outside the sparse specification relative to when the operation began,
+ BUT these commands often make a commit just before or after such that
+ by the end of the operation there is no change to the index outside the
+ sparse specification. Of course, if the operation hits conflicts or
+ does not make a commit, then these operations clearly can modify the
+ index outside the sparse specification.
+
+ Finally, it is important to note that at least the first four of these
+ commands also try to remove differences between the sparse
+ specification and the sparsity patterns (much like the commands in the
+ previous section).
+
+ * commands that always ignore sparsity since commits must be full-tree
+
+ * archive
+ * bundle
+ * commit
+ * format-patch
+ * fast-export
+ * fast-import
+ * commit-tree
+
+ * commands that write any modified file to the working tree (conflicted
+ or not, and whether those paths match sparsity patterns or not):
+
+ * stash
+ * apply (without `--index` or `--cached`)
+
+* Commands that may slightly differ for behavior A vs. behavior B:
+
+ Commands in this category behave mostly the same between the two
+ behaviors, but may differ in verbosity and types of warning and error
+ messages.
+
+ * commands that make modifications to which files are tracked:
+ * add
+ * rm
+ * mv
+ * update-index
+
+ The fact that files can move between the 'tracked' and 'untracked'
+ categories means some commands will have to treat untracked files
+ differently. But if we have to treat untracked files differently,
+ then additional commands may also need changes:
+
+ * status
+ * clean
+
+ In particular, `status` may need to report any untracked files outside
+ the sparsity specification as an erroneous condition (especially to
+ avoid the user trying to `git add` them, forcing `git add` to display
+ an error).
+
+ It's not clear to me exactly how (or even if) `clean` would change,
+ but it's the other command that also affects untracked files.
+
+ `update-index` may be slightly special. Its --[no-]skip-worktree flag
+ may need to ignore the sparse specification by its nature. Also, its
+ current --[no-]ignore-skip-worktree-entries default is totally bogus.
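+
+ For reference, that flag manipulates the bit directly and (with an
+ invented path) is used like:
+
+ git update-index --skip-worktree -- some/path.c
+ git update-index --no-skip-worktree -- some/path.c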
+
+ * commands for manually tweaking paths in both the index and the working tree
+ * `restore`
+ * the restore-like half of `checkout`
+
+ These commands should be similar to add/rm/mv in that they should
+ only operate on the sparse specification by default, and require a
+ special flag to operate on all files.
+
+ Also, note that these commands currently have a number of issues (see
+ the "Known bugs" section below)
+
+* Commands that significantly differ for behavior A vs. behavior B:
+
+ * commands that query history
+ * diff (with --cached or REVISION arguments)
+ * grep (with --cached or REVISION arguments)
+ * show (when given commit arguments)
+ * blame (only matters when one or more -C flags are passed)
+ * and annotate
+ * log
+ * whatchanged
+ * ls-files
+ * diff-index
+ * diff-tree
+ * ls-tree
+
+ Note: for log and whatchanged, revision walking logic is unaffected
+ but displaying of patches is affected by scoping the command to the
+ sparse-checkout. (The fact that revision walking is unaffected is
+ why rev-list, shortlog, show-branch, and bisect are not in this
+ list.)
+
+ ls-files may be slightly special in that e.g. `git ls-files -t` is
+ often used to see what is sparse and what is not. Perhaps -t should
+ always work on the full tree?
+
+* Commands I don't know how to classify
+
+ * range-diff
+
+ Is this like `log` or `format-patch`?
+
+ * cherry
+
+ See range-diff
+
+* Commands unaffected by sparse-checkouts
+
+ * shortlog
+ * show-branch
+ * rev-list
+ * bisect
+
+ * branch
+ * describe
+ * fetch
+ * gc
+ * init
+ * maintenance
+ * notes
+ * pull (merge & rebase have the necessary changes)
+ * push
+ * submodule
+ * tag
+
+ * config
+ * filter-branch (works in separate checkout without sparse-checkout setup)
+ * pack-refs
+ * prune
+ * remote
+ * repack
+ * replace
+
+ * bugreport
+ * count-objects
+ * fsck
+ * gitweb
+ * help
+ * instaweb
+ * merge-tree (doesn't touch worktree or index, and merges always compute full-tree)
+ * rerere
+ * verify-commit
+ * verify-tag
+
+ * commit-graph
+ * hash-object
+ * index-pack
+ * mktag
+ * mktree
+ * multi-pack-index
+ * pack-objects
+ * prune-packed
+ * symbolic-ref
+ * unpack-objects
+ * update-ref
+ * write-tree (operates on index, possibly optimized to use sparse dir entries)
+
+ * for-each-ref
+ * get-tar-commit-id
+ * ls-remote
+ * merge-base (merges are computed full tree, so merge base should be too)
+ * name-rev
+ * pack-redundant
+ * rev-parse
+ * show-index
+ * show-ref
+ * unpack-file
+ * var
+ * verify-pack
+
+ * <Everything under 'Interacting with Others' in 'git help --all'>
+ * <Everything under 'Low-level...Syncing' in 'git help --all'>
+ * <Everything under 'Low-level...Internal Helpers' in 'git help --all'>
+ * <Everything under 'External commands' in 'git help --all'>
+
+* Commands that might be affected, but who cares?
+
+ * merge-file
+ * merge-index
+ * gitk?
+
+
+=== Behavior classes ===
+
+From the above there are a few classes of behavior:
+
+ * "restrict"
+
+ Commands in this class only read or write files in the working tree
+ within the sparse specification.
+
+ When moving to a new commit (e.g. switch, reset --hard), these commands
+ may update index files outside the sparse specification as of the start
+ of the operation, but by the end of the operation those index files
+ will match HEAD again and thus those files will again be outside the
+ sparse specification.
+
+ When paths are explicitly specified, these paths are intersected with
+ the sparse specification and will only operate on such paths.
+ (e.g. `git restore [--staged] -- '*.png'`, `git reset -p -- '*.md'`)
+
+ Some of these commands may also attempt, at the end of their operation,
+ to cull transient differences between the sparse specification and the
+ sparsity patterns (see "Sparse specification vs. sparsity patterns" for
+ details, but this basically means either removing unmodified files not
+ matching the sparsity patterns and marking those files as
+ SKIP_WORKTREE, or vivifying files that match the sparsity patterns and
+ marking those files as !SKIP_WORKTREE).
+
+ * "restrict modulo conflicts"
+
+ Commands in this class generally behave like the "restrict" class,
+ except that:
+ (1) they will ignore the sparse specification and write files with
+ conflicts to the working tree (thus temporarily expanding the
+ sparse specification to include such files.)
+ (2) they are grouped with commands which move to a new commit, since
+ they often create a commit and then move to it, even though we
+ know there are many exceptions to moving to the new commit. (For
+ example, the user may rebase a commit that becomes empty, or have
+ a cherry-pick which conflicts, or a user could run `merge
+ --no-commit`, and we also view `apply --index` kind of like `am
+ --no-commit`.) As such, these commands can make changes to index
+ files outside the sparse specification, though they'll mark such
+ files with SKIP_WORKTREE.
+
+ * "restrict also specially applied to untracked files"
+
+ Commands in this class generally behave like the "restrict" class,
+ except that they have to handle untracked files differently too, often
+ because these commands are dealing with files changing state between
+ 'tracked' and 'untracked'. Often, this may mean printing an error
+ message if the command had nothing to do, but the arguments may have
+ referred to files whose tracked-ness state could have changed were it
+ not for the sparsity patterns excluding them.
+
+ * "no restrict"
+
+ Commands in this class ignore the sparse specification entirely.
+
+ * "restrict or no restrict dependent upon behavior A vs. behavior B"
+
+ Commands in this class behave like "no restrict" for folks in the
+ behavior B camp, and like "restrict" for folks in the behavior A camp.
+ However, when behaving like "restrict" a warning of some sort might be
+ provided that history queries have been limited by the sparse-checkout
+ specification.
+
+
+=== Subcommand-dependent defaults ===
+
+Note that we have different defaults depending on the command for the
+desired behavior:
+
+ * Commands defaulting to "restrict":
+ * diff-files
+ * diff (without --cached or REVISION arguments)
+ * grep (without --cached or REVISION arguments)
+ * switch
+ * checkout (the switch-like half)
+ * reset (<commit>)
+
+ * restore
+ * checkout (the restore-like half)
+ * checkout-index
+ * reset (with pathspec)
+
+ This behavior makes sense; these interact with the working tree.
+
+ * Commands defaulting to "restrict modulo conflicts":
+ * merge
+ * rebase
+ * cherry-pick
+ * revert
+
+ * am
+ * apply --index (which is kind of like an `am --no-commit`)
+
+ * read-tree (especially with -m or -u; is kind of like a --no-commit merge)
+ * reset (<tree-ish>, due to similarity to read-tree)
+
+ These also interact with the working tree, but require slightly
+ different behavior either so that (a) conflicts can be resolved or (b)
+ because they are kind of like a merge-without-commit operation.
+
+ (See also the "Known bugs" section below regarding `am` and `apply`)
+
+ * Commands defaulting to "no restrict":
+ * archive
+ * bundle
+ * commit
+ * format-patch
+ * fast-export
+ * fast-import
+ * commit-tree
+
+ * stash
+ * apply (without `--index`)
+
+ These have completely different defaults and perhaps deserve the most
+ detailed explanation:
+
+ In the case of commands in the first group (format-patch,
+ fast-export, bundle, archive, etc.), these are commands for
+ communicating history, which will be broken if they restrict to a
+ subset of the repository. As such, they operate on full paths and
+ have no `--restrict` option for overriding. Some of these commands may
+ take paths for manually restricting what is exported, but it needs to
+ be very explicit.
+
+ In the case of stash, it needs to vivify files to avoid losing the
+ user's changes.
+
+ In the case of apply without `--index`, that command needs to update
+ the working tree without the index (or the index without the working
+ tree if `--cached` is passed), and if we restrict those updates to the
+ sparse specification then we'll lose changes from the user.
+
+ * Commands defaulting to "restrict also specially applied to untracked files":
+ * add
+ * rm
+ * mv
+ * update-index
+ * status
+ * clean (?)
+
+ Our original implementation for the first three of these commands was
+ "no restrict", but it had some severe usability issues:
+ * `git add <somefile>` if honored and outside the sparse
+ specification, can result in the file randomly disappearing later
+ when some subsequent command is run (since various commands
+ automatically clean up unmodified files outside the sparse
+ specification).
+ * `git rm '*.jpg'` could very negatively surprise users if it deletes
+ files outside the range of the user's interest.
+ * `git mv` has similar surprises when moving into or out of the cone,
+ so best to restrict by default.
+
+ So, we switched `add` and `rm` to default to "restrict", which made
+ usability problems much less severe and less frequent, but we still got
+ complaints because commands like:
+ git add <file-outside-sparse-specification>
+ git rm <file-outside-sparse-specification>
+ would silently do nothing. We should instead print an error in those
+ cases to get usability right.
+
+ update-index needs to be updated to match, and status and maybe clean
+ also need to be updated to specially handle untracked paths.
+
+ There may be a difference in here between behavior A and behavior B in
+ terms of verboseness of errors or additional warnings.
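+
+ For reference, the current escape hatch for these commands is the
+ `--sparse` flag (which the "Flags & Config" item in the implementation
+ plan below proposes turning into a deprecated alias):
+
+ git add --sparse file-outside-sparse-specification
+ git rm --sparse file-outside-sparse-specification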
+
+ * Commands falling under "restrict or no restrict dependent upon behavior
+ A vs. behavior B"
+
+ * diff (with --cached or REVISION arguments)
+ * grep (with --cached or REVISION arguments)
+ * show (when given commit arguments)
+ * blame (only matters when one or more -C flags passed)
+ * and annotate
+ * log
+ * and variants: shortlog, gitk, show-branch, whatchanged, rev-list
+ * ls-files
+ * diff-index
+ * diff-tree
+ * ls-tree
+
+ For now, we default to behavior B for these, which want a default of
+ "no restrict".
+
+ Note that two of these commands -- diff and grep -- also appeared in a
+ different list with a default of "restrict", but only when limited to
+ searching the working tree. The working tree vs. history distinction
+ is fundamental in how behavior B operates, so this is expected. Note,
+ though, that for diff and grep with --cached, when doing "restrict"
+ behavior, the difference between sparse specification and sparsity
+ patterns is important to handle.
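+
+ As a small illustration of that distinction (sketch only):
+
+ git grep foo              # working tree search: "restrict"
+ git grep --cached foo     # index query: A => "restrict", B => "no restrict"
+ git grep foo REVISION     # history query: likewise A vs. B dependent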
+
+ "restrict" may make more sense as the long term default for these[12].
+ Also, supporting "restrict" for these commands might be a fair amount
+ of work to implement, meaning it might be implemented over multiple
+ releases. If that behavior were the default in the commands that
+ supported it, that would force behavior B users to need to learn to
+ slowly add additional flags to their commands, depending on git
+ version, to get the behavior they want. That gradual switchover would
+ be painful, so we should avoid it at least until it's fully
+ implemented.
+
+
+=== Sparse specification vs. sparsity patterns ===
+
+In a well-behaved situation, the sparse specification is given directly
+by the $GIT_DIR/info/sparse-checkout file. However, it can transiently
+diverge for a few reasons:
+
+ * needing to resolve conflicts (merging will vivify conflicted files)
+ * running Git commands that implicitly vivify files (e.g. "git stash apply")
+ * running Git commands that explicitly vivify files (e.g. "git checkout
+ --ignore-skip-worktree-bits FILENAME")
+ * other commands that write to these files (perhaps a user copies it
+ from elsewhere)
+
+For the last item, note that we do automatically clear the SKIP_WORKTREE
+bit for files that are present in the working tree. This has been true
+since 82386b4496 ("Merge branch 'en/present-despite-skipped'",
+2022-03-09).
+
+However, such a situation is transient because:
+
+ * Such transient differences can and will be automatically removed as
+ a side-effect of commands which call unpack_trees() (checkout,
+ merge, reset, etc.).
+ * Users can also request such transient differences be corrected via
+ running `git sparse-checkout reapply`. Various places recommend
+ running that command.
+ * Additional commands are also welcome to implicitly fix these
+ differences; we may add more in the future.
+
+While we avoid dropping unstaged changes or files which have conflicts,
+we otherwise aggressively try to fix these transient differences. If
+users want these differences to persist, they should run the `set` or
+`add` subcommands of `git sparse-checkout` to reflect their intended
+sparse specification.
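+
+As a rough illustration (assuming cone mode and an invented path outside
+the cone), such a transient difference and its later cleanup might look
+like:
+
+    # explicitly vivifying a file expands the sparse specification...
+    $ git checkout --ignore-skip-worktree-bits -- outside-cone/file.c
+    $ git ls-files -t -- outside-cone/file.c
+    H outside-cone/file.c
+
+    # ...and, since the file is unmodified, `git sparse-checkout reapply`
+    # removes the transient difference again:
+    $ git sparse-checkout reapply
+    $ git ls-files -t -- outside-cone/file.c
+    S outside-cone/file.c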
+
+However, when we need to do a query on history restricted to the
+"relevant subset of files" such a transiently expanded sparse
+specification is ignored. There are a couple reasons for this:
+
+ * The behavior wanted when doing something like
+ git grep expression REVISION
+ is roughly what the users would expect from
+ git checkout REVISION && git grep expression
+ (modulo a "REVISION:" prefix), which has a couple ramifications:
+
+ * REVISION may have paths not in the current index, so there is no
+ path we can consult for a SKIP_WORKTREE setting for those paths.
+
+ * Since `checkout` is one of those commands that tries to remove
+ transient differences in the sparse specification, it makes sense
+ to use the corrected sparse specification
+ (i.e. $GIT_DIR/info/sparse-checkout) rather than attempting to
+ consult SKIP_WORKTREE anyway.
+
+So, a transiently expanded (or restricted) sparse specification applies to
+the working tree, but not to history queries where we always use the
+sparsity patterns. (See [16] for an early discussion of this.)
+
+Similar to a transiently expanded sparse specification of the working tree
+based on additional files being present in the working tree, we also need
+to consider additional files being modified in the index. In particular,
+if the user has staged changes to files (relative to HEAD) that do not
+match the sparsity patterns, and the file is not present in the working
+tree, we still want to consider the file part of the sparse specification
+if we are specifically performing a query related to the index (e.g. git
+diff --cached [REVISION], git diff-index [REVISION], git restore --staged
+--source=REVISION -- PATHS, etc.) Note that a transiently expanded sparse
+specification for the index usually only matters under behavior A, since
+under behavior B index operations are lumped with history and tend to
+operate full-tree.
+
+
+=== Implementation Questions ===
+
+ * Do the options --scope={sparse,all} sound good to others? Are there better
+ options?
+ * Names in use, or appearing in patches, or previously suggested:
+ * --sparse/--dense
+ * --ignore-skip-worktree-bits
+ * --ignore-skip-worktree-entries
+ * --ignore-sparsity
+ * --[no-]restrict-to-sparse-paths
+ * --full-tree/--sparse-tree
+ * --[no-]restrict
+ * --scope={sparse,all}
+ * --focus/--unfocus
+ * --limit/--unlimited
+ * Rationale making me lean slightly towards --scope={sparse,all}:
+ * We want a name that works for many commands, so we need a name that
+ does not conflict
+ * We know that we have more than two possible usecases, so it is best
+ to avoid a flag that appears to be binary.
+ * --scope={sparse,all} isn't overly long and seems relatively
+ explanatory
+ * `--sparse`, as used in add/rm/mv, is totally backwards for
+ grep/log/etc. Changing the meaning of `--sparse` for these
+ commands would fix the backwardness, but possibly break existing
+ scripts. Using a new name pairing would allow us to treat
+ `--sparse` in these commands as a deprecated alias.
+ * There is a different `--sparse`/`--dense` pair for commands using
+ revision machinery, so using that naming might cause confusion
+ * There is also a `--sparse` in both pack-objects and show-branch, which
+ don't conflict but do suggest that `--sparse` is overloaded
+ * The name --ignore-skip-worktree-bits is a double negative, is
+ quite a mouthful, refers to an implementation detail that many
+ users may not be familiar with, and we'd need a negation for it
+ which would probably be even more ridiculously long. (But we
+ can make --ignore-skip-worktree-bits a deprecated alias for
+ --no-restrict.)
+
+ * If a config option is added (sparse.scope?) what should the values and
+ description be? "sparse" (behavior A), "worktree-sparse-history-dense"
+ (behavior B), "dense" (behavior C)? There's a risk of confusion,
+ because even for Behaviors A and B we want some commands to be
+ full-tree and others to operate sparsely, so the wording may need to be
+ more tied to the usecases and somehow explain that. Also, right now,
+ the primary difference we are focusing is just the history-querying
+ commands (log/diff/grep). Previous config suggestion here: [13]
+
+ * Is `--no-expand` a good alias for ls-files's `--sparse` option?
+ (`--sparse` does not map to either `--scope=sparse` or `--scope=all`,
+ because in non-cone mode it does nothing and in cone-mode it shows the
+ sparse directory entries which are technically outside the sparse
+ specification)
+
+ * Under Behavior A:
+ * Does ls-files' `--no-expand` override the default `--scope=all`, or
+ does it need an extra flag?
+ * Does ls-files' `-t` option imply `--scope=all`?
+ * Does update-index's `--[no-]skip-worktree` option imply `--scope=all`?
+
+ * sparse-checkout: once behavior A is fully implemented, should we take
+ an interim measure to ease people into switching the default? Namely,
+ if folks are not already in a sparse checkout, then require
+ `sparse-checkout init/set` to take a
+ `--set-scope=(sparse|worktree-sparse-history-dense|dense)` flag (which
+ would set sparse.scope according to the setting given), and throw an
+ error if the flag is not provided? That error would be a great place
+ to warn folks that the default may change in the future, and get them
+ used to specifying what they want so that the eventual default switch
+ is seamless for them.
+
+
+=== Implementation Goals/Plans ===
+
+ * Get buy-in on this document in general.
+
+ * Figure out answers to the 'Implementation Questions' sections (above)
+
+ * Fix bugs in the 'Known bugs' section (below)
+
+ * Provide some kind of method for backfilling the blobs within the sparse
+ specification in a partial clone
+
+ [Below here is kind of spitballing since the first two haven't been resolved]
+
+ * update-index: flip the default to --no-ignore-skip-worktree-entries,
+ nuke this stupid "Oh, there's a bug? Let me add a flag to let users
+ request that they not trigger this bug." flag
+
+ * Flags & Config
+ * Make `--sparse` in add/rm/mv a deprecated alias for `--scope=all`
+ * Make `--ignore-skip-worktree-bits` in checkout-index/checkout/restore
+ a deprecated aliases for `--scope=all`
+ * Create config option (sparse.scope?), tie it to the "Cliff notes"
+ overview
+
+ * Add --scope=sparse (and --scope=all) flag to each of the history querying
+ commands. IMPORTANT: make sure diff machinery changes don't mess with
+ format-patch, fast-export, etc.
+
+=== Known bugs ===
+
+This list used to be a lot longer (see e.g. [1,2,3,4,5,6,7,8,9]), but we've
+been working on it.
+
+0. Behavior A is not well supported in Git. (Behavior B didn't use to
+ be either, but was the easier of the two to implement.)
+
+1. am and apply:
+
+ apply, without `--index` or `--cached`, relies on files being present
+ in the working copy, and also writes to them unconditionally. As
+ such, it should first check for the files' presence, and if found to
+ be SKIP_WORKTREE, then clear the bit and vivify the paths, then do
+ its work. Currently, it just throws an error.
+
+ apply, with either `--cached` or `--index`, will not preserve the
+ SKIP_WORKTREE bit. This is fine if the file has conflicts, but
+ otherwise SKIP_WORKTREE bits should be preserved for --cached and
+ probably also for --index.
+
+ am, if there are no conflicts, will vivify files and fail to preserve
+ the SKIP_WORKTREE bit. If there are conflicts and `-3` is not
+ specified, it will vivify files and then complain the patch doesn't
+ apply. If there are conflicts and `-3` is specified, it will vivify
+ files and then complain that those vivified files would be
+ overwritten by merge.
+
+2. reset --hard:
+
+ reset --hard provides confusing error message (works correctly, but
+ misleads the user into believing it didn't):
+
+ $ touch addme
+ $ git add addme
+ $ git ls-files -t
+ H addme
+ H tracked
+ S tracked-but-maybe-skipped
+ $ git reset --hard # usually works great
+ error: Path 'addme' not uptodate; will not remove from working tree.
+ HEAD is now at bdbbb6f third
+ $ git ls-files -t
+ H tracked
+ S tracked-but-maybe-skipped
+ $ ls -1
+ tracked
+
+ `git reset --hard` DID remove addme from the index and the working tree, contrary
+ to the error message, but in line with how reset --hard should behave.
+
+3. read-tree
+
+ `read-tree` doesn't apply the 'SKIP_WORKTREE' bit to *any* of the
+ entries it reads into the index, resulting in all your files suddenly
+ appearing to be "deleted".
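+
+ A rough sketch of how this manifests, reusing the example files from
+ the `reset --hard` item above:
+
+ $ git ls-files -t
+ H tracked
+ S tracked-but-maybe-skipped
+ $ git read-tree HEAD
+ $ git status --porcelain
+  D tracked-but-maybe-skipped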
+
+4. Checkout, restore:
+
+ These commands do not handle path & revision arguments appropriately:
+
+ $ ls
+ tracked
+ $ git ls-files -t
+ H tracked
+ S tracked-but-maybe-skipped
+ $ git status --porcelain
+ $ git checkout -- '*skipped'
+ error: pathspec '*skipped' did not match any file(s) known to git
+ $ git ls-files -- '*skipped'
+ tracked-but-maybe-skipped
+ $ git checkout HEAD -- '*skipped'
+ error: pathspec '*skipped' did not match any file(s) known to git
+ $ git ls-tree HEAD | grep skipped
+ 100644 blob 276f5a64354b791b13840f02047738c77ad0584f tracked-but-maybe-skipped
+ $ git status --porcelain
+ $ git checkout HEAD~1 -- '*skipped'
+ $ git ls-files -t
+ H tracked
+ H tracked-but-maybe-skipped
+ $ git status --porcelain
+ M tracked-but-maybe-skipped
+ $ git checkout HEAD -- '*skipped'
+ $ git status --porcelain
+ $
+
+ Note that checkout without a revision (or restore --staged) fails to
+ find a file to restore from the index, even though ls-files shows
+ such a file certainly exists.
+
+ Similar issues occur with HEAD (--source=HEAD in restore's case),
+ but suddenly works when HEAD~1 is specified. And then after that it
+ will work with HEAD specified, even though it didn't before.
+
+ Directories are also an issue:
+
+ $ git sparse-checkout set nomatches
+ $ git status
+ On branch main
+ You are in a sparse checkout with 0% of tracked files present.
+
+ nothing to commit, working tree clean
+ $ git checkout .
+ error: pathspec '.' did not match any file(s) known to git
+ $ git checkout HEAD~1 .
+ Updated 1 path from 58916d9
+ $ git ls-files -t
+ S tracked
+ H tracked-but-maybe-skipped
+
+5. checkout and restore --staged, continued:
+
+ These commands do not correctly scope operations to the sparse
+ specification, and make it worse by not setting important SKIP_WORKTREE
+ bits:
+
+ $ git restore --source OLDREV --staged outside-sparse-cone/
+ $ git status --porcelain
+ MD outside-sparse-cone/file1
+ MD outside-sparse-cone/file2
+ MD outside-sparse-cone/file3
+
+ We can add a --scope=all mode to `git restore` to let it operate outside
+ the sparse specification, but then it will be important to set the
+ SKIP_WORKTREE bits appropriately.
+
+6. Performance issues; see:
+ https://lore.kernel.org/git/CABPp-BEkJQoKZsQGCYioyga_uoDQ6iBeW+FKr8JhyuuTMK1RDw@mail.gmail.com/
+
+
+=== Reference Emails ===
+
+Emails that detail various bugs we've had in sparse-checkout:
+
+[1] (Original descriptions of behavior A & behavior B)
+ https://lore.kernel.org/git/CABPp-BGJ_Nvi5TmgriD9Bh6eNXE2EDq2f8e8QKXAeYG3BxZafA@mail.gmail.com/
+[2] (Fix stash applications in sparse checkouts; bugs from behavioral differences)
+ https://lore.kernel.org/git/ccfedc7140dbf63ba26a15f93bd3885180b26517.1606861519.git.gitgitgadget@gmail.com/
+[3] (Present-despite-skipped entries)
+ https://lore.kernel.org/git/11d46a399d26c913787b704d2b7169cafc28d639.1642175983.git.gitgitgadget@gmail.com/
+[4] (Clone --no-checkout interaction)
+ https://lore.kernel.org/git/pull.801.v2.git.git.1591324899170.gitgitgadget@gmail.com/ (clone --no-checkout)
+[5] (The need for update_sparsity() and avoiding `read-tree -mu HEAD`)
+ https://lore.kernel.org/git/3a1f084641eb47515b5a41ed4409a36128913309.1585270142.git.gitgitgadget@gmail.com/
+[6] (SKIP_WORKTREE is advisory, not mandatory)
+ https://lore.kernel.org/git/844306c3e86ef67591cc086decb2b760e7d710a3.1585270142.git.gitgitgadget@gmail.com/
+[7] (`worktree add` should copy sparsity settings from current worktree)
+ https://lore.kernel.org/git/c51cb3714e7b1d2f8c9370fe87eca9984ff4859f.1644269584.git.gitgitgadget@gmail.com/
+[8] (Avoid negative surprises in add, rm, and mv)
+ https://lore.kernel.org/git/cover.1617914011.git.matheus.bernardino@usp.br/
+ https://lore.kernel.org/git/pull.1018.v4.git.1632497954.gitgitgadget@gmail.com/
+[9] (Move from out-of-cone to in-cone)
+ https://lore.kernel.org/git/20220630023737.473690-6-shaoxuan.yuan02@gmail.com/
+ https://lore.kernel.org/git/20220630023737.473690-4-shaoxuan.yuan02@gmail.com/
+[10] (Unnecessarily downloading objects outside sparse specification)
+ https://lore.kernel.org/git/CAOLTT8QfwOi9yx_qZZgyGa8iL8kHWutEED7ok_jxwTcYT_hf9Q@mail.gmail.com/
+
+[11] (Stolee's comments on high-level usecases)
+ https://lore.kernel.org/git/1a1e33f6-3514-9afc-0a28-5a6b85bd8014@gmail.com/
+
+[12] Others commenting on eventually switching default to behavior A:
+ * https://lore.kernel.org/git/xmqqh719pcoo.fsf@gitster.g/
+ * https://lore.kernel.org/git/xmqqzgeqw0sy.fsf@gitster.g/
+ * https://lore.kernel.org/git/a86af661-cf58-a4e5-0214-a67d3a794d7e@github.com/
+
+[13] Previous config name suggestion and description
+ * https://lore.kernel.org/git/CABPp-BE6zW0nJSStcVU=_DoDBnPgLqOR8pkTXK3dW11=T01OhA@mail.gmail.com/
+
+[14] Tangential issue: switch to cone mode as default sparse specification mechanism:
+ https://lore.kernel.org/git/a1b68fd6126eb341ef3637bb93fedad4309b36d0.1650594746.git.gitgitgadget@gmail.com/
+
+[15] Lengthy email on grep behavior, covering what should be searched:
+ * https://lore.kernel.org/git/CABPp-BGVO3QdbfE84uF_3QDF0-y2iHHh6G5FAFzNRfeRitkuHw@mail.gmail.com/
+
+[16] Email explaining sparsity patterns vs. SKIP_WORKTREE and history operations,
+ search for the parenthetical comment starting "We do not check".
+ https://lore.kernel.org/git/CABPp-BFsCPPNOZ92JQRJeGyNd0e-TCW-LcLyr0i_+VSQJP+GCg@mail.gmail.com/
+
+[17] https://lore.kernel.org/git/20220207190320.2960362-1-jonathantanmy@google.com/
diff --git a/INSTALL b/INSTALL
index 89b15d71df..3344788397 100644
--- a/INSTALL
+++ b/INSTALL
@@ -133,10 +133,6 @@ Issues of note:
you are using libcurl older than 7.34.0. Otherwise you can use
NO_OPENSSL without losing git-imap-send.
- By default, git uses OpenSSL for SHA1 but it will use its own
- library (inspired by Mozilla's) with either NO_OPENSSL or
- BLK_SHA1.
-
- "libcurl" library is used for fetching and pushing
repositories over http:// or https://, as well as by
git-imap-send if the curl version is >= 7.34.0. If you do
diff --git a/Makefile b/Makefile
index 4927379184..54f3bdc6a3 100644
--- a/Makefile
+++ b/Makefile
@@ -4,8 +4,20 @@ all::
# Import tree-wide shared Makefile behavior and libraries
include shared.mak
+# == Makefile defines ==
+#
+# These defines change the behavior of the Makefile itself, but have
+# no impact on what it builds:
+#
# Define V=1 to have a more verbose compile.
#
+# == Portability and optional library defines ==
+#
+# These defines indicate what Git can expect from the OS, what
+# libraries are available etc. Much of this is auto-detected in
+# config.mak.uname, or in configure.ac when using the optional "make
+# configure && ./configure" (see INSTALL).
+#
# Define SHELL_PATH to a POSIX shell if your /bin/sh is broken.
#
# Define SANE_TOOL_PATH to a colon-separated list of paths to prepend
@@ -30,68 +42,8 @@ include shared.mak
#
# Define NO_OPENSSL environment variable if you do not have OpenSSL.
#
-# Define USE_LIBPCRE if you have and want to use libpcre. Various
-# commands such as log and grep offer runtime options to use
-# Perl-compatible regular expressions instead of standard or extended
-# POSIX regular expressions.
-#
-# Only libpcre version 2 is supported. USE_LIBPCRE2 is a synonym for
-# USE_LIBPCRE, support for the old USE_LIBPCRE1 has been removed.
-#
-# Define LIBPCREDIR=/foo/bar if your PCRE header and library files are
-# in /foo/bar/include and /foo/bar/lib directories.
-#
# Define HAVE_ALLOCA_H if you have working alloca(3) defined in that header.
#
-# Define NO_CURL if you do not have libcurl installed. git-http-fetch and
-# git-http-push are not built, and you cannot use http:// and https://
-# transports (neither smart nor dumb).
-#
-# Define CURLDIR=/foo/bar if your curl header and library files are in
-# /foo/bar/include and /foo/bar/lib directories.
-#
-# Define CURL_CONFIG to curl's configuration program that prints information
-# about the library (e.g., its version number). The default is 'curl-config'.
-#
-# Define CURL_LDFLAGS to specify flags that you need to link when using libcurl,
-# if you do not want to rely on the libraries provided by CURL_CONFIG. The
-# default value is a result of `curl-config --libs`. An example value for
-# CURL_LDFLAGS is as follows:
-#
-# CURL_LDFLAGS=-lcurl
-#
-# Define NO_EXPAT if you do not have expat installed. git-http-push is
-# not built, and you cannot push using http:// and https:// transports (dumb).
-#
-# Define EXPATDIR=/foo/bar if your expat header and library files are in
-# /foo/bar/include and /foo/bar/lib directories.
-#
-# Define EXPAT_NEEDS_XMLPARSE_H if you have an old version of expat (e.g.,
-# 1.1 or 1.2) that provides xmlparse.h instead of expat.h.
-#
-# Define NO_GETTEXT if you don't want Git output to be translated.
-# A translated Git requires GNU libintl or another gettext implementation,
-# plus libintl-perl at runtime.
-#
-# Define USE_GETTEXT_SCHEME and set it to 'fallthrough', if you don't trust
-# the installed gettext translation of the shell scripts output.
-#
-# Define HAVE_LIBCHARSET_H if you haven't set NO_GETTEXT and you can't
-# trust the langinfo.h's nl_langinfo(CODESET) function to return the
-# current character set. GNU and Solaris have a nl_langinfo(CODESET),
-# FreeBSD can use either, but MinGW and some others need to use
-# libcharset.h's locale_charset() instead.
-#
-# Define CHARSET_LIB to the library you need to link with in order to
-# use locale_charset() function. On some platforms this needs to set to
-# -lcharset, on others to -liconv .
-#
-# Define LIBC_CONTAINS_LIBINTL if your gettext implementation doesn't
-# need -lintl when linking.
-#
-# Define NO_MSGFMT_EXTENDED_OPTIONS if your implementation of msgfmt
-# doesn't support GNU extensions like --check and --statistics
-#
# Define HAVE_PATHS_H if you have paths.h and want to use the default PATH
# it specifies.
#
@@ -152,39 +104,6 @@ include shared.mak
# and do not want to use Apple's CommonCrypto library. This allows you
# to provide your own OpenSSL library, for example from MacPorts.
#
-# Define BLK_SHA1 environment variable to make use of the bundled
-# optimized C SHA1 routine.
-#
-# Define DC_SHA1 to unconditionally enable the collision-detecting sha1
-# algorithm. This is slower, but may detect attempted collision attacks.
-# Takes priority over other *_SHA1 knobs.
-#
-# Define DC_SHA1_EXTERNAL in addition to DC_SHA1 if you want to build / link
-# git with the external SHA1 collision-detect library.
-# Without this option, i.e. the default behavior is to build git with its
-# own built-in code (or submodule).
-#
-# Define DC_SHA1_SUBMODULE in addition to DC_SHA1 to use the
-# sha1collisiondetection shipped as a submodule instead of the
-# non-submodule copy in sha1dc/. This is an experimental option used
-# by the git project to migrate to using sha1collisiondetection as a
-# submodule.
-#
-# Define OPENSSL_SHA1 environment variable when running make to link
-# with the SHA1 routine from openssl library.
-#
-# Define SHA1_MAX_BLOCK_SIZE to limit the amount of data that will be hashed
-# in one call to the platform's SHA1_Update(). e.g. APPLE_COMMON_CRYPTO
-# wants 'SHA1_MAX_BLOCK_SIZE=1024L*1024L*1024L' defined.
-#
-# Define BLK_SHA256 to use the built-in SHA-256 routines.
-#
-# Define NETTLE_SHA256 to use the SHA-256 routines in libnettle.
-#
-# Define GCRYPT_SHA256 to use the SHA-256 routines in libgcrypt.
-#
-# Define OPENSSL_SHA256 to use the SHA-256 routines in OpenSSL.
-#
# Define NEEDS_CRYPTO_WITH_SSL if you need -lcrypto when using -lssl (Darwin).
#
# Define NEEDS_SSL_WITH_CRYPTO if you need -lssl when using -lcrypto (Darwin).
@@ -490,6 +409,151 @@ include shared.mak
# to the "<name>" of the corresponding `compat/fsmonitor/fsm-settings-<name>.c`
# that implements the `fsm_os_settings__*()` routines.
#
+# === Optional library: libintl ===
+#
+# Define NO_GETTEXT if you don't want Git output to be translated.
+# A translated Git requires GNU libintl or another gettext implementation,
+# plus libintl-perl at runtime.
+#
+# Define USE_GETTEXT_SCHEME and set it to 'fallthrough', if you don't trust
+# the installed gettext translation of the shell scripts output.
+#
+# Define HAVE_LIBCHARSET_H if you haven't set NO_GETTEXT and you can't
+# trust the langinfo.h's nl_langinfo(CODESET) function to return the
+# current character set. GNU and Solaris have a nl_langinfo(CODESET),
+# FreeBSD can use either, but MinGW and some others need to use
+# libcharset.h's locale_charset() instead.
+#
+# Define CHARSET_LIB to the library you need to link with in order to
+# use locale_charset() function. On some platforms this needs to set to
+# -lcharset, on others to -liconv .
+#
+# Define LIBC_CONTAINS_LIBINTL if your gettext implementation doesn't
+# need -lintl when linking.
+#
+# Define NO_MSGFMT_EXTENDED_OPTIONS if your implementation of msgfmt
+# doesn't support GNU extensions like --check and --statistics
+#
+# === Optional library: libexpat ===
+#
+# Define NO_EXPAT if you do not have expat installed. git-http-push is
+# not built, and you cannot push using http:// and https:// transports (dumb).
+#
+# Define EXPATDIR=/foo/bar if your expat header and library files are in
+# /foo/bar/include and /foo/bar/lib directories.
+#
+# Define EXPAT_NEEDS_XMLPARSE_H if you have an old version of expat (e.g.,
+# 1.1 or 1.2) that provides xmlparse.h instead of expat.h.
+
+# === Optional library: libcurl ===
+#
+# Define NO_CURL if you do not have libcurl installed. git-http-fetch and
+# git-http-push are not built, and you cannot use http:// and https://
+# transports (neither smart nor dumb).
+#
+# Define CURLDIR=/foo/bar if your curl header and library files are in
+# /foo/bar/include and /foo/bar/lib directories.
+#
+# Define CURL_CONFIG to curl's configuration program that prints information
+# about the library (e.g., its version number). The default is 'curl-config'.
+#
+# Define CURL_LDFLAGS to specify flags that you need to link when using libcurl,
+# if you do not want to rely on the libraries provided by CURL_CONFIG. The
+# default value is a result of `curl-config --libs`. An example value for
+# CURL_LDFLAGS is as follows:
+#
+# CURL_LDFLAGS=-lcurl
+#
+# === Optional library: libpcre2 ===
+#
+# Define USE_LIBPCRE if you have and want to use libpcre. Various
+# commands such as log and grep offer runtime options to use
+# Perl-compatible regular expressions instead of standard or extended
+# POSIX regular expressions.
+#
+# Only libpcre version 2 is supported. USE_LIBPCRE2 is a synonym for
+# USE_LIBPCRE, support for the old USE_LIBPCRE1 has been removed.
+#
+# Define LIBPCREDIR=/foo/bar if your PCRE header and library files are
+# in /foo/bar/include and /foo/bar/lib directories.
+#
+# == SHA-1 and SHA-256 defines ==
+#
+# === SHA-1 backend ===
+#
+# ==== Security ====
+#
+# Due to the SHAttered (https://shattered.io) attack vector on SHA-1
+# it's strongly recommended to use the sha1collisiondetection
+# counter-cryptanalysis library for SHA-1 hashing.
+#
+# If you know that you can trust the repository contents, or where
+# potential SHA-1 attacks are otherwise mitigated, the other backends
+# listed in "SHA-1 implementations" are faster than
+# sha1collisiondetection.
+#
+# ==== Default SHA-1 backend ====
+#
+# If no *_SHA1 backend is picked, the first supported one listed in
+# "SHA-1 implementations" will be picked.
+#
+# ==== Options common to all SHA-1 implementations ====
+#
+# Define SHA1_MAX_BLOCK_SIZE to limit the amount of data that will be hashed
+# in one call to the platform's SHA1_Update(). e.g. APPLE_COMMON_CRYPTO
+# wants 'SHA1_MAX_BLOCK_SIZE=1024L*1024L*1024L' defined.
+#
+# ==== SHA-1 implementations ====
+#
+# Define OPENSSL_SHA1 to link to the SHA-1 routines from the OpenSSL
+# library.
+#
+# Define BLK_SHA1 to make use of optimized C SHA-1 routines bundled
+# with git (in the block-sha1/ directory).
+#
+# Define NO_APPLE_COMMON_CRYPTO on OSX to opt-out of using the
+# "APPLE_COMMON_CRYPTO" backend for SHA-1, which is currently the
+# default on that OS. On macOS 10.4 (Tiger) or older,
+# NO_APPLE_COMMON_CRYPTO is defined by default.
+#
+# If you don't enable any of the *_SHA1 settings in this section, Git will
+# default to its built-in sha1collisiondetection library, which is a
+# collision-detecting sha1. This is slower, but may detect attempted
+# collision attacks.
+#
+# ==== Options for the sha1collisiondetection library ====
+#
+# Define DC_SHA1_EXTERNAL if you want to build / link
+# git with the external SHA1 collision-detect library.
+# Without this option, i.e. the default behavior is to build git with its
+# own built-in code (or submodule).
+#
+# Define DC_SHA1_SUBMODULE to use the
+# sha1collisiondetection shipped as a submodule instead of the
+# non-submodule copy in sha1dc/. This is an experimental option used
+# by the git project to migrate to using sha1collisiondetection as a
+# submodule.
+#
+# === SHA-256 backend ===
+#
+# ==== Security ====
+#
+# Unlike SHA-1, the SHA-256 algorithm does not suffer from any known
+# vulnerabilities, so any implementation will do.
+#
+# ==== SHA-256 implementations ====
+#
+# Define OPENSSL_SHA256 to use the SHA-256 routines in OpenSSL.
+#
+# Define NETTLE_SHA256 to use the SHA-256 routines in libnettle.
+#
+# Define GCRYPT_SHA256 to use the SHA-256 routines in libgcrypt.
+#
+# If you don't enable any of the *_SHA256 settings in this section, Git
+# will default to its built-in sha256 implementation.
+#
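+# As an illustrative example (any single knob above can equally be used
+# on its own), an invocation selecting OpenSSL for both hash functions
+# might look like:
+#
+#     make OPENSSL_SHA1=1 OPENSSL_SHA256=1
+#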
+# == DEVELOPER defines ==
+#
# Define DEVELOPER to enable more compiler warnings. Compiler version
# and family are auto detected, but could be overridden by defining
# COMPILER_FEATURES (see config.mak.dev). You can still set
@@ -627,7 +691,6 @@ THIRD_PARTY_SOURCES =
# interactive shell sessions without exporting it.
unexport CDPATH
-SCRIPT_SH += git-bisect.sh
SCRIPT_SH += git-difftool--helper.sh
SCRIPT_SH += git-filter-branch.sh
SCRIPT_SH += git-merge-octopus.sh
@@ -723,6 +786,7 @@ TEST_BUILTINS_OBJS += test-advise.o
TEST_BUILTINS_OBJS += test-bitmap.o
TEST_BUILTINS_OBJS += test-bloom.o
TEST_BUILTINS_OBJS += test-bundle-uri.o
+TEST_BUILTINS_OBJS += test-cache-tree.o
TEST_BUILTINS_OBJS += test-chmtime.o
TEST_BUILTINS_OBJS += test-config.o
TEST_BUILTINS_OBJS += test-crontab.o
@@ -1057,6 +1121,8 @@ LIB_OBJS += refs/debug.o
LIB_OBJS += refs/files-backend.o
LIB_OBJS += refs/iterator.o
LIB_OBJS += refs/packed-backend.o
+LIB_OBJS += refs/packed-format-v1.o
+LIB_OBJS += refs/packed-format-v2.o
LIB_OBJS += refs/ref-cache.o
LIB_OBJS += refspec.o
LIB_OBJS += remote.o
@@ -1137,7 +1203,7 @@ BUILTIN_OBJS += builtin/am.o
BUILTIN_OBJS += builtin/annotate.o
BUILTIN_OBJS += builtin/apply.o
BUILTIN_OBJS += builtin/archive.o
-BUILTIN_OBJS += builtin/bisect--helper.o
+BUILTIN_OBJS += builtin/bisect.o
BUILTIN_OBJS += builtin/blame.o
BUILTIN_OBJS += builtin/branch.o
BUILTIN_OBJS += builtin/bugreport.o
@@ -1302,11 +1368,53 @@ SP_EXTRA_FLAGS = -Wno-universal-initializer
SANITIZE_LEAK =
SANITIZE_ADDRESS =
-# For the 'coccicheck' target; setting SPATCH_BATCH_SIZE higher will
-# usually result in less CPU usage at the cost of higher peak memory.
-# Setting it to 0 will feed all files in a single spatch invocation.
-SPATCH_FLAGS = --all-includes
-SPATCH_BATCH_SIZE = 1
+# For the 'coccicheck' target
+SPATCH_INCLUDE_FLAGS = --all-includes
+SPATCH_FLAGS =
+SPATCH_TEST_FLAGS =
+
+# If *.o files are present, have "coccicheck" depend on them, with
+# COMPUTE_HEADER_DEPENDENCIES this will speed up the common-case of
+# only needing to re-generate coccicheck results for the users of a
+# given API if it's changed, and not all files in the project. If
+# COMPUTE_HEADER_DEPENDENCIES=no this will be unset too.
+SPATCH_USE_O_DEPENDENCIES = YesPlease
+
+# Set SPATCH_CONCAT_COCCI to concatenate the contrib/cocci/*.cocci
+# files into a single contrib/cocci/ALL.cocci before running
+# "coccicheck".
+#
+# Pros:
+#
+# - Speeds up a one-shot run of "make coccicheck", as we won't have to
+# parse *.[ch] files N times for the N *.cocci rules
+#
+# Cons:
+#
+# - Will make incremental development of *.cocci slower, as
+# e.g. changing strbuf.cocci will re-run all *.cocci.
+#
+# - Makes error and performance analysis harder, as rules will be
+# applied from a monolithic ALL.cocci, rather than
+# e.g. strbuf.cocci. To work around this either undefine this, or
+# generate a specific patch, e.g. this will always use strbuf.cocci,
+# not ALL.cocci:
+#
+# make contrib/coccinelle/strbuf.cocci.patch
+SPATCH_CONCAT_COCCI = YesPlease
+
+# Rebuild 'coccicheck' if $(SPATCH), its flags etc. change
+TRACK_SPATCH_DEFINES =
+TRACK_SPATCH_DEFINES += $(SPATCH)
+TRACK_SPATCH_DEFINES += $(SPATCH_INCLUDE_FLAGS)
+TRACK_SPATCH_DEFINES += $(SPATCH_FLAGS)
+TRACK_SPATCH_DEFINES += $(SPATCH_TEST_FLAGS)
+GIT-SPATCH-DEFINES: FORCE
+ @FLAGS='$(TRACK_SPATCH_DEFINES)'; \
+ if test x"$$FLAGS" != x"`cat GIT-SPATCH-DEFINES 2>/dev/null`" ; then \
+ echo >&2 " * new spatch flags"; \
+ echo "$$FLAGS" >GIT-SPATCH-DEFINES; \
+ fi
include config.mak.uname
-include config.mak.autogen
@@ -1826,7 +1934,6 @@ ifdef APPLE_COMMON_CRYPTO
COMPAT_CFLAGS += -DCOMMON_DIGEST_FOR_OPENSSL
BASIC_CFLAGS += -DSHA1_APPLE
else
- DC_SHA1 := YesPlease
BASIC_CFLAGS += -DSHA1_DC
LIB_OBJS += sha1dc_git.o
ifdef DC_SHA1_EXTERNAL
@@ -2989,7 +3096,6 @@ GIT-BUILD-OPTIONS: FORCE
@echo NO_REGEX=\''$(subst ','\'',$(subst ','\'',$(NO_REGEX)))'\' >>$@+
@echo NO_UNIX_SOCKETS=\''$(subst ','\'',$(subst ','\'',$(NO_UNIX_SOCKETS)))'\' >>$@+
@echo PAGER_ENV=\''$(subst ','\'',$(subst ','\'',$(PAGER_ENV)))'\' >>$@+
- @echo DC_SHA1=\''$(subst ','\'',$(subst ','\'',$(DC_SHA1)))'\' >>$@+
@echo SANITIZE_LEAK=\''$(subst ','\'',$(subst ','\'',$(SANITIZE_LEAK)))'\' >>$@+
@echo SANITIZE_ADDRESS=\''$(subst ','\'',$(subst ','\'',$(SANITIZE_ADDRESS)))'\' >>$@+
@echo X=\'$(X)\' >>$@+
@@ -3045,7 +3151,6 @@ else
@echo RUNTIME_PREFIX=\'false\' >>$@+
endif
@if cmp $@+ $@ >/dev/null 2>&1; then $(RM) $@+; else mv $@+ $@; fi
- @if test -f GIT-BUILD-DIR; then rm GIT-BUILD-DIR; fi
### Detect Python interpreter path changes
ifndef NO_PYTHON
@@ -3144,35 +3249,113 @@ check: $(GENERATED_H)
exit 1; \
fi
+COCCI_GEN_ALL = .build/contrib/coccinelle/ALL.cocci
+COCCI_GLOB = $(wildcard contrib/coccinelle/*.cocci)
+COCCI_RULES_TRACKED = $(COCCI_GLOB:%=.build/%)
+COCCI_RULES_TRACKED_NO_PENDING = $(filter-out %.pending.cocci,$(COCCI_RULES_TRACKED))
+COCCI_RULES =
+COCCI_RULES += $(COCCI_GEN_ALL)
+COCCI_RULES += $(COCCI_RULES_TRACKED)
+COCCI_NAMES =
+COCCI_NAMES += $(COCCI_RULES:.build/contrib/coccinelle/%.cocci=%)
+
+COCCICHECK_PENDING = $(filter %.pending.cocci,$(COCCI_RULES))
+COCCICHECK = $(filter-out $(COCCICHECK_PENDING),$(COCCI_RULES))
+
+COCCICHECK_PATCHES = $(COCCICHECK:%=%.patch)
+COCCICHECK_PATCHES_PENDING = $(COCCICHECK_PENDING:%=%.patch)
+
+COCCICHECK_PATCHES_INTREE = $(COCCICHECK_PATCHES:.build/%=%)
+COCCICHECK_PATCHES_PENDING_INTREE = $(COCCICHECK_PATCHES_PENDING:.build/%=%)
+
+# It's expensive to compute the many=many rules below, only eval them
+# on $(MAKECMDGOALS) that match these $(COCCI_RULES)
+COCCI_RULES_GLOB =
+COCCI_RULES_GLOB += cocci%
+COCCI_RULES_GLOB += .build/contrib/coccinelle/%
+COCCI_RULES_GLOB += $(COCCICHECK_PATCHES)
+COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_PENDING)
+COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_INTREE)
+COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_PENDING_INTREE)
+COCCI_GOALS = $(filter $(COCCI_RULES_GLOB),$(MAKECMDGOALS))
+
COCCI_TEST_RES = $(wildcard contrib/coccinelle/tests/*.res)
-%.cocci.patch: %.cocci $(COCCI_SOURCES)
- $(QUIET_SPATCH) \
- if test $(SPATCH_BATCH_SIZE) = 0; then \
- limit=; \
- else \
- limit='-n $(SPATCH_BATCH_SIZE)'; \
- fi; \
- if ! echo $(COCCI_SOURCES) | xargs $$limit \
- $(SPATCH) $(SPATCH_FLAGS) \
- --sp-file $< --patch . \
- >$@+ 2>$@.log; \
+$(COCCI_RULES_TRACKED): .build/% : %
+ $(call mkdir_p_parent_template)
+ $(QUIET_CP)cp $< $@
+
+.build/contrib/coccinelle/FOUND_H_SOURCES: $(FOUND_H_SOURCES)
+ $(call mkdir_p_parent_template)
+ $(QUIET_GEN) >$@
+
+$(COCCI_GEN_ALL): $(COCCI_RULES_TRACKED_NO_PENDING)
+ $(call mkdir_p_parent_template)
+ $(QUIET_SPATCH_CAT)cat $^ >$@
+
+ifeq ($(COMPUTE_HEADER_DEPENDENCIES),no)
+SPATCH_USE_O_DEPENDENCIES =
+endif
+define cocci-rule
+
+## Rule for .build/$(1).patch/$(2); Params:
+# $(1) = e.g. ".build/contrib/coccinelle/free.cocci"
+# $(2) = e.g. "grep.c"
+# $(3) = e.g. "grep.o"
+COCCI_$(1:.build/contrib/coccinelle/%.cocci=%) += $(1).d/$(2).patch
+$(1).d/$(2).patch: GIT-SPATCH-DEFINES
+$(1).d/$(2).patch: $(if $(and $(SPATCH_USE_O_DEPENDENCIES),$(wildcard $(3))),$(3),.build/contrib/coccinelle/FOUND_H_SOURCES)
+$(1).d/$(2).patch: $(1)
+$(1).d/$(2).patch: $(1).d/%.patch : %
+ $$(call mkdir_p_parent_template)
+ $$(QUIET_SPATCH)if ! $$(SPATCH) $$(SPATCH_FLAGS) \
+ $$(SPATCH_INCLUDE_FLAGS) \
+ --sp-file $(1) --patch . $$< \
+ >$$@ 2>$$@.log; \
then \
- cat $@.log; \
+ echo "ERROR when applying '$(1)' to '$$<'; '$$@.log' follows:"; \
+ cat $$@.log; \
exit 1; \
- fi; \
- mv $@+ $@; \
- if test -s $@; \
+ fi
+endef
+
+define cocci-matrix
+
+$(foreach s,$(COCCI_SOURCES),$(call cocci-rule,$(c),$(s),$(s:%.c=%.o)))
+endef
+
+ifdef COCCI_GOALS
+$(eval $(foreach c,$(COCCI_RULES),$(call cocci-matrix,$(c))))
+endif
+
+define spatch-rule
+
+.build/contrib/coccinelle/$(1).cocci.patch: $$(COCCI_$(1))
+ $$(QUIET_SPATCH_CAT)cat $$^ >$$@ && \
+ if test -s $$@; \
then \
- echo ' ' SPATCH result: $@; \
+ echo ' ' SPATCH result: $$@; \
fi
+contrib/coccinelle/$(1).cocci.patch: .build/contrib/coccinelle/$(1).cocci.patch
+ $$(QUIET_CP)cp $$< $$@
+
+endef
+
+ifdef COCCI_GOALS
+$(eval $(foreach n,$(COCCI_NAMES),$(call spatch-rule,$(n))))
+endif
COCCI_TEST_RES_GEN = $(addprefix .build/,$(COCCI_TEST_RES))
+$(COCCI_TEST_RES_GEN): GIT-SPATCH-DEFINES
$(COCCI_TEST_RES_GEN): .build/%.res : %.c
$(COCCI_TEST_RES_GEN): .build/%.res : %.res
+ifdef SPATCH_CONCAT_COCCI
+$(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : $(COCCI_GEN_ALL)
+else
$(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : contrib/coccinelle/%.cocci
+endif
$(call mkdir_p_parent_template)
- $(QUIET_SPATCH_T)$(SPATCH) $(SPATCH_FLAGS) \
+ $(QUIET_SPATCH_TEST)$(SPATCH) $(SPATCH_TEST_FLAGS) \
--very-quiet --no-show-diff \
--sp-file $< -o $@ \
$(@:.build/%.res=%.c) && \
@@ -3183,11 +3366,15 @@ $(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : contrib/coccinell
coccicheck-test: $(COCCI_TEST_RES_GEN)
coccicheck: coccicheck-test
-coccicheck: $(addsuffix .patch,$(filter-out %.pending.cocci,$(wildcard contrib/coccinelle/*.cocci)))
+ifdef SPATCH_CONCAT_COCCI
+coccicheck: contrib/coccinelle/ALL.cocci.patch
+else
+coccicheck: $(COCCICHECK_PATCHES_INTREE)
+endif
# See contrib/coccinelle/README
coccicheck-pending: coccicheck-test
-coccicheck-pending: $(addsuffix .patch,$(wildcard contrib/coccinelle/*.pending.cocci))
+coccicheck-pending: $(COCCICHECK_PATCHES_PENDING_INTREE)
.PHONY: coccicheck coccicheck-pending
@@ -3454,8 +3641,9 @@ profile-clean:
$(RM) $(addsuffix *.gcno,$(addprefix $(PROFILE_DIR)/, $(object_dirs)))
cocciclean:
+ $(RM) GIT-SPATCH-DEFINES
$(RM) -r .build/contrib/coccinelle
- $(RM) contrib/coccinelle/*.cocci.patch*
+ $(RM) contrib/coccinelle/*.cocci.patch
clean: profile-clean coverage-clean cocciclean
$(RM) -r .build
diff --git a/builtin.h b/builtin.h
index 8901a34d6b..aa955466b4 100644
--- a/builtin.h
+++ b/builtin.h
@@ -116,7 +116,7 @@ int cmd_am(int argc, const char **argv, const char *prefix);
int cmd_annotate(int argc, const char **argv, const char *prefix);
int cmd_apply(int argc, const char **argv, const char *prefix);
int cmd_archive(int argc, const char **argv, const char *prefix);
-int cmd_bisect__helper(int argc, const char **argv, const char *prefix);
+int cmd_bisect(int argc, const char **argv, const char *prefix);
int cmd_blame(int argc, const char **argv, const char *prefix);
int cmd_branch(int argc, const char **argv, const char *prefix);
int cmd_bugreport(int argc, const char **argv, const char *prefix);
diff --git a/builtin/add.c b/builtin/add.c
index 626c71ec6a..e9a76c1049 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -694,6 +694,6 @@ finish:
die(_("Unable to write new index file"));
dir_clear(&dir);
- UNLEAK(pathspec);
+ clear_pathspec(&pathspec);
return exit_status;
}
diff --git a/builtin/bisect--helper.c b/builtin/bisect.c
index 1d2ce8a0e1..cc9483e851 100644
--- a/builtin/bisect--helper.c
+++ b/builtin/bisect.c
@@ -20,18 +20,40 @@ static GIT_PATH_FUNC(git_path_bisect_names, "BISECT_NAMES")
static GIT_PATH_FUNC(git_path_bisect_first_parent, "BISECT_FIRST_PARENT")
static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN")
-static const char * const git_bisect_helper_usage[] = {
- N_("git bisect--helper --bisect-reset [<commit>]"),
- "git bisect--helper --bisect-terms [--term-good | --term-old | --term-bad | --term-new]",
- N_("git bisect--helper --bisect-start [--term-{new,bad}=<term> --term-{old,good}=<term>]"
- " [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<paths>...]"),
- "git bisect--helper --bisect-next",
- N_("git bisect--helper --bisect-state (bad|new) [<rev>]"),
- N_("git bisect--helper --bisect-state (good|old) [<rev>...]"),
- N_("git bisect--helper --bisect-replay <filename>"),
- N_("git bisect--helper --bisect-skip [(<rev>|<range>)...]"),
- "git bisect--helper --bisect-visualize",
- N_("git bisect--helper --bisect-run <cmd>..."),
+#define BUILTIN_GIT_BISECT_START_USAGE \
+ N_("git bisect start [--term-{new,bad}=<term> --term-{old,good}=<term>]" \
+ " [--no-checkout] [--first-parent] [<bad> [<good>...]] [--]" \
+ " [<pathspec>...]")
+#define BUILTIN_GIT_BISECT_STATE_USAGE \
+ N_("git bisect (good|bad) [<rev>...]")
+#define BUILTIN_GIT_BISECT_TERMS_USAGE \
+ "git bisect terms [--term-good | --term-bad]"
+#define BUILTIN_GIT_BISECT_SKIP_USAGE \
+ N_("git bisect skip [(<rev>|<range>)...]")
+#define BUILTIN_GIT_BISECT_NEXT_USAGE \
+ "git bisect next"
+#define BUILTIN_GIT_BISECT_RESET_USAGE \
+ N_("git bisect reset [<commit>]")
+#define BUILTIN_GIT_BISECT_VISUALIZE_USAGE \
+ "git bisect visualize"
+#define BUILTIN_GIT_BISECT_REPLAY_USAGE \
+ N_("git bisect replay <logfile>")
+#define BUILTIN_GIT_BISECT_LOG_USAGE \
+ "git bisect log"
+#define BUILTIN_GIT_BISECT_RUN_USAGE \
+ N_("git bisect run <cmd>...")
+
+static const char * const git_bisect_usage[] = {
+ BUILTIN_GIT_BISECT_START_USAGE,
+ BUILTIN_GIT_BISECT_STATE_USAGE,
+ BUILTIN_GIT_BISECT_TERMS_USAGE,
+ BUILTIN_GIT_BISECT_SKIP_USAGE,
+ BUILTIN_GIT_BISECT_NEXT_USAGE,
+ BUILTIN_GIT_BISECT_RESET_USAGE,
+ BUILTIN_GIT_BISECT_VISUALIZE_USAGE,
+ BUILTIN_GIT_BISECT_REPLAY_USAGE,
+ BUILTIN_GIT_BISECT_LOG_USAGE,
+ BUILTIN_GIT_BISECT_RUN_USAGE,
NULL
};
@@ -1191,13 +1213,13 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
if (bisect_next_check(terms, NULL))
return BISECT_FAILED;
- if (argc)
- sq_quote_argv(&command, argv);
- else {
+ if (!argc) {
error(_("bisect run failed: no command provided."));
return BISECT_FAILED;
}
+ sq_quote_argv(&command, argv);
+ strbuf_ltrim(&command);
while (1) {
res = do_bisect_run(command.buf);
@@ -1211,8 +1233,8 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
if (is_first_run && (res == 126 || res == 127)) {
int rc = verify_good(terms, command.buf);
is_first_run = 0;
- if (rc < 0) {
- error(_("unable to verify '%s' on good"
+ if (rc < 0 || 128 <= rc) {
+ error(_("unable to verify %s on good"
" revision"), command.buf);
res = BISECT_FAILED;
break;
@@ -1227,7 +1249,7 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
if (res < 0 || 128 <= res) {
error(_("bisect run failed: exit code %d from"
- " '%s' is < 0 or >= 128"), res, command.buf);
+ " %s is < 0 or >= 128"), res, command.buf);
break;
}
@@ -1261,14 +1283,14 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
if (res == BISECT_ONLY_SKIPPED_LEFT)
error(_("bisect run cannot continue any more"));
else if (res == BISECT_INTERNAL_SUCCESS_MERGE_BASE) {
- printf(_("bisect run success"));
+ puts(_("bisect run success"));
res = BISECT_OK;
} else if (res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND) {
- printf(_("bisect found first bad commit"));
+ puts(_("bisect found first bad commit"));
res = BISECT_OK;
} else if (res) {
- error(_("bisect run failed: 'git bisect--helper --bisect-state"
- " %s' exited with error code %d"), new_state, res);
+ error(_("bisect run failed: 'bisect-state %s'"
+ " exited with error code %d"), new_state, res);
} else {
continue;
}
@@ -1279,115 +1301,144 @@ static int bisect_run(struct bisect_terms *terms, const char **argv, int argc)
return res;
}
-int cmd_bisect__helper(int argc, const char **argv, const char *prefix)
+static int cmd_bisect__reset(int argc, const char **argv, const char *prefix UNUSED)
+{
+ if (argc > 1)
+ return error(_("'%s' requires either no argument or a commit"),
+ "git bisect reset");
+ return bisect_reset(argc ? argv[0] : NULL);
+}
+
+static int cmd_bisect__terms(int argc, const char **argv, const char *prefix UNUSED)
{
- enum {
- BISECT_RESET = 1,
- BISECT_NEXT_CHECK,
- BISECT_TERMS,
- BISECT_START,
- BISECT_AUTOSTART,
- BISECT_NEXT,
- BISECT_STATE,
- BISECT_LOG,
- BISECT_REPLAY,
- BISECT_SKIP,
- BISECT_VISUALIZE,
- BISECT_RUN,
- } cmdmode = 0;
- int res = 0, nolog = 0;
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (argc > 1)
+ return error(_("'%s' requires 0 or 1 argument"),
+ "git bisect terms");
+ res = bisect_terms(&terms, argc == 1 ? argv[0] : NULL);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__start(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ set_terms(&terms, "bad", "good");
+ res = bisect_start(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__next(int argc, const char **argv UNUSED, const char *prefix)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (argc)
+ return error(_("'%s' requires 0 arguments"),
+ "git bisect next");
+ get_terms(&terms);
+ res = bisect_next(&terms, prefix);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__log(int argc UNUSED, const char **argv UNUSED, const char *prefix UNUSED)
+{
+ return bisect_log();
+}
+
+static int cmd_bisect__replay(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (argc != 1)
+ return error(_("no logfile given"));
+ set_terms(&terms, "bad", "good");
+ res = bisect_replay(&terms, argv[0]);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__skip(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ set_terms(&terms, "bad", "good");
+ get_terms(&terms);
+ res = bisect_skip(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__visualize(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ get_terms(&terms);
+ res = bisect_visualize(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+static int cmd_bisect__run(int argc, const char **argv, const char *prefix UNUSED)
+{
+ int res;
+ struct bisect_terms terms = { 0 };
+
+ if (!argc)
+ return error(_("'%s' failed: no command provided."), "git bisect run");
+ get_terms(&terms);
+ res = bisect_run(&terms, argv, argc);
+ free_terms(&terms);
+ return res;
+}
+
+int cmd_bisect(int argc, const char **argv, const char *prefix)
+{
+ int res = 0;
+ parse_opt_subcommand_fn *fn = NULL;
struct option options[] = {
- OPT_CMDMODE(0, "bisect-reset", &cmdmode,
- N_("reset the bisection state"), BISECT_RESET),
- OPT_CMDMODE(0, "bisect-next-check", &cmdmode,
- N_("check whether bad or good terms exist"), BISECT_NEXT_CHECK),
- OPT_CMDMODE(0, "bisect-terms", &cmdmode,
- N_("print out the bisect terms"), BISECT_TERMS),
- OPT_CMDMODE(0, "bisect-start", &cmdmode,
- N_("start the bisect session"), BISECT_START),
- OPT_CMDMODE(0, "bisect-next", &cmdmode,
- N_("find the next bisection commit"), BISECT_NEXT),
- OPT_CMDMODE(0, "bisect-state", &cmdmode,
- N_("mark the state of ref (or refs)"), BISECT_STATE),
- OPT_CMDMODE(0, "bisect-log", &cmdmode,
- N_("list the bisection steps so far"), BISECT_LOG),
- OPT_CMDMODE(0, "bisect-replay", &cmdmode,
- N_("replay the bisection process from the given file"), BISECT_REPLAY),
- OPT_CMDMODE(0, "bisect-skip", &cmdmode,
- N_("skip some commits for checkout"), BISECT_SKIP),
- OPT_CMDMODE(0, "bisect-visualize", &cmdmode,
- N_("visualize the bisection"), BISECT_VISUALIZE),
- OPT_CMDMODE(0, "bisect-run", &cmdmode,
- N_("use <cmd>... to automatically bisect"), BISECT_RUN),
- OPT_BOOL(0, "no-log", &nolog,
- N_("no log for BISECT_WRITE")),
+ OPT_SUBCOMMAND("reset", &fn, cmd_bisect__reset),
+ OPT_SUBCOMMAND("terms", &fn, cmd_bisect__terms),
+ OPT_SUBCOMMAND("start", &fn, cmd_bisect__start),
+ OPT_SUBCOMMAND("next", &fn, cmd_bisect__next),
+ OPT_SUBCOMMAND("log", &fn, cmd_bisect__log),
+ OPT_SUBCOMMAND("replay", &fn, cmd_bisect__replay),
+ OPT_SUBCOMMAND("skip", &fn, cmd_bisect__skip),
+ OPT_SUBCOMMAND("visualize", &fn, cmd_bisect__visualize),
+ OPT_SUBCOMMAND("view", &fn, cmd_bisect__visualize),
+ OPT_SUBCOMMAND("run", &fn, cmd_bisect__run),
OPT_END()
};
- struct bisect_terms terms = { .term_good = NULL, .term_bad = NULL };
+ argc = parse_options(argc, argv, prefix, options, git_bisect_usage,
+ PARSE_OPT_SUBCOMMAND_OPTIONAL);
- argc = parse_options(argc, argv, prefix, options,
- git_bisect_helper_usage,
- PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_UNKNOWN_OPT);
+ if (!fn) {
+ struct bisect_terms terms = { 0 };
- if (!cmdmode)
- usage_with_options(git_bisect_helper_usage, options);
+ if (!argc)
+ usage_msg_opt(_("need a command"), git_bisect_usage, options);
- switch (cmdmode) {
- case BISECT_RESET:
- if (argc > 1)
- return error(_("--bisect-reset requires either no argument or a commit"));
- res = bisect_reset(argc ? argv[0] : NULL);
- break;
- case BISECT_TERMS:
- if (argc > 1)
- return error(_("--bisect-terms requires 0 or 1 argument"));
- res = bisect_terms(&terms, argc == 1 ? argv[0] : NULL);
- break;
- case BISECT_START:
- set_terms(&terms, "bad", "good");
- res = bisect_start(&terms, argv, argc);
- break;
- case BISECT_NEXT:
- if (argc)
- return error(_("--bisect-next requires 0 arguments"));
- get_terms(&terms);
- res = bisect_next(&terms, prefix);
- break;
- case BISECT_STATE:
set_terms(&terms, "bad", "good");
get_terms(&terms);
+ if (check_and_set_terms(&terms, argv[0]))
+ usage_msg_optf(_("unknown command: '%s'"), git_bisect_usage,
+ options, argv[0]);
res = bisect_state(&terms, argv, argc);
- break;
- case BISECT_LOG:
- if (argc)
- return error(_("--bisect-log requires 0 arguments"));
- res = bisect_log();
- break;
- case BISECT_REPLAY:
- if (argc != 1)
- return error(_("no logfile given"));
- set_terms(&terms, "bad", "good");
- res = bisect_replay(&terms, argv[0]);
- break;
- case BISECT_SKIP:
- set_terms(&terms, "bad", "good");
- get_terms(&terms);
- res = bisect_skip(&terms, argv, argc);
- break;
- case BISECT_VISUALIZE:
- get_terms(&terms);
- res = bisect_visualize(&terms, argv, argc);
- break;
- case BISECT_RUN:
- if (!argc)
- return error(_("bisect run failed: no command provided."));
- get_terms(&terms);
- res = bisect_run(&terms, argv, argc);
- break;
- default:
- BUG("unknown subcommand %d", cmdmode);
+ free_terms(&terms);
+ } else {
+ argc--;
+ argv++;
+ res = fn(argc, argv, prefix);
}
- free_terms(&terms);
/*
* Handle early success
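The hunks above replace the old `--bisect-*` OPT_CMDMODE flags with a table of OPT_SUBCOMMAND handlers and a fallthrough to bisect_state() when the first word is a term like "good" or "bad". Below is a minimal standalone sketch of that dispatch pattern in plain C — it is not git code and does not use git's parse-options API; the handler names and table are hypothetical, and it only illustrates the lookup-then-fallthrough shape of the new cmd_bisect().

#include <stdio.h>
#include <string.h>

typedef int (*subcmd_fn)(int argc, const char **argv);

static int do_reset(int argc, const char **argv)
{
	(void)argv;
	printf("reset: %d extra args\n", argc);
	return 0;
}

static int do_start(int argc, const char **argv)
{
	(void)argv;
	printf("start: %d extra args\n", argc);
	return 0;
}

static const struct { const char *name; subcmd_fn fn; } subcmds[] = {
	{ "reset", do_reset },
	{ "start", do_start },
};

int main(int argc, const char **argv)
{
	size_t i;

	if (argc < 2)
		return 1;
	for (i = 0; i < sizeof(subcmds) / sizeof(subcmds[0]); i++)
		if (!strcmp(argv[1], subcmds[i].name))
			return subcmds[i].fn(argc - 2, argv + 2);
	/* Unknown first word: fall through, the way "git bisect good"
	 * falls through to bisect_state() in the real builtin. */
	printf("not a subcommand, treating '%s' as a state word\n", argv[1]);
	return 0;
}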
diff --git a/builtin/branch.c b/builtin/branch.c
index 15be0c03ef..0879cc655e 100644
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -40,7 +40,6 @@ static const char * const builtin_branch_usage[] = {
static const char *head;
static struct object_id head_oid;
static int recurse_submodules = 0;
-static int submodule_propagate_branches = 0;
static int branch_use_color = -1;
static char branch_colors[][COLOR_MAXLEN] = {
@@ -106,10 +105,6 @@ static int git_branch_config(const char *var, const char *value, void *cb)
recurse_submodules = git_config_bool(var, value);
return 0;
}
- if (!strcasecmp(var, "submodule.propagateBranches")) {
- submodule_propagate_branches = git_config_bool(var, value);
- return 0;
- }
return git_color_default_config(var, value, cb);
}
@@ -150,7 +145,7 @@ static int branch_merged(int kind, const char *name,
if (!reference_rev)
reference_rev = head_rev;
- merged = in_merge_bases(rev, reference_rev);
+ merged = reference_rev ? in_merge_bases(rev, reference_rev) : 0;
/*
* After the safety valve is fully redefined to "check with
@@ -160,7 +155,7 @@ static int branch_merged(int kind, const char *name,
* a gentle reminder is in order.
*/
if ((head_rev != reference_rev) &&
- in_merge_bases(rev, head_rev) != merged) {
+ (head_rev ? in_merge_bases(rev, head_rev) : 0) != merged) {
if (merged)
warning(_("deleting branch '%s' that has been merged to\n"
" '%s', but not yet merged to HEAD."),
@@ -235,11 +230,8 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
}
branch_name_pos = strcspn(fmt, "%");
- if (!force) {
+ if (!force)
head_rev = lookup_commit_reference(the_repository, &head_oid);
- if (!head_rev)
- die(_("Couldn't look up commit object for HEAD"));
- }
for (i = 0; i < argc; i++, strbuf_reset(&bname)) {
char *target = NULL;
@@ -716,7 +708,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options, builtin_branch_usage,
0);
-
+ prepare_repo_settings(the_repository);
if (!delete && !rename && !copy && !edit_description && !new_upstream &&
!show_current && !unset_upstream && argc == 0)
list = 1;
@@ -732,7 +724,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
usage_with_options(builtin_branch_usage, options);
if (recurse_submodules_explicit) {
- if (!submodule_propagate_branches)
+ if (!the_repository->settings.submodule_propagate_branches)
die(_("branch with --recurse-submodules can only be used if submodule.propagateBranches is enabled"));
if (noncreate_actions)
die(_("--recurse-submodules can only be used to create branches"));
@@ -740,7 +732,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
recurse_submodules =
(recurse_submodules || recurse_submodules_explicit) &&
- submodule_propagate_branches;
+ the_repository->settings.submodule_propagate_branches;
if (filter.abbrev == -1)
filter.abbrev = DEFAULT_ABBREV;
diff --git a/builtin/bugreport.c b/builtin/bugreport.c
index 96052541cb..5bc254be80 100644
--- a/builtin/bugreport.c
+++ b/builtin/bugreport.c
@@ -106,6 +106,7 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix)
const char *user_relative_path = NULL;
char *prefixed_filename;
size_t output_path_len;
+ int ret;
const struct option bugreport_options[] = {
OPT_CALLBACK_F(0, "diagnose", &diagnose, N_("mode"),
@@ -182,7 +183,9 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix)
user_relative_path);
free(prefixed_filename);
- UNLEAK(buffer);
- UNLEAK(report_path);
- return !!launch_editor(report_path.buf, NULL, NULL);
+ strbuf_release(&buffer);
+
+ ret = !!launch_editor(report_path.buf, NULL, NULL);
+ strbuf_release(&report_path);
+ return ret;
}
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 2a132392fb..659dd5c430 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -1470,6 +1470,8 @@ static void die_if_some_operation_in_progress(void)
"or \"git worktree add\"."));
if (state.bisect_in_progress)
warning(_("you are switching branch while bisecting"));
+
+ wt_status_state_free_buffers(&state);
}
static int checkout_branch(struct checkout_opts *opts,
diff --git a/builtin/clone.c b/builtin/clone.c
index 0e4348686b..894be8eda4 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -78,6 +78,7 @@ static int option_filter_submodules = -1; /* unspecified */
static int config_filter_submodules = -1; /* unspecified */
static struct string_list server_options = STRING_LIST_INIT_NODUP;
static int option_remote_submodules;
+static int option_detach;
static const char *bundle_uri;
static int recurse_submodules_cb(const struct option *opt,
@@ -162,6 +163,8 @@ static struct option builtin_clone_options[] = {
N_("any cloned submodules will use their remote-tracking branch")),
OPT_BOOL(0, "sparse", &option_sparse_checkout,
N_("initialize sparse-checkout file to include only files at root")),
+ OPT_BOOL(0, "detach", &option_detach,
+ N_("detach HEAD and don't create a local branch")),
OPT_STRING(0, "bundle-uri", &bundle_uri,
N_("uri"), N_("a URI for downloading bundles before fetching from origin remote")),
OPT_END()
@@ -613,10 +616,12 @@ static void update_remote_refs(const struct ref *refs,
}
static void update_head(const struct ref *our, const struct ref *remote,
- const char *unborn, const char *msg)
+ const char *unborn, int should_detach,
+ const char *msg)
{
const char *head;
- if (our && skip_prefix(our->name, "refs/heads/", &head)) {
+ if (our && !should_detach &&
+ skip_prefix(our->name, "refs/heads/", &head)) {
/* Local default branch link */
if (create_symref("HEAD", our->name, NULL) < 0)
die(_("unable to update HEAD"));
@@ -1362,7 +1367,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
branch_top.buf, reflog_msg.buf, transport,
!is_local);
- update_head(our_head_points_at, remote_head, unborn_head, reflog_msg.buf);
+ update_head(our_head_points_at, remote_head, unborn_head,
+ option_detach, reflog_msg.buf);
/*
* We want to show progress for recursive submodule clones iff
diff --git a/builtin/commit.c b/builtin/commit.c
index e22bdf23f5..f88a29167f 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -987,8 +987,11 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
struct object_id oid;
const char *parent = "HEAD";
- if (!active_nr && read_cache() < 0)
- die(_("Cannot read index"));
+ if (!active_nr) {
+ discard_cache();
+ if (read_cache() < 0)
+ die(_("Cannot read index"));
+ }
if (amend)
parent = "HEAD^1";
@@ -1871,8 +1874,8 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
apply_autostash(git_path_merge_autostash(the_repository));
cleanup:
- UNLEAK(author_ident);
- UNLEAK(err);
- UNLEAK(sb);
+ strbuf_release(&author_ident);
+ strbuf_release(&err);
+ strbuf_release(&sb);
return ret;
}
diff --git a/builtin/config.c b/builtin/config.c
index 753e5fac29..060cf9f3e0 100644
--- a/builtin/config.c
+++ b/builtin/config.c
@@ -639,8 +639,9 @@ static char *default_user_config(void)
int cmd_config(int argc, const char **argv, const char *prefix)
{
int nongit = !startup_info->have_repository;
- char *value;
+ char *value = NULL;
int flags = 0;
+ int ret = 0;
given_config_source.file = xstrdup_or_null(getenv(CONFIG_ENVIRONMENT));
@@ -856,44 +857,38 @@ int cmd_config(int argc, const char **argv, const char *prefix)
free(config_file);
}
else if (actions == ACTION_SET) {
- int ret;
check_write();
check_argc(argc, 2, 2);
value = normalize_value(argv[0], argv[1]);
- UNLEAK(value);
ret = git_config_set_in_file_gently(given_config_source.file, argv[0], value);
if (ret == CONFIG_NOTHING_SET)
error(_("cannot overwrite multiple values with a single value\n"
" Use a regexp, --add or --replace-all to change %s."), argv[0]);
- return ret;
}
else if (actions == ACTION_SET_ALL) {
check_write();
check_argc(argc, 2, 3);
value = normalize_value(argv[0], argv[1]);
- UNLEAK(value);
- return git_config_set_multivar_in_file_gently(given_config_source.file,
- argv[0], value, argv[2],
- flags);
+ ret = git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value, argv[2],
+ flags);
}
else if (actions == ACTION_ADD) {
check_write();
check_argc(argc, 2, 2);
value = normalize_value(argv[0], argv[1]);
- UNLEAK(value);
- return git_config_set_multivar_in_file_gently(given_config_source.file,
- argv[0], value,
- CONFIG_REGEX_NONE,
- flags);
+ ret = git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value,
+ CONFIG_REGEX_NONE,
+ flags);
}
else if (actions == ACTION_REPLACE_ALL) {
check_write();
check_argc(argc, 2, 3);
value = normalize_value(argv[0], argv[1]);
- UNLEAK(value);
- return git_config_set_multivar_in_file_gently(given_config_source.file,
- argv[0], value, argv[2],
- flags | CONFIG_FLAGS_MULTI_REPLACE);
+ ret = git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value, argv[2],
+ flags | CONFIG_FLAGS_MULTI_REPLACE);
}
else if (actions == ACTION_GET) {
check_argc(argc, 1, 2);
@@ -934,26 +929,28 @@ int cmd_config(int argc, const char **argv, const char *prefix)
flags | CONFIG_FLAGS_MULTI_REPLACE);
}
else if (actions == ACTION_RENAME_SECTION) {
- int ret;
check_write();
check_argc(argc, 2, 2);
ret = git_config_rename_section_in_file(given_config_source.file,
argv[0], argv[1]);
if (ret < 0)
return ret;
- if (ret == 0)
+ else if (!ret)
die(_("no such section: %s"), argv[0]);
+ else
+ ret = 0;
}
else if (actions == ACTION_REMOVE_SECTION) {
- int ret;
check_write();
check_argc(argc, 1, 1);
ret = git_config_rename_section_in_file(given_config_source.file,
argv[0], NULL);
if (ret < 0)
return ret;
- if (ret == 0)
+ else if (!ret)
die(_("no such section: %s"), argv[0]);
+ else
+ ret = 0;
}
else if (actions == ACTION_GET_COLOR) {
check_argc(argc, 1, 2);
@@ -966,5 +963,6 @@ int cmd_config(int argc, const char **argv, const char *prefix)
return get_colorbool(argv[0], argc == 2);
}
- return 0;
+ free(value);
+ return ret;
}
diff --git a/builtin/diff.c b/builtin/diff.c
index 854d2c5a5c..cb63f157dd 100644
--- a/builtin/diff.c
+++ b/builtin/diff.c
@@ -609,7 +609,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
if (1 < rev.diffopt.skip_stat_unmatch)
refresh_index_quietly();
release_revisions(&rev);
- UNLEAK(ent);
+ object_array_clear(&ent);
UNLEAK(blob);
return result;
}
diff --git a/builtin/for-each-repo.c b/builtin/for-each-repo.c
index d45d873f57..6aeac37148 100644
--- a/builtin/for-each-repo.c
+++ b/builtin/for-each-repo.c
@@ -14,13 +14,16 @@ static int run_command_on_repo(const char *path, int argc, const char ** argv)
{
int i;
struct child_process child = CHILD_PROCESS_INIT;
+ char *abspath = interpolate_path(path, 0);
child.git_cmd = 1;
- strvec_pushl(&child.args, "-C", path, NULL);
+ strvec_pushl(&child.args, "-C", abspath, NULL);
for (i = 0; i < argc; i++)
strvec_push(&child.args, argv[i]);
+ free(abspath);
+
return run_command(&child);
}
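The for-each-repo change runs each configured path through interpolate_path() before handing it to "git -C", so entries such as "~/src/repo" in the config are expanded first. A tiny standalone sketch of that idea follows; it is not git's interpolate_path() and handles only the simple "~/" case, purely to show why the expansion has to happen before spawning the child process.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *expand_tilde(const char *path)
{
	const char *home = getenv("HOME");
	char *out;

	if (path[0] != '~' || path[1] != '/' || !home)
		return strdup(path);
	out = malloc(strlen(home) + strlen(path));
	sprintf(out, "%s%s", home, path + 1);
	return out;
}

int main(void)
{
	char *p = expand_tilde("~/src/git");

	/* this is what would be passed as the argument to "git -C" */
	printf("%s\n", p);
	free(p);
	return 0;
}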
diff --git a/builtin/gc.c b/builtin/gc.c
index 6b08dcf3c5..02455fdcd7 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -1480,13 +1480,15 @@ static char *get_maintpath(void)
}
static char const * const builtin_maintenance_register_usage[] = {
- "git maintenance register",
+ "git maintenance register [--config-file <path>]",
NULL
};
static int maintenance_register(int argc, const char **argv, const char *prefix)
{
+ char *config_file = NULL;
struct option options[] = {
+ OPT_STRING(0, "config-file", &config_file, N_("file"), N_("use given config file")),
OPT_END(),
};
int found = 0;
@@ -1523,12 +1525,16 @@ static int maintenance_register(int argc, const char **argv, const char *prefix)
if (!found) {
int rc;
- char *user_config, *xdg_config;
- git_global_config(&user_config, &xdg_config);
- if (!user_config)
- die(_("$HOME not set"));
+ char *user_config = NULL, *xdg_config = NULL;
+
+ if (!config_file) {
+ git_global_config(&user_config, &xdg_config);
+ config_file = user_config;
+ if (!user_config)
+ die(_("$HOME not set"));
+ }
rc = git_config_set_multivar_in_file_gently(
- user_config, "maintenance.repo", maintpath,
+ config_file, "maintenance.repo", maintpath,
CONFIG_REGEX_NONE, 0);
free(user_config);
free(xdg_config);
@@ -1543,14 +1549,16 @@ static int maintenance_register(int argc, const char **argv, const char *prefix)
}
static char const * const builtin_maintenance_unregister_usage[] = {
- "git maintenance unregister [--force]",
+ "git maintenance unregister [--config-file <path>] [--force]",
NULL
};
static int maintenance_unregister(int argc, const char **argv, const char *prefix)
{
int force = 0;
+ char *config_file = NULL;
struct option options[] = {
+ OPT_STRING(0, "config-file", &config_file, N_("file"), N_("use given config file")),
OPT__FORCE(&force,
N_("return success even if repository was not registered"),
PARSE_OPT_NOCOMPLETE),
@@ -1561,6 +1569,7 @@ static int maintenance_unregister(int argc, const char **argv, const char *prefi
int found = 0;
struct string_list_item *item;
const struct string_list *list;
+ struct config_set cs = { { 0 } };
argc = parse_options(argc, argv, prefix, options,
builtin_maintenance_unregister_usage, 0);
@@ -1568,7 +1577,13 @@ static int maintenance_unregister(int argc, const char **argv, const char *prefi
usage_with_options(builtin_maintenance_unregister_usage,
options);
- list = git_config_get_value_multi(key);
+ if (config_file) {
+ git_configset_init(&cs);
+ git_configset_add_file(&cs, config_file);
+ list = git_configset_get_value_multi(&cs, key);
+ } else {
+ list = git_config_get_value_multi(key);
+ }
if (list) {
for_each_string_list_item(item, list) {
if (!strcmp(maintpath, item->string)) {
@@ -1580,12 +1595,15 @@ static int maintenance_unregister(int argc, const char **argv, const char *prefi
if (found) {
int rc;
- char *user_config, *xdg_config;
- git_global_config(&user_config, &xdg_config);
- if (!user_config)
- die(_("$HOME not set"));
+ char *user_config = NULL, *xdg_config = NULL;
+ if (!config_file) {
+ git_global_config(&user_config, &xdg_config);
+ config_file = user_config;
+ if (!user_config)
+ die(_("$HOME not set"));
+ }
rc = git_config_set_multivar_in_file_gently(
- user_config, key, NULL, maintpath,
+ config_file, key, NULL, maintpath,
CONFIG_FLAGS_MULTI_REPLACE | CONFIG_FLAGS_FIXED_VALUE);
free(user_config);
free(xdg_config);
@@ -1598,6 +1616,7 @@ static int maintenance_unregister(int argc, const char **argv, const char *prefi
die(_("repository '%s' is not registered"), maintpath);
}
+ git_configset_clear(&cs);
free(maintpath);
return 0;
}
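With the new `--config-file <path>` option, maintenance register/unregister write maintenance.repo to an explicitly named file and only fall back to the user's global config (dying if $HOME is unset) when the option is absent. The standalone sketch below shows just that selection logic; it is not git code, and the "~/.gitconfig" fallback is a simplification of git_global_config(), which also considers the XDG config path.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *pick_config_file(const char *option_value)
{
	const char *home;
	char *buf;

	if (option_value)
		return strdup(option_value);	/* --config-file wins */
	home = getenv("HOME");
	if (!home) {
		fprintf(stderr, "$HOME not set\n");
		exit(1);
	}
	buf = malloc(strlen(home) + strlen("/.gitconfig") + 1);
	sprintf(buf, "%s/.gitconfig", home);
	return buf;
}

int main(void)
{
	char *file = pick_config_file(NULL);

	printf("writing maintenance.repo to %s\n", file);
	free(file);
	return 0;
}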
diff --git a/builtin/ls-files.c b/builtin/ls-files.c
index 4cf8a23648..a03b559eca 100644
--- a/builtin/ls-files.c
+++ b/builtin/ls-files.c
@@ -613,6 +613,7 @@ void overlay_tree_on_index(struct index_state *istate,
if (!fn)
fn = read_one_entry_quick;
err = read_tree(the_repository, tree, &pathspec, fn, istate);
+ clear_pathspec(&pathspec);
if (err)
die("unable to read tree entries %s", tree_name);
diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c
index fe853aa8f9..330f779e8b 100644
--- a/builtin/merge-tree.c
+++ b/builtin/merge-tree.c
@@ -3,6 +3,7 @@
#include "tree-walk.h"
#include "xdiff-interface.h"
#include "help.h"
+#include "commit.h"
#include "commit-reach.h"
#include "merge-ort.h"
#include "object-store.h"
@@ -406,6 +407,7 @@ struct merge_tree_options {
};
static int real_merge(struct merge_tree_options *o,
+ const char *merge_base,
const char *branch1, const char *branch2,
const char *prefix)
{
@@ -432,16 +434,31 @@ static int real_merge(struct merge_tree_options *o,
opt.branch1 = branch1;
opt.branch2 = branch2;
- /*
- * Get the merge bases, in reverse order; see comment above
- * merge_incore_recursive in merge-ort.h
- */
- merge_bases = get_merge_bases(parent1, parent2);
- if (!merge_bases && !o->allow_unrelated_histories)
- die(_("refusing to merge unrelated histories"));
- merge_bases = reverse_commit_list(merge_bases);
+ if (merge_base) {
+ struct commit *base_commit;
+ struct tree *base_tree, *parent1_tree, *parent2_tree;
+
+ base_commit = lookup_commit_reference_by_name(merge_base);
+ if (!base_commit)
+ die(_("could not lookup commit %s"), merge_base);
+
+ opt.ancestor = merge_base;
+ base_tree = get_commit_tree(base_commit);
+ parent1_tree = get_commit_tree(parent1);
+ parent2_tree = get_commit_tree(parent2);
+ merge_incore_nonrecursive(&opt, base_tree, parent1_tree, parent2_tree, &result);
+ } else {
+ /*
+ * Get the merge bases, in reverse order; see comment above
+ * merge_incore_recursive in merge-ort.h
+ */
+ merge_bases = get_merge_bases(parent1, parent2);
+ if (!merge_bases && !o->allow_unrelated_histories)
+ die(_("refusing to merge unrelated histories"));
+ merge_bases = reverse_commit_list(merge_bases);
+ merge_incore_recursive(&opt, merge_bases, parent1, parent2, &result);
+ }
- merge_incore_recursive(&opt, merge_bases, parent1, parent2, &result);
if (result.clean < 0)
die(_("failure to merge"));
@@ -487,6 +504,7 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
struct merge_tree_options o = { .show_messages = -1 };
int expected_remaining_argc;
int original_argc;
+ const char *merge_base = NULL;
const char * const merge_tree_usage[] = {
N_("git merge-tree [--write-tree] [<options>] <branch1> <branch2>"),
@@ -515,6 +533,10 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
&o.use_stdin,
N_("perform multiple merges, one per line of input"),
PARSE_OPT_NONEG),
+ OPT_STRING(0, "merge-base",
+ &merge_base,
+ N_("commit"),
+ N_("specify a merge-base for the merge")),
OPT_END()
};
@@ -529,16 +551,35 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
if (o.mode == MODE_TRIVIAL)
die(_("--trivial-merge is incompatible with all other options"));
+ if (merge_base)
+ die(_("--merge-base is incompatible with --stdin"));
line_termination = '\0';
while (strbuf_getline_lf(&buf, stdin) != EOF) {
struct strbuf **split;
int result;
+ const char *input_merge_base = NULL;
split = strbuf_split(&buf, ' ');
- if (!split[0] || !split[1] || split[2])
+ if (!split[0] || !split[1])
die(_("malformed input line: '%s'."), buf.buf);
strbuf_rtrim(split[0]);
- result = real_merge(&o, split[0]->buf, split[1]->buf, prefix);
+ strbuf_rtrim(split[1]);
+
+ /* parse the merge-base */
+ if (!strcmp(split[1]->buf, "--")) {
+ input_merge_base = split[0]->buf;
+ }
+
+ if (input_merge_base && split[2] && split[3] && !split[4]) {
+ strbuf_rtrim(split[2]);
+ strbuf_rtrim(split[3]);
+ result = real_merge(&o, input_merge_base, split[2]->buf, split[3]->buf, prefix);
+ } else if (!input_merge_base && !split[2]) {
+ result = real_merge(&o, NULL, split[0]->buf, split[1]->buf, prefix);
+ } else {
+ die(_("malformed input line: '%s'."), buf.buf);
+ }
+
if (result < 0)
die(_("merging cannot continue; got unclean result of %d"), result);
strbuf_list_free(split);
@@ -581,7 +622,7 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
/* Do the relevant type of merge */
if (o.mode == MODE_REAL)
- return real_merge(&o, argv[0], argv[1], prefix);
+ return real_merge(&o, merge_base, argv[0], argv[1], prefix);
else
return trivial_merge(argv[0], argv[1], argv[2]);
}
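The merge-tree hunks add `--merge-base=<commit>` for single merges and extend the `--stdin` loop so each input line is either "<branch1> <branch2>" or "<base> -- <branch1> <branch2>", with "--" marking an explicit merge base. The following standalone sketch parses those two shapes; it is not the builtin's strbuf_split() code, just an illustration of the accepted line formats.

#include <stdio.h>
#include <string.h>

static int parse_line(char *line, const char **base,
		      const char **b1, const char **b2)
{
	char *tok[4];
	int n = 0;

	for (char *p = strtok(line, " \n"); p && n < 4; p = strtok(NULL, " \n"))
		tok[n++] = p;

	if (n == 2) {
		*base = NULL; *b1 = tok[0]; *b2 = tok[1];
		return 0;
	}
	if (n == 4 && !strcmp(tok[1], "--")) {
		*base = tok[0]; *b1 = tok[2]; *b2 = tok[3];
		return 0;
	}
	return -1;	/* malformed input line */
}

int main(void)
{
	char line[] = "base -- topic main";
	const char *base, *b1, *b2;

	if (!parse_line(line, &base, &b1, &b2))
		printf("base=%s branch1=%s branch2=%s\n",
		       base ? base : "(none)", b1, b2);
	return 0;
}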
diff --git a/builtin/merge.c b/builtin/merge.c
index b3f75f55c8..584234c50d 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -1783,5 +1783,6 @@ done:
}
strbuf_release(&buf);
free(branch_to_free);
+ discard_index(&the_index);
return ret;
}
diff --git a/builtin/notes.c b/builtin/notes.c
index be51f69225..902418df3f 100644
--- a/builtin/notes.c
+++ b/builtin/notes.c
@@ -181,7 +181,7 @@ static void prepare_note_data(const struct object_id *object, struct note_data *
strbuf_addch(&buf, '\n');
strbuf_add_commented_lines(&buf, "\n", strlen("\n"));
strbuf_add_commented_lines(&buf, _(note_template), strlen(_(note_template)));
- strbuf_addch(&buf, '\n');
+ strbuf_add_commented_lines(&buf, "\n", strlen("\n"));
write_or_die(fd, buf.buf, buf.len);
write_commented_object(fd, object);
@@ -562,13 +562,14 @@ out:
static int append_edit(int argc, const char **argv, const char *prefix)
{
int allow_empty = 0;
+ int blankline = 1;
const char *object_ref;
struct notes_tree *t;
struct object_id object, new_note;
const struct object_id *note;
- char *logmsg;
+ char *logmsg = NULL;
const char * const *usage;
- struct note_data d = { 0, 0, NULL, STRBUF_INIT };
+ struct note_data d = { .buf = STRBUF_INIT };
struct option options[] = {
OPT_CALLBACK_F('m', "message", &d, N_("message"),
N_("note contents as a string"), PARSE_OPT_NONEG,
@@ -584,6 +585,8 @@ static int append_edit(int argc, const char **argv, const char *prefix)
parse_reuse_arg),
OPT_BOOL(0, "allow-empty", &allow_empty,
N_("allow storing empty note")),
+ OPT_BOOL(0, "blank-line", &blankline,
+ N_("insert paragraph break before appending to an existing note")),
OPT_END()
};
int edit = !strcmp(argv[0], "edit");
@@ -618,8 +621,7 @@ static int append_edit(int argc, const char **argv, const char *prefix)
enum object_type type;
char *prev_buf = read_object_file(note, &type, &size);
- strbuf_grow(&d.buf, size + 1);
- if (d.buf.len && prev_buf && size)
+ if (blankline && d.buf.len && prev_buf && size)
strbuf_insertstr(&d.buf, 0, "\n");
if (prev_buf && size)
strbuf_insert(&d.buf, 0, prev_buf, size);
@@ -631,13 +633,11 @@ static int append_edit(int argc, const char **argv, const char *prefix)
if (add_note(t, &object, &new_note, combine_notes_overwrite))
BUG("combine_notes_overwrite failed");
logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]);
- } else {
- fprintf(stderr, _("Removing note for object %s\n"),
+ commit_notes(the_repository, t, logmsg);
+ } else if (!d.buf.len && !note)
+ fprintf(stderr,
+ _("Both original and appended notes are empty in %s, do nothing\n"),
oid_to_hex(&object));
- remove_note(t, object.hash);
- logmsg = xstrfmt("Notes removed by 'git notes %s'", argv[0]);
- }
- commit_notes(the_repository, t, logmsg);
free(logmsg);
free_note_data(&d);
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 573d0b20b7..30023bcebb 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -4149,21 +4149,6 @@ static int option_parse_cruft_expiration(const struct option *opt,
return 0;
}
-struct po_filter_data {
- unsigned have_revs:1;
- struct rev_info revs;
-};
-
-static struct list_objects_filter_options *po_filter_revs_init(void *value)
-{
- struct po_filter_data *data = value;
-
- repo_init_revisions(the_repository, &data->revs, NULL);
- data->have_revs = 1;
-
- return &data->revs.filter;
-}
-
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
int use_internal_rev_list = 0;
@@ -4174,7 +4159,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
int rev_list_index = 0;
int stdin_packs = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
- struct po_filter_data pfd = { .have_revs = 0 };
+ struct list_objects_filter_options filter_options =
+ LIST_OBJECTS_FILTER_INIT;
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
@@ -4265,7 +4251,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
&write_bitmap_index,
N_("write a bitmap index if possible"),
WRITE_BITMAP_QUIET, PARSE_OPT_HIDDEN),
- OPT_PARSE_LIST_OBJECTS_FILTER_INIT(&pfd, po_filter_revs_init),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_CALLBACK_F(0, "missing", NULL, N_("action"),
N_("handling for missing objects"), PARSE_OPT_NONEG,
option_parse_missing_action),
@@ -4385,7 +4371,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (!rev_list_all || !rev_list_reflog || !rev_list_index)
unpack_unreachable_expiration = 0;
- if (pfd.have_revs && pfd.revs.filter.choice) {
+ if (filter_options.choice) {
if (!pack_to_stdout)
die(_("cannot use --filter without --stdout"));
if (stdin_packs)
@@ -4472,16 +4458,15 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
read_cruft_objects();
} else if (!use_internal_rev_list) {
read_object_list_from_stdin();
- } else if (pfd.have_revs) {
- get_object_list(&pfd.revs, rp.nr, rp.v);
- release_revisions(&pfd.revs);
} else {
struct rev_info revs;
repo_init_revisions(the_repository, &revs, NULL);
+ list_objects_filter_copy(&revs.filter, &filter_options);
get_object_list(&revs, rp.nr, rp.v);
release_revisions(&revs);
}
+ list_objects_filter_release(&filter_options);
cleanup_preferred_base();
if (include_tag && nr_result)
for_each_tag_ref(add_ref_tag, NULL);
diff --git a/builtin/push.c b/builtin/push.c
index f0329c62a2..60ac8017e5 100644
--- a/builtin/push.c
+++ b/builtin/push.c
@@ -466,8 +466,16 @@ static int option_parse_recurse_submodules(const struct option *opt,
if (unset)
*recurse_submodules = RECURSE_SUBMODULES_OFF;
- else
- *recurse_submodules = parse_push_recurse_submodules_arg(opt->long_name, arg);
+ else {
+ if (!strcmp(arg, "only-is-on-demand")) {
+ if (*recurse_submodules == RECURSE_SUBMODULES_ONLY) {
+ warning(_("recursing into submodule with push.recurseSubmodules=only; using on-demand instead"));
+ *recurse_submodules = RECURSE_SUBMODULES_ON_DEMAND;
+ }
+ } else {
+ *recurse_submodules = parse_push_recurse_submodules_arg(opt->long_name, arg);
+ }
+ }
return 0;
}
diff --git a/builtin/read-tree.c b/builtin/read-tree.c
index f4cbe460b9..45c6652444 100644
--- a/builtin/read-tree.c
+++ b/builtin/read-tree.c
@@ -249,6 +249,10 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
if (opts.debug_unpack)
opts.fn = debug_merge;
+ /* If we're going to prime_cache_tree later, skip cache tree update */
+ if (nr_trees == 1 && !opts.prefix)
+ opts.skip_cache_tree_update = 1;
+
cache_tree_free(&active_cache_tree);
for (i = 0; i < nr_trees; i++) {
struct tree *tree = trees[i];
diff --git a/builtin/rebase.c b/builtin/rebase.c
index 5d855fd8f5..3f360eb2f3 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -30,8 +30,6 @@
#include "reset.h"
#include "hook.h"
-#define DEFAULT_REFLOG_ACTION "rebase"
-
static char const * const builtin_rebase_usage[] = {
N_("git rebase [-i] [options] [--exec <cmd>] "
"[--onto <newbase> | --keep-base] [<upstream> [<branch>]]"),
@@ -106,6 +104,7 @@ struct rebase_options {
} flags;
struct strvec git_am_opts;
enum action action;
+ char *reflog_action;
int signoff;
int allow_rerere_autoupdate;
int keep_empty;
@@ -159,6 +158,7 @@ static struct replay_opts get_replay_opts(const struct rebase_options *opts)
opts->committer_date_is_author_date;
replay.ignore_date = opts->ignore_date;
replay.gpg_sign = xstrdup_or_null(opts->gpg_sign_opt);
+ replay.reflog_action = xstrdup(opts->reflog_action);
if (opts->strategy)
replay.strategy = xstrdup_or_null(opts->strategy);
else if (!replay.strategy && replay.default_strategy) {
@@ -585,10 +585,10 @@ static int move_to_original_branch(struct rebase_options *opts)
BUG("move_to_original_branch without onto");
strbuf_addf(&branch_reflog, "%s (finish): %s onto %s",
- getenv(GIT_REFLOG_ACTION_ENVIRONMENT),
+ opts->reflog_action,
opts->head_name, oid_to_hex(&opts->onto->object.oid));
strbuf_addf(&head_reflog, "%s (finish): returning to %s",
- getenv(GIT_REFLOG_ACTION_ENVIRONMENT), opts->head_name);
+ opts->reflog_action, opts->head_name);
ropts.branch = opts->head_name;
ropts.flags = RESET_HEAD_REFS_ONLY;
ropts.branch_msg = branch_reflog.buf;
@@ -618,7 +618,7 @@ static int run_am(struct rebase_options *opts)
am.git_cmd = 1;
strvec_push(&am.args, "am");
strvec_pushf(&am.env, GIT_REFLOG_ACTION_ENVIRONMENT "=%s (pick)",
- getenv(GIT_REFLOG_ACTION_ENVIRONMENT));
+ opts->reflog_action);
if (opts->action == ACTION_CONTINUE) {
strvec_push(&am.args, "--resolved");
strvec_pushf(&am.args, "--resolvemsg=%s", resolvemsg);
@@ -685,7 +685,7 @@ static int run_am(struct rebase_options *opts)
ropts.oid = &opts->orig_head->object.oid;
ropts.branch = opts->head_name;
- ropts.default_reflog_action = DEFAULT_REFLOG_ACTION;
+ ropts.default_reflog_action = opts->reflog_action;
reset_head(the_repository, &ropts);
error(_("\ngit encountered an error while preparing the "
"patches to replay\n"
@@ -834,8 +834,7 @@ static int checkout_up_to_date(struct rebase_options *options)
int ret = 0;
strbuf_addf(&buf, "%s: checkout %s",
- getenv(GIT_REFLOG_ACTION_ENVIRONMENT),
- options->switch_to);
+ options->reflog_action, options->switch_to);
ropts.oid = &options->orig_head->object.oid;
ropts.branch = options->head_name;
ropts.flags = RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
@@ -1243,7 +1242,6 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
if (options.action != ACTION_NONE && !in_progress)
die(_("No rebase in progress?"));
- setenv(GIT_REFLOG_ACTION_ENVIRONMENT, "rebase", 0);
if (options.action == ACTION_EDIT_TODO && !is_merge(&options))
die(_("The --edit-todo action can only be used during "
@@ -1258,6 +1256,10 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
trace2_cmd_mode(action_names[options.action]);
}
+ options.reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT);
+ options.reflog_action =
+ xstrdup(options.reflog_action ? options.reflog_action : "rebase");
+
switch (options.action) {
case ACTION_CONTINUE: {
struct object_id head;
@@ -1310,7 +1312,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
exit(1);
strbuf_addf(&head_msg, "%s (abort): returning to %s",
- getenv(GIT_REFLOG_ACTION_ENVIRONMENT),
+ options.reflog_action,
options.head_name ? options.head_name
: oid_to_hex(&options.orig_head->object.oid));
ropts.oid = &options.orig_head->object.oid;
@@ -1320,6 +1322,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
if (reset_head(the_repository, &ropts) < 0)
die(_("could not move back to %s"),
oid_to_hex(&options.orig_head->object.oid));
+ strbuf_release(&head_msg);
remove_branch_state(the_repository, 0);
ret = finish_rebase(&options);
goto cleanup;
@@ -1786,13 +1789,13 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
"it...\n"));
strbuf_addf(&msg, "%s (start): checkout %s",
- getenv(GIT_REFLOG_ACTION_ENVIRONMENT), options.onto_name);
+ options.reflog_action, options.onto_name);
ropts.oid = &options.onto->object.oid;
ropts.orig_head = &options.orig_head->object.oid,
ropts.flags = RESET_HEAD_DETACH | RESET_ORIG_HEAD |
RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
ropts.head_msg = msg.buf;
- ropts.default_reflog_action = DEFAULT_REFLOG_ACTION;
+ ropts.default_reflog_action = options.reflog_action;
if (reset_head(the_repository, &ropts))
die(_("Could not detach HEAD"));
strbuf_release(&msg);
@@ -1824,11 +1827,15 @@ run_rebase:
cleanup:
strbuf_release(&buf);
strbuf_release(&revisions);
+ free(options.reflog_action);
free(options.head_name);
+ strvec_clear(&options.git_am_opts);
free(options.gpg_sign_opt);
free(options.cmd);
free(options.strategy);
strbuf_release(&options.git_format_patch_opt);
free(squash_onto_name);
+ string_list_clear(&exec, 0);
+ string_list_clear(&strategy_options, 0);
return !!ret;
}
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index 44bcea3a5b..a90af30363 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -80,6 +80,7 @@ static struct object_id push_cert_oid;
static struct signature_check sigcheck;
static const char *push_cert_nonce;
static const char *cert_nonce_seed;
+static struct string_list hidden_refs = STRING_LIST_INIT_DUP;
static const char *NONCE_UNSOLICITED = "UNSOLICITED";
static const char *NONCE_BAD = "BAD";
@@ -130,7 +131,7 @@ static enum deny_action parse_deny_action(const char *var, const char *value)
static int receive_pack_config(const char *var, const char *value, void *cb)
{
- int status = parse_hide_refs_config(var, value, "receive");
+ int status = parse_hide_refs_config(var, value, "receive", &hidden_refs);
if (status)
return status;
@@ -296,7 +297,7 @@ static int show_ref_cb(const char *path_full, const struct object_id *oid,
struct oidset *seen = data;
const char *path = strip_namespace(path_full);
- if (ref_is_hidden(path, path_full))
+ if (ref_is_hidden(path, path_full, &hidden_refs))
return 0;
/*
@@ -1794,7 +1795,7 @@ static void reject_updates_to_hidden(struct command *commands)
strbuf_setlen(&refname_full, prefix_len);
strbuf_addstr(&refname_full, cmd->ref_name);
- if (!ref_is_hidden(cmd->ref_name, refname_full.buf))
+ if (!ref_is_hidden(cmd->ref_name, refname_full.buf, &hidden_refs))
continue;
if (is_null_oid(&cmd->new_oid))
cmd->error_string = "deny deleting a hidden ref";
@@ -1928,6 +1929,8 @@ static void execute_commands(struct command *commands,
opt.err_fd = err_fd;
opt.progress = err_fd && !quiet;
opt.env = tmp_objdir_env(tmp_objdir);
+ opt.exclude_hidden_refs_section = "receive";
+
if (check_connected(iterate_receive_command_list, &data, &opt))
set_connectivity_errors(commands, si);
@@ -2591,6 +2594,7 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
packet_flush(1);
oid_array_clear(&shallow);
oid_array_clear(&ref);
+ string_list_clear(&hidden_refs, 0);
free((void *)push_cert_nonce);
return 0;
}
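These receive-pack hunks stop relying on global state in refs.c and instead thread an explicit `hidden_refs` string_list (filled from receive.hideRefs) through parse_hide_refs_config() and ref_is_hidden(). As a rough illustration of the matching that list feeds, here is a standalone prefix-match sketch; the real ref_is_hidden() is more capable (it also handles "!" negation and matching against the full, namespaced ref name), so treat this only as the basic idea.

#include <stdio.h>
#include <string.h>

static int is_hidden(const char *refname, const char **patterns, int n)
{
	for (int i = 0; i < n; i++) {
		size_t len = strlen(patterns[i]);

		if (!strncmp(refname, patterns[i], len) &&
		    (refname[len] == '\0' || refname[len] == '/'))
			return 1;
	}
	return 0;
}

int main(void)
{
	const char *patterns[] = { "refs/pull" };

	printf("%d\n", is_hidden("refs/pull/1/head", patterns, 1));	/* 1 */
	printf("%d\n", is_hidden("refs/heads/main", patterns, 1));	/* 0 */
	return 0;
}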
diff --git a/builtin/repack.c b/builtin/repack.c
index 10e23f9ee1..c1402ad038 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -32,7 +32,6 @@ static int write_bitmaps = -1;
static int use_delta_islands;
static int run_update_server_info = 1;
static char *packdir, *packtmp_name, *packtmp;
-static char *cruft_expiration;
static const char *const git_repack_usage[] = {
N_("git repack [<options>]"),
@@ -150,7 +149,8 @@ static void remove_redundant_pack(const char *dir_name, const char *base_name)
}
static void prepare_pack_objects(struct child_process *cmd,
- const struct pack_objects_args *args)
+ const struct pack_objects_args *args,
+ const char *out)
{
strvec_push(&cmd->args, "pack-objects");
if (args->window)
@@ -173,7 +173,7 @@ static void prepare_pack_objects(struct child_process *cmd,
strvec_push(&cmd->args, "--quiet");
if (delta_base_offset)
strvec_push(&cmd->args, "--delta-base-offset");
- strvec_push(&cmd->args, packtmp);
+ strvec_push(&cmd->args, out);
cmd->git_cmd = 1;
cmd->out = -1;
}
@@ -241,7 +241,7 @@ static void repack_promisor_objects(const struct pack_objects_args *args,
FILE *out;
struct strbuf line = STRBUF_INIT;
- prepare_pack_objects(&cmd, args);
+ prepare_pack_objects(&cmd, args, packtmp);
cmd.in = -1;
/*
@@ -657,7 +657,9 @@ static void remove_redundant_bitmaps(struct string_list *include,
}
static int write_cruft_pack(const struct pack_objects_args *args,
+ const char *destination,
const char *pack_prefix,
+ const char *cruft_expiration,
struct string_list *names,
struct string_list *existing_packs,
struct string_list *existing_kept_packs)
@@ -667,8 +669,10 @@ static int write_cruft_pack(const struct pack_objects_args *args,
struct string_list_item *item;
FILE *in, *out;
int ret;
+ const char *scratch;
+ int local = skip_prefix(destination, packdir, &scratch);
- prepare_pack_objects(&cmd, args);
+ prepare_pack_objects(&cmd, args, destination);
strvec_push(&cmd.args, "--cruft");
if (cruft_expiration)
@@ -693,6 +697,10 @@ static int write_cruft_pack(const struct pack_objects_args *args,
* By the time it is read here, it contains only the pack(s)
* that were just written, which is exactly the set of packs we
* want to consider kept.
+ *
+ * If `--expire-to` is given, the double-use served by `names`
+ * ensures that the pack written to `--expire-to` excludes any
+ * objects contained in the cruft pack.
*/
in = xfdopen(cmd.in, "w");
for_each_string_list_item(item, names)
@@ -710,9 +718,14 @@ static int write_cruft_pack(const struct pack_objects_args *args,
if (line.len != the_hash_algo->hexsz)
die(_("repack: Expecting full hex object ID lines only "
"from pack-objects."));
-
- item = string_list_append(names, line.buf);
- item->util = populate_pack_exts(line.buf);
+ /*
+ * avoid putting packs written outside of the repository in the
+ * list of names
+ */
+ if (local) {
+ item = string_list_append(names, line.buf);
+ item->util = populate_pack_exts(line.buf);
+ }
}
fclose(out);
@@ -744,6 +757,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
struct pack_objects_args cruft_po_args = {NULL};
int geometric_factor = 0;
int write_midx = 0;
+ const char *cruft_expiration = NULL;
+ const char *expire_to = NULL;
struct option builtin_repack_options[] = {
OPT_BIT('a', NULL, &pack_everything,
@@ -793,6 +808,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
N_("find a geometric progression with factor <N>")),
OPT_BOOL('m', "write-midx", &write_midx,
N_("write a multi-pack index of the resulting packs")),
+ OPT_STRING(0, "expire-to", &expire_to, N_("dir"),
+ N_("pack prefix to store a pack containing pruned objects")),
OPT_END()
};
@@ -858,7 +875,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
split_pack_geometry(geometry, geometric_factor);
}
- prepare_pack_objects(&cmd, &po_args);
+ prepare_pack_objects(&cmd, &po_args, packtmp);
show_progress = !po_args.quiet && isatty(2);
@@ -956,6 +973,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
item = string_list_append(&names, line.buf);
item->util = populate_pack_exts(item->string);
}
+ strbuf_release(&line);
fclose(out);
ret = finish_command(&cmd);
if (ret)
@@ -984,11 +1002,45 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
cruft_po_args.local = po_args.local;
cruft_po_args.quiet = po_args.quiet;
- ret = write_cruft_pack(&cruft_po_args, pack_prefix, &names,
+ ret = write_cruft_pack(&cruft_po_args, packtmp, pack_prefix,
+ cruft_expiration, &names,
&existing_nonkept_packs,
&existing_kept_packs);
if (ret)
return ret;
+
+ if (delete_redundant && expire_to) {
+ /*
+ * If `--expire-to` is given with `-d`, it's possible
+ * that we're about to prune some objects. With cruft
+ * packs, pruning is implicit: any objects from existing
+ * packs that weren't picked up by new packs are removed
+ * when their packs are deleted.
+ *
+ * Generate an additional cruft pack, with one twist:
+ * `names` now includes the name of the cruft pack
+ * written in the previous step. So the contents of
+ * _this_ cruft pack exclude everything contained in the
+ * existing cruft pack (that is, all of the unreachable
+ * objects which are no older than
+ * `--cruft-expiration`).
+ *
+ * To make this work, cruft_expiration must become NULL
+ * so that this cruft pack doesn't actually prune any
+ * objects. If it were non-NULL, this call would always
+ * generate an empty pack (since every object not in the
+ * cruft pack generated above will have an mtime older
+ * than the expiration).
+ */
+ ret = write_cruft_pack(&cruft_po_args, expire_to,
+ pack_prefix,
+ NULL,
+ &names,
+ &existing_nonkept_packs,
+ &existing_kept_packs);
+ if (ret)
+ return ret;
+ }
}
string_list_sort(&names);
@@ -1124,7 +1176,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
string_list_clear(&existing_nonkept_packs, 0);
string_list_clear(&existing_kept_packs, 0);
clear_pack_geometry(geometry);
- strbuf_release(&line);
return 0;
}
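The repack hunks add `--expire-to=<dir>`: with `-d`, a second cruft pack is written to that destination and, because `names` already contains the first cruft pack, it ends up holding exactly the unreachable objects the first pass left out — the ones older than the expiration that would otherwise be pruned. The standalone sketch below only illustrates that partition by mtime cutoff; the object names, timestamps, and cutoff are made up and this is not the pack-objects selection logic itself.

#include <stdio.h>
#include <time.h>

struct obj { const char *name; time_t mtime; };

int main(void)
{
	struct obj unreachable[] = {
		{ "blob-a", 1700000000 },	/* newer than cutoff */
		{ "blob-b", 1600000000 },	/* older than cutoff */
	};
	time_t cutoff = 1650000000;	/* stand-in for --cruft-expiration */

	for (int i = 0; i < 2; i++) {
		if (unreachable[i].mtime > cutoff)
			printf("%s -> cruft pack kept in the repository\n",
			       unreachable[i].name);
		else
			printf("%s -> pack written to --expire-to instead of being deleted\n",
			       unreachable[i].name);
	}
	return 0;
}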
diff --git a/builtin/reset.c b/builtin/reset.c
index fdce6f8c85..376ec95521 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -73,9 +73,11 @@ static int reset_index(const char *ref, const struct object_id *oid, int reset_t
case HARD:
opts.update = 1;
opts.reset = UNPACK_RESET_OVERWRITE_UNTRACKED;
+ opts.skip_cache_tree_update = 1;
break;
case MIXED:
opts.reset = UNPACK_RESET_PROTECT_UNTRACKED;
+ opts.skip_cache_tree_update = 1;
/* but opts.update=0, so working tree not updated */
break;
default:
@@ -481,5 +483,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
if (!pathspec.nr)
remove_branch_state(the_repository, 0);
+ discard_index(&the_index);
+
return update_ref_status;
}
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index 3acd93f71e..d42db0b0cc 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -38,6 +38,7 @@ static const char rev_list_usage[] =
" --tags\n"
" --remotes\n"
" --stdin\n"
+" --exclude-hidden=[receive|uploadpack]\n"
" --quiet\n"
" ordering output:\n"
" --topo-order\n"
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 8f61050bde..334a9d4af2 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -39,7 +39,7 @@ static int abbrev_ref_strict;
static int output_sq;
static int stuck_long;
-static struct string_list *ref_excludes;
+static struct ref_exclusions ref_excludes = REF_EXCLUSIONS_INIT;
/*
* Some arguments are relevant "revision" arguments,
@@ -198,7 +198,7 @@ static int show_default(void)
static int show_reference(const char *refname, const struct object_id *oid,
int flag UNUSED, void *cb_data UNUSED)
{
- if (ref_excluded(ref_excludes, refname))
+ if (ref_excluded(&ref_excludes, refname))
return 0;
show_rev(NORMAL, oid, refname);
return 0;
@@ -530,6 +530,7 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix)
strbuf_addstr(&parsed, " --");
sq_quote_argv(&parsed, argv);
puts(parsed.buf);
+ strbuf_release(&parsed);
return 0;
}
@@ -585,7 +586,7 @@ static void handle_ref_opt(const char *pattern, const char *prefix)
for_each_glob_ref_in(show_reference, pattern, prefix, NULL);
else
for_each_ref_in(prefix, show_reference, NULL);
- clear_ref_exclusion(&ref_excludes);
+ clear_ref_exclusions(&ref_excludes);
}
enum format_type {
@@ -863,7 +864,7 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
}
if (!strcmp(arg, "--all")) {
for_each_ref(show_reference, NULL);
- clear_ref_exclusion(&ref_excludes);
+ clear_ref_exclusions(&ref_excludes);
continue;
}
if (skip_prefix(arg, "--disambiguate=", &arg)) {
@@ -876,10 +877,14 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
continue;
}
if (opt_with_value(arg, "--branches", &arg)) {
+ if (ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --branches"));
handle_ref_opt(arg, "refs/heads/");
continue;
}
if (opt_with_value(arg, "--tags", &arg)) {
+ if (ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --tags"));
handle_ref_opt(arg, "refs/tags/");
continue;
}
@@ -888,6 +893,8 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
continue;
}
if (opt_with_value(arg, "--remotes", &arg)) {
+ if (ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --remotes"));
handle_ref_opt(arg, "refs/remotes/");
continue;
}
@@ -895,6 +902,10 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
add_ref_exclusion(&ref_excludes, arg);
continue;
}
+ if (skip_prefix(arg, "--exclude-hidden=", &arg)) {
+ exclude_hidden_refs(&ref_excludes, arg);
+ continue;
+ }
if (!strcmp(arg, "--show-toplevel")) {
const char *work_tree = get_git_work_tree();
if (work_tree)
diff --git a/builtin/revert.c b/builtin/revert.c
index ee32c714a7..8bc87e4c77 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -221,6 +221,7 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
opts->strategy = xstrdup_or_null(opts->strategy);
if (!opts->strategy && getenv("GIT_TEST_MERGE_ALGORITHM"))
opts->strategy = xstrdup(getenv("GIT_TEST_MERGE_ALGORITHM"));
+ free(options);
if (cmd == 'q') {
int ret = sequencer_remove_state(opts);
@@ -261,6 +262,9 @@ int cmd_cherry_pick(int argc, const char **argv, const char *prefix)
opts.action = REPLAY_PICK;
sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
+ if (opts.revs)
+ release_revisions(opts.revs);
+ free(opts.revs);
if (res < 0)
die(_("cherry-pick failed"));
return res;
diff --git a/builtin/rm.c b/builtin/rm.c
index f0d025a4e2..05bfe20a46 100644
--- a/builtin/rm.c
+++ b/builtin/rm.c
@@ -86,8 +86,7 @@ static void submodules_absorb_gitdir_if_needed(void)
continue;
if (!submodule_uses_gitfile(name))
- absorb_git_dir_into_superproject(name,
- ABSORB_GITDIR_RECURSE_SUBMODULES);
+ absorb_git_dir_into_superproject(name);
}
}
diff --git a/builtin/stash.c b/builtin/stash.c
index bb5485b409..8a64d564a1 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -1686,8 +1686,10 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
}
done:
+ strbuf_release(&patch);
free_stash_info(&info);
strbuf_release(&stash_msg_buf);
+ strbuf_release(&untracked_files);
return ret;
}
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index a7683d3529..28e66a3d70 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -616,6 +616,9 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
int diff_files_result;
struct strbuf buf = STRBUF_INIT;
const char *git_dir;
+ struct setup_revision_opt opt = {
+ .free_removed_argv_elements = 1,
+ };
if (!submodule_from_path(the_repository, null_oid(), path))
die(_("no submodule mapping found in .gitmodules for path '%s'"),
@@ -649,9 +652,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
repo_init_revisions(the_repository, &rev, NULL);
rev.abbrev = 0;
- diff_files_args.nr = setup_revisions(diff_files_args.nr,
- diff_files_args.v,
- &rev, NULL);
+ setup_revisions(diff_files_args.nr, diff_files_args.v, &rev, &opt);
diff_files_result = run_diff_files(&rev, 0);
if (!diff_result_code(&rev.diffopt, diff_files_result)) {
@@ -1378,8 +1379,7 @@ static void deinit_submodule(const char *path, const char *prefix,
".git file by using absorbgitdirs."),
displaypath);
- absorb_git_dir_into_superproject(path,
- ABSORB_GITDIR_RECURSE_SUBMODULES);
+ absorb_git_dir_into_superproject(path);
}
@@ -1503,6 +1503,8 @@ struct module_clone_data {
const char *name;
const char *url;
const char *depth;
+ const char *branch;
+ const char *branch_oid;
struct list_objects_filter_options *filter_options;
unsigned int quiet: 1;
unsigned int progress: 1;
@@ -1692,6 +1694,8 @@ static int clone_submodule(const struct module_clone_data *clone_data,
strvec_push(&cp.args, clone_data->single_branch ?
"--single-branch" :
"--no-single-branch");
+ if (the_repository->settings.submodule_propagate_branches)
+ strvec_push(&cp.args, "--detach");
strvec_push(&cp.args, "--");
strvec_push(&cp.args, clone_data->url);
@@ -1704,6 +1708,21 @@ static int clone_submodule(const struct module_clone_data *clone_data,
if(run_command(&cp))
die(_("clone of '%s' into submodule path '%s' failed"),
clone_data->url, clone_data_path);
+
+ if (clone_data->branch) {
+ struct child_process branch_cp = CHILD_PROCESS_INIT;
+
+ branch_cp.git_cmd = 1;
+ prepare_other_repo_env(&branch_cp.env, sm_gitdir);
+
+ strvec_pushl(&branch_cp.args, "branch",
+ clone_data->branch, clone_data->branch_oid,
+ NULL);
+
+ if (run_command(&branch_cp))
+ die(_("could not create branch '%s' in submodule path '%s'"),
+ clone_data->branch, clone_data_path);
+ }
} else {
char *path;
@@ -1778,6 +1797,12 @@ static int module_clone(int argc, const char **argv, const char *prefix)
N_("disallow cloning into non-empty directory")),
OPT_BOOL(0, "single-branch", &clone_data.single_branch,
N_("clone only one branch, HEAD or --branch")),
+ OPT_STRING(0, "branch", &clone_data.branch,
+ N_("string"),
+ N_("name of branch to be created")),
+ OPT_STRING(0, "branch-oid", &clone_data.branch_oid,
+ N_("object-id"),
+ N_("commit id for new branch")),
OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
@@ -1785,12 +1810,14 @@ static int module_clone(int argc, const char **argv, const char *prefix)
N_("git submodule--helper clone [--prefix=<path>] [--quiet] "
"[--reference <repository>] [--name <name>] [--depth <depth>] "
"[--single-branch] [--filter <filter-spec>] "
+ "[--branch <branch> --branch-oid <oid>]"
"--url <url> --path <path>"),
NULL
};
argc = parse_options(argc, argv, prefix, module_clone_options,
git_submodule_helper_usage, 0);
+ prepare_repo_settings(the_repository);
clone_data.dissociate = !!dissociate;
clone_data.quiet = !!quiet;
@@ -1802,6 +1829,12 @@ static int module_clone(int argc, const char **argv, const char *prefix)
usage_with_options(git_submodule_helper_usage,
module_clone_options);
+ if (!!clone_data.branch != !!clone_data.branch_oid)
+ BUG("--branch and --branch-oid must be set/unset together");
+ if ((clone_data.branch &&
+ !the_repository->settings.submodule_propagate_branches))
+ BUG("--branch is only expected with submodule.propagateBranches");
+
clone_submodule(&clone_data, &reference);
list_objects_filter_release(&filter_options);
string_list_clear(&reference, 1);
@@ -1884,8 +1917,8 @@ static void submodule_update_clone_release(struct submodule_update_clone *suc)
struct update_data {
const char *prefix;
char *displaypath;
+ const char *super_branch;
enum submodule_update_type update_default;
- struct object_id suboid;
struct string_list references;
struct submodule_update_strategy update_strategy;
struct list_objects_filter_options *filter_options;
@@ -2059,6 +2092,11 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
strvec_push(&child->args, suc->update_data->single_branch ?
"--single-branch" :
"--no-single-branch");
+ if (ud->super_branch) {
+ strvec_pushf(&child->args, "--branch=%s", ud->super_branch);
+ strvec_pushf(&child->args, "--branch-oid=%s",
+ oid_to_hex(&ce->oid));
+ }
cleanup:
free(displaypath);
@@ -2222,9 +2260,14 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet,
static int run_update_command(const struct update_data *ud, int subforce)
{
struct child_process cp = CHILD_PROCESS_INIT;
- char *oid = oid_to_hex(&ud->oid);
+ const char *update_target;
int ret;
+ if (ud->update_strategy.type == SM_UPDATE_CHECKOUT && ud->super_branch)
+ update_target = ud->super_branch;
+ else
+ update_target = oid_to_hex(&ud->oid);
+
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
cp.git_cmd = 1;
@@ -2252,7 +2295,7 @@ static int run_update_command(const struct update_data *ud, int subforce)
BUG("unexpected update strategy type: %d",
ud->update_strategy.type);
}
- strvec_push(&cp.args, oid);
+ strvec_push(&cp.args, update_target);
cp.dir = ud->sm_path;
prepare_submodule_repo_env(&cp.env);
@@ -2260,20 +2303,20 @@ static int run_update_command(const struct update_data *ud, int subforce)
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
die_message(_("Unable to checkout '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ update_target, ud->displaypath);
/* No "ret" assignment, use "git checkout"'s */
break;
case SM_UPDATE_REBASE:
ret = die_message(_("Unable to rebase '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ update_target, ud->displaypath);
break;
case SM_UPDATE_MERGE:
ret = die_message(_("Unable to merge '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ update_target, ud->displaypath);
break;
case SM_UPDATE_COMMAND:
ret = die_message(_("Execution of '%s %s' failed in submodule path '%s'"),
- ud->update_strategy.command, oid, ud->displaypath);
+ ud->update_strategy.command, update_target, ud->displaypath);
break;
default:
BUG("unexpected update strategy type: %d",
@@ -2289,19 +2332,19 @@ static int run_update_command(const struct update_data *ud, int subforce)
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
printf(_("Submodule path '%s': checked out '%s'\n"),
- ud->displaypath, oid);
+ ud->displaypath, update_target);
break;
case SM_UPDATE_REBASE:
printf(_("Submodule path '%s': rebased into '%s'\n"),
- ud->displaypath, oid);
+ ud->displaypath, update_target);
break;
case SM_UPDATE_MERGE:
printf(_("Submodule path '%s': merged in '%s'\n"),
- ud->displaypath, oid);
+ ud->displaypath, update_target);
break;
case SM_UPDATE_COMMAND:
printf(_("Submodule path '%s': '%s %s'\n"),
- ud->displaypath, ud->update_strategy.command, oid);
+ ud->displaypath, ud->update_strategy.command, update_target);
break;
default:
BUG("unexpected update strategy type: %d",
@@ -2313,7 +2356,7 @@ static int run_update_command(const struct update_data *ud, int subforce)
static int run_update_procedure(const struct update_data *ud)
{
- int subforce = is_null_oid(&ud->suboid) || ud->force;
+ int subforce = ud->just_cloned || ud->force;
if (!ud->nofetch) {
/*
@@ -2488,7 +2531,10 @@ static void update_data_to_args(const struct update_data *update_data,
static int update_submodule(struct update_data *update_data)
{
+ int submodule_up_to_date;
int ret;
+ struct object_id suboid;
+ const char *submodule_head = NULL;
ret = determine_submodule_update_strategy(the_repository,
update_data->just_cloned,
@@ -2498,9 +2544,9 @@ static int update_submodule(struct update_data *update_data)
if (ret)
return ret;
- if (update_data->just_cloned)
- oidcpy(&update_data->suboid, null_oid());
- else if (resolve_gitlink_ref(update_data->sm_path, "HEAD", &update_data->suboid))
+ if (!update_data->just_cloned &&
+ resolve_gitlink_ref(update_data->sm_path, "HEAD", &suboid,
+ &submodule_head))
return die_message(_("Unable to find current revision in submodule path '%s'"),
update_data->displaypath);
@@ -2527,14 +2573,26 @@ static int update_submodule(struct update_data *update_data)
update_data->sm_path);
}
- if (resolve_gitlink_ref(update_data->sm_path, remote_ref, &update_data->oid))
+ if (resolve_gitlink_ref(update_data->sm_path, remote_ref,
+ &update_data->oid, NULL))
return die_message(_("Unable to find %s revision in submodule path '%s'"),
remote_ref, update_data->sm_path);
free(remote_ref);
}
- if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force) {
+ if (update_data->just_cloned)
+ submodule_up_to_date = 0;
+ else if (update_data->super_branch)
+ /* Check that the submodule's HEAD points to super_branch. */
+ submodule_up_to_date =
+ skip_prefix(submodule_head, "refs/heads/",
+ &submodule_head) &&
+ !strcmp(update_data->super_branch, submodule_head);
+ else
+ submodule_up_to_date = oideq(&update_data->oid, &suboid);
+
+ if (!submodule_up_to_date || update_data->force) {
ret = run_update_procedure(update_data);
if (ret)
return ret;
@@ -2546,7 +2604,6 @@ static int update_submodule(struct update_data *update_data)
next.prefix = NULL;
oidcpy(&next.oid, null_oid());
- oidcpy(&next.suboid, null_oid());
cp.dir = update_data->sm_path;
cp.git_cmd = 1;
@@ -2579,6 +2636,12 @@ static int update_submodules(struct update_data *update_data)
.data = &suc,
};
+ if (the_repository->settings.submodule_propagate_branches) {
+ struct branch *current_branch = branch_get(NULL);
+ if (current_branch)
+ update_data->super_branch = current_branch->name;
+ }
+
suc.update_data = update_data;
run_processes_parallel(&opts);
@@ -2643,9 +2706,6 @@ static int module_update(int argc, const char **argv, const char *prefix)
N_("traverse submodules recursively")),
OPT_BOOL('N', "no-fetch", &opt.nofetch,
N_("don't fetch new objects from the remote site")),
- OPT_STRING(0, "prefix", &opt.prefix,
- N_("path"),
- N_("path into the working tree")),
OPT_SET_INT(0, "checkout", &opt.update_default,
N_("use the 'checkout' update strategy (default)"),
SM_UPDATE_CHECKOUT),
@@ -2691,6 +2751,7 @@ static int module_update(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, module_update_options,
git_submodule_helper_usage, 0);
+ prepare_repo_settings(the_repository);
if (opt.require_init)
opt.init = 1;
@@ -2701,6 +2762,7 @@ static int module_update(int argc, const char **argv, const char *prefix)
}
opt.filter_options = &filter_options;
+ opt.prefix = prefix;
if (opt.update_default)
opt.update_strategy.type = opt.update_default;
@@ -2830,13 +2892,7 @@ static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
int i;
struct pathspec pathspec = { 0 };
struct module_list list = MODULE_LIST_INIT;
- unsigned flags = ABSORB_GITDIR_RECURSE_SUBMODULES;
struct option embed_gitdir_options[] = {
- OPT_STRING(0, "prefix", &prefix,
- N_("path"),
- N_("path into the working tree")),
- OPT_BIT(0, "--recursive", &flags, N_("recurse into submodules"),
- ABSORB_GITDIR_RECURSE_SUBMODULES),
OPT_END()
};
const char *const git_submodule_helper_usage[] = {
@@ -2852,7 +2908,7 @@ static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
goto cleanup;
for (i = 0; i < list.nr; i++)
- absorb_git_dir_into_superproject(list.entries[i]->name, flags);
+ absorb_git_dir_into_superproject(list.entries[i]->name);
ret = 0;
cleanup:
@@ -2861,51 +2917,6 @@ cleanup:
return ret;
}
-static int module_config(int argc, const char **argv, const char *prefix)
-{
- enum {
- CHECK_WRITEABLE = 1,
- DO_UNSET = 2
- } command = 0;
- struct option module_config_options[] = {
- OPT_CMDMODE(0, "check-writeable", &command,
- N_("check if it is safe to write to the .gitmodules file"),
- CHECK_WRITEABLE),
- OPT_CMDMODE(0, "unset", &command,
- N_("unset the config in the .gitmodules file"),
- DO_UNSET),
- OPT_END()
- };
- const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper config <name> [<value>]"),
- N_("git submodule--helper config --unset <name>"),
- "git submodule--helper config --check-writeable",
- NULL
- };
-
- argc = parse_options(argc, argv, prefix, module_config_options,
- git_submodule_helper_usage, PARSE_OPT_KEEP_ARGV0);
-
- if (argc == 1 && command == CHECK_WRITEABLE)
- return is_writing_gitmodules_ok() ? 0 : -1;
-
- /* Equivalent to ACTION_GET in builtin/config.c */
- if (argc == 2 && command != DO_UNSET)
- return print_config_from_gitmodules(the_repository, argv[1]);
-
- /* Equivalent to ACTION_SET in builtin/config.c */
- if (argc == 3 || (argc == 2 && command == DO_UNSET)) {
- const char *value = (argc == 3) ? argv[2] : NULL;
-
- if (!is_writing_gitmodules_ok())
- die(_("please make sure that the .gitmodules file is in the working tree"));
-
- return config_set_in_gitmodules_file_gently(argv[1], value);
- }
-
- usage_with_options(git_submodule_helper_usage, module_config_options);
-}
-
static int module_set_url(int argc, const char **argv, const char *prefix)
{
int quiet = 0;
@@ -3280,7 +3291,7 @@ static void die_on_repo_without_commits(const char *path)
strbuf_addstr(&sb, path);
if (is_nonbare_repository_dir(&sb)) {
struct object_id oid;
- if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ if (resolve_gitlink_ref(path, "HEAD", &oid, NULL) < 0)
die(_("'%s' does not have a commit checked out"), path);
}
strbuf_release(&sb);
@@ -3404,48 +3415,45 @@ cleanup:
return ret;
}
-#define SUPPORT_SUPER_PREFIX (1<<0)
-
-struct cmd_struct {
- const char *cmd;
- int (*fn)(int, const char **, const char *);
- unsigned option;
-};
-
-static struct cmd_struct commands[] = {
- {"clone", module_clone, SUPPORT_SUPER_PREFIX},
- {"add", module_add, 0},
- {"update", module_update, SUPPORT_SUPER_PREFIX},
- {"foreach", module_foreach, SUPPORT_SUPER_PREFIX},
- {"init", module_init, 0},
- {"status", module_status, SUPPORT_SUPER_PREFIX},
- {"sync", module_sync, SUPPORT_SUPER_PREFIX},
- {"deinit", module_deinit, 0},
- {"summary", module_summary, 0},
- {"push-check", push_check, 0},
- {"absorbgitdirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
- {"config", module_config, 0},
- {"set-url", module_set_url, 0},
- {"set-branch", module_set_branch, 0},
- {"create-branch", module_create_branch, 0},
-};
-
int cmd_submodule__helper(int argc, const char **argv, const char *prefix)
{
- int i;
- if (argc < 2 || !strcmp(argv[1], "-h"))
- usage("git submodule--helper <command>");
-
- for (i = 0; i < ARRAY_SIZE(commands); i++) {
- if (!strcmp(argv[1], commands[i].cmd)) {
- if (get_super_prefix() &&
- !(commands[i].option & SUPPORT_SUPER_PREFIX))
- die(_("%s doesn't support --super-prefix"),
- commands[i].cmd);
- return commands[i].fn(argc - 1, argv + 1, prefix);
- }
- }
+ const char *cmd = argv[0];
+ const char *subcmd;
+ parse_opt_subcommand_fn *fn = NULL;
+ const char *const usage[] = {
+ N_("git submodule--helper <command>"),
+ NULL
+ };
+ struct option options[] = {
+ OPT_SUBCOMMAND("clone", &fn, module_clone),
+ OPT_SUBCOMMAND("add", &fn, module_add),
+ OPT_SUBCOMMAND("update", &fn, module_update),
+ OPT_SUBCOMMAND("foreach", &fn, module_foreach),
+ OPT_SUBCOMMAND("init", &fn, module_init),
+ OPT_SUBCOMMAND("status", &fn, module_status),
+ OPT_SUBCOMMAND("sync", &fn, module_sync),
+ OPT_SUBCOMMAND("deinit", &fn, module_deinit),
+ OPT_SUBCOMMAND("summary", &fn, module_summary),
+ OPT_SUBCOMMAND("push-check", &fn, push_check),
+ OPT_SUBCOMMAND("absorbgitdirs", &fn, absorb_git_dirs),
+ OPT_SUBCOMMAND("set-url", &fn, module_set_url),
+ OPT_SUBCOMMAND("set-branch", &fn, module_set_branch),
+ OPT_SUBCOMMAND("create-branch", &fn, module_create_branch),
+ OPT_END()
+ };
+ argc = parse_options(argc, argv, prefix, options, usage, 0);
+ subcmd = argv[0];
+
+ if (strcmp(subcmd, "clone") && strcmp(subcmd, "update") &&
+ strcmp(subcmd, "foreach") && strcmp(subcmd, "status") &&
+ strcmp(subcmd, "sync") && strcmp(subcmd, "absorbgitdirs") &&
+ get_super_prefix())
+ /*
+ * xstrfmt() rather than "%s %s" to keep the translated
+ * string identical to git.c's.
+ */
+ die(_("%s doesn't support --super-prefix"),
+ xstrfmt("'%s %s'", cmd, subcmd));
- die(_("'%s' is not a valid submodule--helper "
- "subcommand"), argv[1]);
+ return fn(argc, argv, prefix);
}
diff --git a/builtin/unpack-file.c b/builtin/unpack-file.c
index 9e8119dd35..88de32b7d7 100644
--- a/builtin/unpack-file.c
+++ b/builtin/unpack-file.c
@@ -19,6 +19,7 @@ static char *create_temp_file(struct object_id *oid)
if (write_in_full(fd, buf, size) < 0)
die_errno("unable to write temp-file");
close(fd);
+ free(buf);
return path;
}
diff --git a/builtin/update-index.c b/builtin/update-index.c
index 7b0c924d7d..e6305656c9 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -339,7 +339,7 @@ static int process_directory(const char *path, int len, struct stat *st)
if (S_ISGITLINK(ce->ce_mode)) {
/* Do nothing to the index if there is no HEAD! */
- if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ if (resolve_gitlink_ref(path, "HEAD", &oid, NULL) < 0)
return 0;
return add_one_path(ce, path, len, st);
@@ -365,7 +365,7 @@ static int process_directory(const char *path, int len, struct stat *st)
}
/* No match - should we add it as a gitlink? */
- if (!resolve_gitlink_ref(path, "HEAD", &oid))
+ if (!resolve_gitlink_ref(path, "HEAD", &oid, NULL))
return add_one_path(NULL, path, len, st);
/* Error out. */
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 4a24d53be1..963d322ee7 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -17,7 +17,7 @@
#define BUILTIN_WORKTREE_ADD_USAGE \
N_("git worktree add [-f] [--detach] [--checkout] [--lock [--reason <string>]]\n" \
- " [-b <new-branch>] <path> [<commit-ish>]")
+ " [[-b | -B | --orphan] <new-branch>] <path> [<commit-ish>]")
#define BUILTIN_WORKTREE_LIST_USAGE \
N_("git worktree list [-v | --porcelain [-z]]")
#define BUILTIN_WORKTREE_LOCK_USAGE \
@@ -90,6 +90,7 @@ struct add_opts {
int detach;
int quiet;
int checkout;
+ const char *orphan_branch;
const char *keep_locked;
};
@@ -364,6 +365,24 @@ static int checkout_worktree(const struct add_opts *opts,
return run_command(&cp);
}
+static int make_worktree_orphan(const struct add_opts *opts,
+ struct strvec *child_env)
+{
+ int ret;
+ struct strbuf symref = STRBUF_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ cp.git_cmd = 1;
+
+ validate_new_branchname(opts->orphan_branch, &symref, 0);
+ strvec_pushl(&cp.args, "symbolic-ref", "HEAD", symref.buf, NULL);
+ if (opts->quiet)
+ strvec_push(&cp.args, "--quiet");
+ strvec_pushv(&cp.env, child_env->v);
+ ret = run_command(&cp);
+ strbuf_release(&symref);
+ return ret;
+}
+
static int add_worktree(const char *path, const char *refname,
const struct add_opts *opts)
{
@@ -393,7 +412,7 @@ static int add_worktree(const char *path, const char *refname,
die_if_checked_out(symref.buf, 0);
}
commit = lookup_commit_reference_by_name(refname);
- if (!commit)
+ if (!commit && !opts->orphan_branch)
die(_("invalid reference: %s"), refname);
name = worktree_basename(path, &len);
@@ -482,10 +501,10 @@ static int add_worktree(const char *path, const char *refname,
strvec_pushf(&child_env, "%s=%s", GIT_WORK_TREE_ENVIRONMENT, path);
cp.git_cmd = 1;
- if (!is_branch)
+ if (!is_branch && commit) {
strvec_pushl(&cp.args, "update-ref", "HEAD",
oid_to_hex(&commit->object.oid), NULL);
- else {
+ } else {
strvec_pushl(&cp.args, "symbolic-ref", "HEAD",
symref.buf, NULL);
if (opts->quiet)
@@ -497,6 +516,10 @@ static int add_worktree(const char *path, const char *refname,
if (ret)
goto done;
+ if (opts->orphan_branch &&
+ (ret = make_worktree_orphan(opts, &child_env)))
+ goto done;
+
if (opts->checkout &&
(ret = checkout_worktree(opts, &child_env)))
goto done;
@@ -516,7 +539,7 @@ done:
* Hook failure does not warrant worktree deletion, so run hook after
* is_junk is cleared, but do return appropriate code when hook fails.
*/
- if (!ret && opts->checkout) {
+ if (!ret && opts->checkout && !opts->orphan_branch) {
struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
strvec_pushl(&opt.env, "GIT_DIR", "GIT_WORK_TREE", NULL);
@@ -616,6 +639,8 @@ static int add(int ac, const char **av, const char *prefix)
N_("create a new branch")),
OPT_STRING('B', NULL, &new_branch_force, N_("branch"),
N_("create or reset a branch")),
+ OPT_STRING(0, "orphan", &opts.orphan_branch, N_("branch"),
+ N_("new unparented branch")),
OPT_BOOL('d', "detach", &opts.detach, N_("detach HEAD at named commit")),
OPT_BOOL(0, "checkout", &opts.checkout, N_("populate the new working tree")),
OPT_BOOL(0, "lock", &keep_locked, N_("keep the new working tree locked")),
@@ -629,12 +654,21 @@ static int add(int ac, const char **av, const char *prefix)
N_("try to match the new branch name with a remote-tracking branch")),
OPT_END()
};
+ int ret;
memset(&opts, 0, sizeof(opts));
opts.checkout = 1;
ac = parse_options(ac, av, prefix, options, git_worktree_add_usage, 0);
if (!!opts.detach + !!new_branch + !!new_branch_force > 1)
die(_("options '%s', '%s', and '%s' cannot be used together"), "-b", "-B", "--detach");
+ if (!!opts.detach + !!new_branch + !!new_branch_force + !!opts.orphan_branch > 1)
+ die(_("options '%s', '%s', '%s', and '%s' cannot be used together"),
+ "-b", "-B", "--orphan", "--detach");
+ if (opts.orphan_branch && opt_track)
+ die(_("'%s' and '%s' cannot be used together"), "--orphan", "--track");
+ if (opts.orphan_branch && !opts.checkout)
+ die(_("'%s' and '%s' cannot be used together"), "--orphan",
+ "--no-checkout");
if (lock_reason && !keep_locked)
die(_("the option '%s' requires '%s'"), "--reason", "--lock");
if (lock_reason)
@@ -651,6 +685,13 @@ static int add(int ac, const char **av, const char *prefix)
if (!strcmp(branch, "-"))
branch = "@{-1}";
+ /*
+ * When creating a new branch, new_branch now contains the branch to
+ * create.
+ *
+ * Past this point, new_branch_force can be treated solely as a
+ * boolean flag to indicate whether `-B` was selected.
+ */
if (new_branch_force) {
struct strbuf symref = STRBUF_INIT;
@@ -663,7 +704,17 @@ static int add(int ac, const char **av, const char *prefix)
strbuf_release(&symref);
}
- if (ac < 2 && !new_branch && !opts.detach) {
+ /*
+ * As the orphan cannot be created until the contents of branch
+ * are staged, opts.orphan_branch should be treated as both a boolean
+ * indicating that `--orphan` was selected and as the name of the new
+ * orphan branch from this point on.
+ */
+ if (opts.orphan_branch) {
+ new_branch = opts.orphan_branch;
+ }
+
+ if (ac < 2 && !new_branch && !opts.detach && !opts.orphan_branch) {
const char *s = dwim_branch(path, &new_branch);
if (s)
branch = s;
@@ -686,7 +737,7 @@ static int add(int ac, const char **av, const char *prefix)
if (!opts.quiet)
print_preparing_worktree_line(opts.detach, branch, new_branch, !!new_branch_force);
- if (new_branch) {
+ if (new_branch && !opts.orphan_branch) {
struct child_process cp = CHILD_PROCESS_INIT;
cp.git_cmd = 1;
strvec_push(&cp.args, "branch");
@@ -705,9 +756,9 @@ static int add(int ac, const char **av, const char *prefix)
die(_("--[no-]track can only be used if a new branch is created"));
}
- UNLEAK(path);
- UNLEAK(opts);
- return add_worktree(path, branch, &opts);
+ ret = add_worktree(path, branch, &opts);
+ free(path);
+ return ret;
}
static void show_worktree_porcelain(struct worktree *wt, int line_terminator)
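
For context, a hedged usage sketch of the new worktree flag (branch and
path names are illustrative):

    # create a worktree whose HEAD points at a new, unborn branch
    git worktree add --orphan new-topic ../new-topic-wt
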
diff --git a/cache.h b/cache.h
index 26ed03bd6d..627d584a9e 100644
--- a/cache.h
+++ b/cache.h
@@ -505,6 +505,7 @@ static inline enum object_type object_type(unsigned int mode)
#define GIT_WORK_TREE_ENVIRONMENT "GIT_WORK_TREE"
#define GIT_PREFIX_ENVIRONMENT "GIT_PREFIX"
#define GIT_SUPER_PREFIX_ENVIRONMENT "GIT_INTERNAL_SUPER_PREFIX"
+#define GIT_SUBMODULE_PROPAGATE_BRANCHES_ENVIRONMENT "GIT_INTERNAL_SUBMODULE_PROPAGATE_BRANCHES"
#define DEFAULT_GIT_DIR_ENVIRONMENT ".git"
#define DB_ENVIRONMENT "GIT_OBJECT_DIRECTORY"
#define INDEX_ENVIRONMENT "GIT_INDEX_FILE"
@@ -1155,6 +1156,8 @@ struct repository_format {
int hash_algo;
int sparse_index;
char *work_tree;
+ int ref_format_count;
+ enum ref_format_flags ref_format;
struct string_list unknown_extensions;
struct string_list v1_only_extensions;
};
diff --git a/chunk-format.c b/chunk-format.c
index 0275b74a89..e836a121c5 100644
--- a/chunk-format.c
+++ b/chunk-format.c
@@ -13,6 +13,7 @@ struct chunk_info {
chunk_write_fn write_fn;
const void *start;
+ off_t offset;
};
struct chunkfile {
@@ -56,38 +57,59 @@ void add_chunk(struct chunkfile *cf,
cf->chunks_nr++;
}
-int write_chunkfile(struct chunkfile *cf, void *data)
+int write_chunkfile(struct chunkfile *cf,
+ enum chunkfile_flags flags,
+ void *data)
{
int i, result = 0;
- uint64_t cur_offset = hashfile_total(cf->f);
trace2_region_enter("chunkfile", "write", the_repository);
- /* Add the table of contents to the current offset */
- cur_offset += (cf->chunks_nr + 1) * CHUNK_TOC_ENTRY_SIZE;
+ if (!(flags & CHUNKFILE_TRAILING_TOC)) {
+ uint64_t cur_offset = hashfile_total(cf->f);
- for (i = 0; i < cf->chunks_nr; i++) {
- hashwrite_be32(cf->f, cf->chunks[i].id);
- hashwrite_be64(cf->f, cur_offset);
+ /* Add the table of contents to the current offset */
+ cur_offset += (cf->chunks_nr + 1) * CHUNK_TOC_ENTRY_SIZE;
- cur_offset += cf->chunks[i].size;
- }
+ for (i = 0; i < cf->chunks_nr; i++) {
+ hashwrite_be32(cf->f, cf->chunks[i].id);
+ hashwrite_be64(cf->f, cur_offset);
+
+ cur_offset += cf->chunks[i].size;
+ }
- /* Trailing entry marks the end of the chunks */
- hashwrite_be32(cf->f, 0);
- hashwrite_be64(cf->f, cur_offset);
+ /* Trailing entry marks the end of the chunks */
+ hashwrite_be32(cf->f, 0);
+ hashwrite_be64(cf->f, cur_offset);
+ }
for (i = 0; i < cf->chunks_nr; i++) {
- off_t start_offset = hashfile_total(cf->f);
+ cf->chunks[i].offset = hashfile_total(cf->f);
result = cf->chunks[i].write_fn(cf->f, data);
if (result)
goto cleanup;
- if (hashfile_total(cf->f) - start_offset != cf->chunks[i].size)
- BUG("expected to write %"PRId64" bytes to chunk %"PRIx32", but wrote %"PRId64" instead",
- cf->chunks[i].size, cf->chunks[i].id,
- hashfile_total(cf->f) - start_offset);
+ if (!(flags & CHUNKFILE_TRAILING_TOC)) {
+ if (hashfile_total(cf->f) - cf->chunks[i].offset != cf->chunks[i].size)
+ BUG("expected to write %"PRId64" bytes to chunk %"PRIx32", but wrote %"PRId64" instead",
+ cf->chunks[i].size, cf->chunks[i].id,
+ hashfile_total(cf->f) - cf->chunks[i].offset);
+ }
+
+ cf->chunks[i].size = hashfile_total(cf->f) - cf->chunks[i].offset;
+ }
+
+ if (flags & CHUNKFILE_TRAILING_TOC) {
+ size_t last_chunk_tail = hashfile_total(cf->f);
+ /* First entry marks the end of the chunks */
+ hashwrite_be32(cf->f, 0);
+ hashwrite_be64(cf->f, last_chunk_tail);
+
+ for (i = cf->chunks_nr - 1; i >= 0; i--) {
+ hashwrite_be32(cf->f, cf->chunks[i].id);
+ hashwrite_be64(cf->f, cf->chunks[i].offset);
+ }
}
cleanup:
@@ -151,6 +173,59 @@ int read_table_of_contents(struct chunkfile *cf,
return 0;
}
+int read_trailing_table_of_contents(struct chunkfile *cf,
+ const unsigned char *mfile,
+ size_t mfile_size)
+{
+ int i;
+ uint32_t chunk_id;
+ const unsigned char *table_of_contents = mfile + mfile_size - the_hash_algo->rawsz;
+
+ while (1) {
+ uint64_t chunk_offset;
+
+ table_of_contents -= CHUNK_TOC_ENTRY_SIZE;
+
+ chunk_id = get_be32(table_of_contents);
+ chunk_offset = get_be64(table_of_contents + 4);
+
+ /* Calculate the previous chunk size, if it exists. */
+ if (cf->chunks_nr) {
+ off_t previous_offset = cf->chunks[cf->chunks_nr - 1].offset;
+
+ if (chunk_offset < previous_offset ||
+ chunk_offset > table_of_contents - mfile) {
+ error(_("improper chunk offset(s) %"PRIx64" and %"PRIx64""),
+ previous_offset, chunk_offset);
+ return -1;
+ }
+
+ cf->chunks[cf->chunks_nr - 1].size = chunk_offset - previous_offset;
+ }
+
+ /* Stop at the null chunk. We only need it for the last size. */
+ if (!chunk_id)
+ break;
+
+ for (i = 0; i < cf->chunks_nr; i++) {
+ if (cf->chunks[i].id == chunk_id) {
+ error(_("duplicate chunk ID %"PRIx32" found"),
+ chunk_id);
+ return -1;
+ }
+ }
+
+ ALLOC_GROW(cf->chunks, cf->chunks_nr + 1, cf->chunks_alloc);
+
+ cf->chunks[cf->chunks_nr].id = chunk_id;
+ cf->chunks[cf->chunks_nr].start = mfile + chunk_offset;
+ cf->chunks[cf->chunks_nr].offset = chunk_offset;
+ cf->chunks_nr++;
+ }
+
+ return 0;
+}
+
static int pair_chunk_fn(const unsigned char *chunk_start,
size_t chunk_size,
void *data)
diff --git a/chunk-format.h b/chunk-format.h
index 7885aa0848..acb8dfbce8 100644
--- a/chunk-format.h
+++ b/chunk-format.h
@@ -31,7 +31,14 @@ void add_chunk(struct chunkfile *cf,
uint32_t id,
size_t size,
chunk_write_fn fn);
-int write_chunkfile(struct chunkfile *cf, void *data);
+
+enum chunkfile_flags {
+ CHUNKFILE_TRAILING_TOC = (1 << 0),
+};
+
+int write_chunkfile(struct chunkfile *cf,
+ enum chunkfile_flags flags,
+ void *data);
int read_table_of_contents(struct chunkfile *cf,
const unsigned char *mfile,
@@ -39,6 +46,15 @@ int read_table_of_contents(struct chunkfile *cf,
uint64_t toc_offset,
int toc_length);
+/**
+ * Read the given chunkfile, but read the table of contents from the
+ * end of the given mfile. The file is expected to be a hashfile with
+ * the_hash_algo->rawsz bytes at the end storing the hash.
+ */
+int read_trailing_table_of_contents(struct chunkfile *cf,
+ const unsigned char *mfile,
+ size_t mfile_size);
+
#define CHUNK_NOT_FOUND (-2)
/*
diff --git a/ci/lib.sh b/ci/lib.sh
index 1808e3b1ce..24d20a5d64 100755
--- a/ci/lib.sh
+++ b/ci/lib.sh
@@ -260,7 +260,7 @@ macos-latest)
else
MAKEFLAGS="$MAKEFLAGS PYTHON_PATH=$(which python2)"
MAKEFLAGS="$MAKEFLAGS NO_APPLE_COMMON_CRYPTO=NoThanks"
- MAKEFLAGS="$MAKEFLAGS DC_SHA1=YesPlease NO_OPENSSL=NoThanks"
+ MAKEFLAGS="$MAKEFLAGS NO_OPENSSL=NoThanks"
fi
;;
esac
diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh
index 8ebff42596..32eb280da0 100755
--- a/ci/run-build-and-tests.sh
+++ b/ci/run-build-and-tests.sh
@@ -30,6 +30,7 @@ linux-TEST-vars)
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=master
export GIT_TEST_WRITE_REV_INDEX=1
export GIT_TEST_CHECKOUT_WORKERS=2
+ export GIT_TEST_PACKED_REFS_VERSION=2
;;
linux-clang)
export GIT_TEST_DEFAULT_HASH=sha1
@@ -45,10 +46,19 @@ pedantic)
;;
esac
-group Build make
+mc=
+if test "$jobname" = "linux-cmake-ctest"
+then
+ cb=contrib/buildsystems
+ group CMake cmake -S "$cb" -B "$cb/out"
+ mc="-C $cb/out"
+fi
+
+group Build make $mc
+
if test -n "$run_tests"
then
- group "Run tests" make test ||
+ group "Run tests" make $mc test ||
handle_failed_tests
fi
check_unignored_build_artifacts
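
For context, a rough local equivalent of the cmake/ctest path taken by the
new "linux-cmake-ctest" job (a hedged sketch mirroring the commands above):

    cmake -S contrib/buildsystems -B contrib/buildsystems/out
    make -C contrib/buildsystems/out
    make -C contrib/buildsystems/out test
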
diff --git a/combine-diff.c b/combine-diff.c
index b0ece95480..88efcaeefa 100644
--- a/combine-diff.c
+++ b/combine-diff.c
@@ -1060,7 +1060,8 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent,
elem->mode = canon_mode(st.st_mode);
} else if (S_ISDIR(st.st_mode)) {
struct object_id oid;
- if (resolve_gitlink_ref(elem->path, "HEAD", &oid) < 0)
+ if (resolve_gitlink_ref(elem->path, "HEAD", &oid,
+ NULL) < 0)
result = grab_blob(opt->repo, &elem->oid,
elem->mode, &result_size,
NULL, NULL);
diff --git a/commit-graph.c b/commit-graph.c
index a7d8755932..c927b81250 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -1932,7 +1932,7 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
get_num_chunks(cf) * ctx->commits.nr);
}
- write_chunkfile(cf, ctx);
+ write_chunkfile(cf, 0, ctx);
stop_progress(&ctx->progress);
strbuf_release(&progress_title);
diff --git a/compat/fsmonitor/fsm-listen-darwin.c b/compat/fsmonitor/fsm-listen-darwin.c
index daeee4e465..cc9af1e3cb 100644
--- a/compat/fsmonitor/fsm-listen-darwin.c
+++ b/compat/fsmonitor/fsm-listen-darwin.c
@@ -336,7 +336,7 @@ static void fsevent_callback(ConstFSEventStreamRef streamRef,
* know how much to invalidate/refresh.
*/
- if (event_flags[k] & kFSEventStreamEventFlagItemIsFile) {
+ if (event_flags[k] & (kFSEventStreamEventFlagItemIsFile | kFSEventStreamEventFlagItemIsSymlink)) {
const char *rel = path_k +
state->path_worktree_watch.len + 1;
diff --git a/config.c b/config.c
index c058b2c70c..27f38283ad 100644
--- a/config.c
+++ b/config.c
@@ -1160,21 +1160,26 @@ static int git_parse_signed(const char *value, intmax_t *ret, intmax_t max)
if (value && *value) {
char *end;
intmax_t val;
- uintmax_t uval;
- uintmax_t factor;
+ intmax_t factor;
+
+ if (max < 0)
+ BUG("max must be a positive integer");
errno = 0;
val = strtoimax(value, &end, 0);
if (errno == ERANGE)
return 0;
+ if (end == value) {
+ errno = EINVAL;
+ return 0;
+ }
factor = get_unit_factor(end);
if (!factor) {
errno = EINVAL;
return 0;
}
- uval = val < 0 ? -val : val;
- if (unsigned_mult_overflows(factor, uval) ||
- factor * uval > max) {
+ if ((val < 0 && -max / factor > val) ||
+ (val > 0 && max / factor < val)) {
errno = ERANGE;
return 0;
}
@@ -1193,10 +1198,19 @@ static int git_parse_unsigned(const char *value, uintmax_t *ret, uintmax_t max)
uintmax_t val;
uintmax_t factor;
+ /* negative values would be accepted by strtoumax */
+ if (strchr(value, '-')) {
+ errno = EINVAL;
+ return 0;
+ }
errno = 0;
val = strtoumax(value, &end, 0);
if (errno == ERANGE)
return 0;
+ if (end == value) {
+ errno = EINVAL;
+ return 0;
+ }
factor = get_unit_factor(end);
if (!factor) {
errno = EINVAL;
diff --git a/connected.c b/connected.c
index 74a20cb32e..b90fd61790 100644
--- a/connected.c
+++ b/connected.c
@@ -85,6 +85,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
promisor_pack_found:
;
} while ((oid = fn(cb_data)) != NULL);
+ free(new_pack);
return 0;
}
@@ -100,6 +101,9 @@ no_promisor_pack_found:
strvec_push(&rev_list.args, "--exclude-promisor-objects");
if (!opt->is_deepening_fetch) {
strvec_push(&rev_list.args, "--not");
+ if (opt->exclude_hidden_refs_section)
+ strvec_pushf(&rev_list.args, "--exclude-hidden=%s",
+ opt->exclude_hidden_refs_section);
strvec_push(&rev_list.args, "--all");
}
strvec_push(&rev_list.args, "--quiet");
@@ -118,8 +122,10 @@ no_promisor_pack_found:
else
rev_list.no_stderr = opt->quiet;
- if (start_command(&rev_list))
+ if (start_command(&rev_list)) {
+ free(new_pack);
return error(_("Could not run 'git rev-list'"));
+ }
sigchain_push(SIGPIPE, SIG_IGN);
@@ -151,5 +157,6 @@ no_promisor_pack_found:
err = error_errno(_("failed to close rev-list's stdin"));
sigchain_pop(SIGPIPE);
+ free(new_pack);
return finish_command(&rev_list) || err;
}
diff --git a/connected.h b/connected.h
index 6e59c92aa3..16b2c84f2e 100644
--- a/connected.h
+++ b/connected.h
@@ -46,6 +46,13 @@ struct check_connected_options {
* during a fetch.
*/
unsigned is_deepening_fetch : 1;
+
+ /*
+ * If not NULL, use `--exclude-hidden=$section` to exclude all refs
+ * hidden via the `$section.hideRefs` config from the set of
+ * already-reachable refs.
+ */
+ const char *exclude_hidden_refs_section;
};
#define CHECK_CONNECTED_INIT { 0 }
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index 3957e4cf8c..ea1bd9602e 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -4,25 +4,67 @@
#[[
-Instructions how to use this in Visual Studio:
+== Overview ==
+
+The top-level Makefile is Git's primary build environment, and a lot
+of things are missing (and probably always will be) from this CMake
+alternative.
+
+The primary use-case for maintaining this CMake build recipe is to
+have nicer IDE integration on Windows.
+
+== Creating a build recipe ==
+
+The "cmake" command creates a build file from this recipe:
+
+ cmake -S contrib/buildsystems -B contrib/buildsystems/out -DCMAKE_BUILD_TYPE=Release
+
+Running this will create files in the contrib/buildsystems/out
+directory (our top-level .gitignore file knows to ignore contents of
+this directory).
+
+See "cmake options" below for a discussion of
+"-DCMAKE_BUILD_TYPE=Release" and other options to "cmake".
+
+== Building with Visual Studio ==
+
+To use this in Visual Studio:
Open the worktree as a folder. Visual Studio 2019 and later will detect
the CMake configuration automatically and set everything up for you,
-ready to build. You can then run the tests in `t/` via a regular Git Bash.
+ready to build. See "== Running the tests ==" below for running the tests.
Note: Visual Studio also has the option of opening `CMakeLists.txt`
directly; Using this option, Visual Studio will not find the source code,
though, therefore the `File>Open>Folder...` option is preferred.
-Instructions to run CMake manually:
+By default CMake will install vcpkg locally to your source tree on configuration;
+to avoid this, add `-DNO_VCPKG=TRUE` to the command line when configuring.
- mkdir -p contrib/buildsystems/out
- cd contrib/buildsystems/out
- cmake ../ -DCMAKE_BUILD_TYPE=Release
+== Building on Windows without Visual Studio ==
-This will build the git binaries in contrib/buildsystems/out
-directory (our top-level .gitignore file knows to ignore contents of
-this directory).
+Open contrib/buildsystems/git.sln and build Git. Or use the "msbuild"
+command-line tool (see our own ".github/workflows/main.yml" for a real
+example):
+
+ msbuild git.sln
+
+== Building on *nix ==
+
+On all other platforms running "cmake" will generate a Makefile; to
+build with it run:
+
+ make -C contrib/buildsystems/out
+
+It's also possible to use other generators, e.g. Ninja has arguably
+slightly better output. Add "-G Ninja" to the cmake command above,
+then:
+
+ ninja -C contrib/buildsystems/out
+
+== cmake options ==
+
+=== -DCMAKE_BUILD_TYPE=<type> ===
Possible build configurations(-DCMAKE_BUILD_TYPE) with corresponding
compiler flags
@@ -35,17 +77,36 @@ empty(default) :
NOTE: -DCMAKE_BUILD_TYPE is optional. For multi-config generators like Visual Studio
this option is ignored
-This process generates a Makefile(Linux/*BSD/MacOS) , Visual Studio solution(Windows) by default.
-Run `make` to build Git on Linux/*BSD/MacOS.
-Open git.sln on Windows and build Git.
+== Running the tests ==
-NOTE: By default CMake uses Makefile as the build tool on Linux and Visual Studio in Windows,
-to use another tool say `ninja` add this to the command line when configuring.
-`-G Ninja`
+Once we've built in "contrib/buildsystems/out" the tests can be run from
+the top-level (note: not from the generated "contrib/buildsystems/out/t/"
+directory). If no top-level build is found (as created with the
+Makefile), t/test-lib.sh will discover the git built in
+"contrib/buildsystems/out", e.g.:
-NOTE: By default CMake will install vcpkg locally to your source tree on configuration,
-to avoid this, add `-DNO_VCPKG=TRUE` to the command line when configuring.
+ (cd t && ./t0001-init.sh)
+ setup: had no ../git, but found & used cmake built git in ../contrib/buildsystems/out/git
+ [...]
+The tests can also be run with ctest, e.g. after building with "cmake"
+and "make" or "msbuild" run, from the top-level e.g.:
+
+ # "--test-dir" is new in cmake v3.20, so "(cd
+ # contrib/buildsystems/out && ctest ...)" on older versions.
+ ctest --test-dir contrib/buildsystems/out --jobs="$(nproc)" --output-on-failure
+
+Options can be passed by setting GIT_TEST_OPTS before invoking
+cmake. E.g. on a Linux system with systemd the tests can be sped up by
+using a ramdisk for the scratch files:
+
+ GIT_TEST_OPTS="--root=/dev/shm/$(id -u)/ctest" cmake -S contrib/buildsystems -B contrib/buildsystems/out
+ [...]
+ -- Using user-selected test options: --root=/dev/shm/<uid>/ctest
+
+Then running the tests with "ctest" (here with --jobs="$(nproc)"):
+
+ ctest --jobs=$(nproc) --test-dir contrib/buildsystems/out
]]
cmake_minimum_required(VERSION 3.14)
@@ -77,10 +138,17 @@ if(USE_VCPKG)
set(CMAKE_TOOLCHAIN_FILE ${VCPKG_DIR}/scripts/buildsystems/vcpkg.cmake CACHE STRING "Vcpkg toolchain file")
endif()
-find_program(SH_EXE sh PATHS "C:/Program Files/Git/bin" "$ENV{LOCALAPPDATA}/Programs/Git/bin")
-if(NOT SH_EXE)
- message(FATAL_ERROR "sh: shell interpreter was not found in your path, please install one."
- "On Windows, you can get it as part of 'Git for Windows' install at https://gitforwindows.org/")
+if(WIN32)
+ find_program(SH_EXE sh PATHS "C:/Program Files/Git/bin" "$ENV{LOCALAPPDATA}/Programs/Git/bin")
+ if(NOT SH_EXE)
+ message(FATAL_ERROR "sh: shell interpreter was not found in your path, please install one."
+ "You can get it as part of 'Git for Windows' install at https://gitforwindows.org/")
+ endif()
+else()
+ find_program(SH_EXE sh)
+ if(NOT SH_EXE)
+ message(FATAL_ERROR "cannot find 'sh' in '$PATH'")
+ endif()
endif()
#Create GIT-VERSION-FILE using GIT-VERSION-GEN
@@ -810,6 +878,19 @@ add_custom_command(OUTPUT ${git_links} ${git_http_links}
DEPENDS git git-remote-http)
add_custom_target(git-links ALL DEPENDS ${git_links} ${git_http_links})
+function(write_script path content)
+ file(WRITE ${path} ${content})
+
+ if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" VERSION_GREATER_EQUAL "3.19")
+ file(CHMOD ${path} FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE)
+ else()
+ execute_process(COMMAND chmod +x ${path}
+ RESULT_VARIABLE CHILD_ERROR)
+ if(CHILD_ERROR)
+ message(FATAL_ERROR "failed to chmod +x '${path}': '${CHILD_ERROR}'")
+ endif()
+ endif()
+endfunction()
#creating required scripts
set(SHELL_PATH /bin/sh)
@@ -835,7 +916,7 @@ foreach(script ${git_shell_scripts})
string(REPLACE "# @@BROKEN_PATH_FIX@@" "" content "${content}")
string(REPLACE "@@PERL@@" "${PERL_PATH}" content "${content}")
string(REPLACE "@@PAGER_ENV@@" "LESS=FRX LV=-c" content "${content}")
- file(WRITE ${CMAKE_BINARY_DIR}/${script} ${content})
+ write_script(${CMAKE_BINARY_DIR}/${script} "${content}")
endforeach()
#perl scripts
@@ -850,13 +931,14 @@ foreach(script ${git_perl_scripts})
file(STRINGS ${CMAKE_SOURCE_DIR}/${script}.perl content NEWLINE_CONSUME)
string(REPLACE "#!/usr/bin/perl" "#!/usr/bin/perl\n${perl_header}\n" content "${content}")
string(REPLACE "@@GIT_VERSION@@" "${PROJECT_VERSION}" content "${content}")
- file(WRITE ${CMAKE_BINARY_DIR}/${script} ${content})
+ write_script(${CMAKE_BINARY_DIR}/${script} "${content}")
endforeach()
#python script
file(STRINGS ${CMAKE_SOURCE_DIR}/git-p4.py content NEWLINE_CONSUME)
string(REPLACE "#!/usr/bin/env python" "#!/usr/bin/python" content "${content}")
-file(WRITE ${CMAKE_BINARY_DIR}/git-p4 ${content})
+write_script(${CMAKE_BINARY_DIR}/git-p4 "${content}")
+file(COPY ${CMAKE_SOURCE_DIR}/git-p4.py DESTINATION ${CMAKE_BINARY_DIR}/)
#perl modules
file(GLOB_RECURSE perl_modules "${CMAKE_SOURCE_DIR}/perl/*.pm")
@@ -897,7 +979,7 @@ if(MSGFMT_EXE)
foreach(po ${po_files})
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/po/build/locale/${po}/LC_MESSAGES)
add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/po/build/locale/${po}/LC_MESSAGES/git.mo
- COMMAND ${MSGFMT_EXE} --check --statistics -o ${CMAKE_BINARY_DIR}/po/build/locale/${po}/LC_MESSAGES/git.mo ${CMAKE_SOURCE_DIR}/po/${po}.po)
+ COMMAND ${MSGFMT_EXE} --check -o ${CMAKE_BINARY_DIR}/po/build/locale/${po}/LC_MESSAGES/git.mo ${CMAKE_SOURCE_DIR}/po/${po}.po)
list(APPEND po_gen ${CMAKE_BINARY_DIR}/po/build/locale/${po}/LC_MESSAGES/git.mo)
endforeach()
add_custom_target(po-gen ALL DEPENDS ${po_gen})
@@ -994,20 +1076,20 @@ foreach(script ${wrapper_scripts})
file(STRINGS ${CMAKE_SOURCE_DIR}/wrap-for-bin.sh content NEWLINE_CONSUME)
string(REPLACE "@@BUILD_DIR@@" "${CMAKE_BINARY_DIR}" content "${content}")
string(REPLACE "@@PROG@@" "${script}${EXE_EXTENSION}" content "${content}")
- file(WRITE ${CMAKE_BINARY_DIR}/bin-wrappers/${script} ${content})
+ write_script(${CMAKE_BINARY_DIR}/bin-wrappers/${script} "${content}")
endforeach()
foreach(script ${wrapper_test_scripts})
file(STRINGS ${CMAKE_SOURCE_DIR}/wrap-for-bin.sh content NEWLINE_CONSUME)
string(REPLACE "@@BUILD_DIR@@" "${CMAKE_BINARY_DIR}" content "${content}")
string(REPLACE "@@PROG@@" "t/helper/${script}${EXE_EXTENSION}" content "${content}")
- file(WRITE ${CMAKE_BINARY_DIR}/bin-wrappers/${script} ${content})
+ write_script(${CMAKE_BINARY_DIR}/bin-wrappers/${script} "${content}")
endforeach()
file(STRINGS ${CMAKE_SOURCE_DIR}/wrap-for-bin.sh content NEWLINE_CONSUME)
string(REPLACE "@@BUILD_DIR@@" "${CMAKE_BINARY_DIR}" content "${content}")
string(REPLACE "@@PROG@@" "git-cvsserver" content "${content}")
-file(WRITE ${CMAKE_BINARY_DIR}/bin-wrappers/git-cvsserver ${content})
+write_script(${CMAKE_BINARY_DIR}/bin-wrappers/git-cvsserver "${content}")
#options for configuring test options
option(PERL_TESTS "Perform tests that use perl" ON)
@@ -1025,7 +1107,6 @@ set(NO_PERL )
set(NO_PTHREADS )
set(NO_PYTHON )
set(PAGER_ENV "LESS=FRX LV=-c")
-set(DC_SHA1 YesPlease)
set(RUNTIME_PREFIX true)
set(NO_GETTEXT )
@@ -1057,11 +1138,13 @@ file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "PYTHON_PATH='${PYTHON_PATH}'\
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "TAR='${TAR}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_CURL='${NO_CURL}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_EXPAT='${NO_EXPAT}'\n")
+if(PCRE2_FOUND)
+ file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "USE_LIBPCRE2='YesPlease'\n")
+endif()
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_PERL='${NO_PERL}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_PTHREADS='${NO_PTHREADS}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_UNIX_SOCKETS='${NO_UNIX_SOCKETS}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "PAGER_ENV='${PAGER_ENV}'\n")
-file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "DC_SHA1='${DC_SHA1}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "X='${EXE_EXTENSION}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "NO_GETTEXT='${NO_GETTEXT}'\n")
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "RUNTIME_PREFIX='${RUNTIME_PREFIX}'\n")
@@ -1071,32 +1154,36 @@ if(USE_VCPKG)
file(APPEND ${CMAKE_BINARY_DIR}/GIT-BUILD-OPTIONS "PATH=\"$PATH:$TEST_DIRECTORY/../compat/vcbuild/vcpkg/installed/x64-windows/bin\"\n")
endif()
-#Make the tests work when building out of the source tree
-get_filename_component(CACHE_PATH ${CMAKE_CURRENT_LIST_DIR}/../../CMakeCache.txt ABSOLUTE)
-if(NOT ${CMAKE_BINARY_DIR}/CMakeCache.txt STREQUAL ${CACHE_PATH})
- #Setting the build directory in test-lib.sh before running tests
- file(WRITE ${CMAKE_BINARY_DIR}/CTestCustom.cmake
- "file(WRITE ${CMAKE_SOURCE_DIR}/GIT-BUILD-DIR \"${CMAKE_BINARY_DIR}\")")
- #misc copies
- file(COPY ${CMAKE_SOURCE_DIR}/t/chainlint.pl DESTINATION ${CMAKE_BINARY_DIR}/t/)
- file(COPY ${CMAKE_SOURCE_DIR}/po/is.po DESTINATION ${CMAKE_BINARY_DIR}/po/)
- file(GLOB mergetools "${CMAKE_SOURCE_DIR}/mergetools/*")
- file(COPY ${mergetools} DESTINATION ${CMAKE_BINARY_DIR}/mergetools/)
- file(COPY ${CMAKE_SOURCE_DIR}/contrib/completion/git-prompt.sh DESTINATION ${CMAKE_BINARY_DIR}/contrib/completion/)
- file(COPY ${CMAKE_SOURCE_DIR}/contrib/completion/git-completion.bash DESTINATION ${CMAKE_BINARY_DIR}/contrib/completion/)
-endif()
-
file(GLOB test_scipts "${CMAKE_SOURCE_DIR}/t/t[0-9]*.sh")
+if(DEFINED ENV{GIT_TEST_OPTS})
+ set(GIT_TEST_OPTS "$ENV{GIT_TEST_OPTS}"
+ CACHE STRING "test options, see t/README")
+ message(STATUS "Using user-selected test options: ${GIT_TEST_OPTS}")
+elseif(WIN32)
+ set(GIT_TEST_OPTS "--no-bin-wrappers --no-chain-lint -vx"
+ CACHE STRING "test options, see t/README")
+ message(STATUS "Using Windowns-specific default test options: ${GIT_TEST_OPTS}")
+else()
+ set(GIT_TEST_OPTS ""
+ CACHE STRING "test options, see t/README")
+ message(STATUS "No custom test options selected, set e.g. GIT_TEST_OPTS=\"-vixd\"")
+endif()
+separate_arguments(GIT_TEST_OPTS)
+
#test
foreach(tsh ${test_scipts})
add_test(NAME ${tsh}
- COMMAND ${SH_EXE} ${tsh} --no-bin-wrappers --no-chain-lint -vx
+ COMMAND ${SH_EXE} ${tsh} ${GIT_TEST_OPTS}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/t)
+ set_property(TEST ${tsh} APPEND PROPERTY ENVIRONMENT
+ GIT_TEST_BUILD_DIR=${CMAKE_BINARY_DIR})
endforeach()
-# This test script takes an extremely long time and is known to time out even
-# on fast machines because it requires in excess of one hour to run
-set_tests_properties("${CMAKE_SOURCE_DIR}/t/t7112-reset-submodule.sh" PROPERTIES TIMEOUT 4000)
+if(WIN32)
+ # This test script takes an extremely long time and is known to time out even
+ # on fast machines because it requires in excess of one hour to run
+ set_tests_properties("${CMAKE_SOURCE_DIR}/t/t7112-reset-submodule.sh" PROPERTIES TIMEOUT 4000)
+endif()
endif()#BUILD_TESTING
diff --git a/contrib/coccinelle/.gitignore b/contrib/coccinelle/.gitignore
index d3f29646dc..1d45c0a40c 100644
--- a/contrib/coccinelle/.gitignore
+++ b/contrib/coccinelle/.gitignore
@@ -1 +1 @@
-*.patch*
+*.patch
diff --git a/contrib/coccinelle/README b/contrib/coccinelle/README
index f0e80bd7f0..d1daa1f626 100644
--- a/contrib/coccinelle/README
+++ b/contrib/coccinelle/README
@@ -41,3 +41,52 @@ There are two types of semantic patches:
This allows to expose plans of pending large scale refactorings without
impacting the bad pattern checks.
+
+Git-specific tips & things to know about how we run "spatch":
+
+ * The "make coccicheck" will piggy-back on
+ "COMPUTE_HEADER_DEPENDENCIES". If you've built a given object file
+ the "coccicheck" target will consider its depednency to decide if
+ it needs to re-run on the corresponding source file.
+
+ This means that a "make coccicheck" will re-compile object files
+ before running. This might be unexpected, but speeds up the run in
+ the common case, as e.g. a change to "column.h" won't require all
+ coccinelle rules to be re-run against "grep.c" (or another file
+ that happens not to use "column.h").
+
+ To disable this behavior use the "SPATCH_USE_O_DEPENDENCIES=NoThanks"
+ flag.
+
+ * To speed up our rules the "make coccicheck" target will by default
+ concatenate all of the *.cocci files here into an "ALL.cocci", and
+ apply it to each source file.
+
+ This makes the run faster, as we don't need to run each rule
+ against each source file. See the Makefile for further discussion,
+ this behavior can be disabled with "SPATCH_CONCAT_COCCI=".
+
+ But since they're concatenated, any <id> in the <rule name> (e.g. "@
+ my_name", vs. the anonymous "@@") needs to be unique across all our
+ *.cocci files. You should only need to name rules if other rules
+ depend on them (currently only one rule is named).
+
+ * To speed up incremental runs even more use the "spatchcache" tool
+ in this directory as your "SPATCH". It aims to be a "ccache" for
+ coccinelle, and piggy-backs on "COMPUTE_HEADER_DEPENDENCIES".
+
+ It caches in Redis by default; see its source for a how-to.
+
+ In one setup with a primed cache, "make coccicheck" followed by a
+ "make clean && make" takes around 10s to run, compared to 2m30s
+ without the cache (both under the default "SPATCH_CONCAT_COCCI=Y").
+
+ With "SPATCH_CONCAT_COCCI=" the total runtime is around ~6m, sped
+ up to ~1m with "spatchcache".
+
+ Most of the 10s (or ~1m) is spent re-running "spatch" on files we
+ couldn't cache, as we didn't compile them (mostly in contrib/* and
+ compat/*).
+
+ The absolute times will differ for you, but the relative speedup
+ from caching should be on that order.
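
A hedged example tying the above together (assumes "spatch" is installed,
and a local Redis when using "spatchcache"):

    make
    make coccicheck SPATCH=contrib/coccinelle/spatchcache \
        SPATCH_FLAGS=--very-quiet

    # or: apply each *.cocci rule separately instead of ALL.cocci
    make coccicheck SPATCH_CONCAT_COCCI=
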
diff --git a/contrib/coccinelle/hashmap.cocci b/contrib/coccinelle/hashmap.cocci
index d69e120ccf..c5dbb4557b 100644
--- a/contrib/coccinelle/hashmap.cocci
+++ b/contrib/coccinelle/hashmap.cocci
@@ -1,4 +1,4 @@
-@ hashmap_entry_init_usage @
+@@
expression E;
struct hashmap_entry HME;
@@
diff --git a/contrib/coccinelle/preincr.cocci b/contrib/coccinelle/preincr.cocci
index 7fe1e8d2d9..ae42cb0730 100644
--- a/contrib/coccinelle/preincr.cocci
+++ b/contrib/coccinelle/preincr.cocci
@@ -1,4 +1,4 @@
-@ preincrement @
+@@
identifier i;
@@
- ++i > 1
diff --git a/contrib/coccinelle/spatchcache b/contrib/coccinelle/spatchcache
new file mode 100755
index 0000000000..29e9352d8a
--- /dev/null
+++ b/contrib/coccinelle/spatchcache
@@ -0,0 +1,304 @@
+#!/bin/sh
+#
+# spatchcache: a poor-man's "ccache"-alike for "spatch" in git.git
+#
+# This caching command relies on the peculiarities of the Makefile
+# driving "spatch" in git.git, in particular if we invoke:
+#
+# make
+# # See "spatchCache.cacheWhenStderr" for why "--very-quiet" is
+# # used
+# make coccicheck SPATCH_FLAGS=--very-quiet
+#
+# We can with COMPUTE_HEADER_DEPENDENCIES (auto-detected as true with
+# "gcc" and "clang") write e.g. a .depend/grep.o.d for grep.c, when we
+# compile grep.o.
+#
+# The .depend/grep.o.d will have the full header dependency tree of
+# grep.c, and we can thus cache the output of "spatch" by:
+#
+# 1. Hashing all of those files
+# 2. Hashing our source file, and the *.cocci rule we're
+# applying
+# 3. Running spatch; if it suggests no changes (by far the common
+# case) we invoke "spatchCache.getCmd" and
+# "spatchCache.setCmd" with a SHA-256 hash to ask "does this
+# ID have no changes" or to "say that ID had no changes"
+# 4. If no "spatchCache.{set,get}Cmd" is specified we'll use
+# "redis-cli" and maintain a SET called "spatch-cache". Set
+# appropriate redis memory policies to keep it from growing
+# out of control.
+#
+# This along with the general incremental "make" support for
+# "contrib/coccinelle" makes it viable to (re-)run coccicheck
+# e.g. when merging integration branches.
+#
+# Note that the "--very-quiet" flag is currently critical. The cache
+# will refuse to cache anything that has output on STDERR (which might
+# be errors from spatch), but see spatchCache.cacheWhenStderr below.
+#
+# The STDERR (and exit code) could in principle be cached (as with
+# ccache), but then the simple structure in the Redis cache would need
+# to change, so just supply "--very-quiet" for now.
+#
+# To use this, simply set SPATCH to
+# contrib/coccinelle/spatchcache. Then optionally set:
+#
+# [spatchCache]
+# # Optional: path to a custom spatch
+# spatch = ~/g/coccicheck/spatch.opt
+#
+# As well as this trace config (debug implies trace):
+#
+# cacheWhenStderr = true
+# trace = false
+# debug = false
+#
+# The ".depend/grep.o.d" can also be customized, as a string that will
+# be eval'd, it has access to a "$dirname" and "$basename":
+#
+# [spatchCache]
+# dependFormat = "$dirname/.depend/${basename%.c}.o.d"
+#
+# Setting "trace" to "true" allows for seeing when we have a cache HIT
+# or MISS. To debug whether the cache is working do that, and run e.g.:
+#
+# redis-cli FLUSHALL
+# <make && make coccicheck, as above>
+# grep -hore HIT -e MISS -e SET -e NOCACHE -e CANTCACHE .build/contrib/coccinelle | sort | uniq -c
+# 600 CANTCACHE
+# 7365 MISS
+# 7365 SET
+#
+# A subsequent "make cocciclean && make coccicheck" should then have
+# all "HIT"'s and "CANTCACHE"'s.
+#
+# The "spatchCache.cacheWhenStderr" option is critical when using
+# spatchCache.{trace,debug} to debug whether something is set in the
+# cache: the trace output goes into the spatch logs in .build/*, so
+# without it we'd always emit a NOCACHE.
+#
+# Reading the config can make the command much slower; to work around
+# this the config can be set in the environment, with the environment
+# variable name corresponding to the config key. "default" can be used
+# to use whatever's the script default, e.g. setting
+# spatchCache.cacheWhenStderr=true and deferring to the defaults for
+# the rest is:
+#
+# export GIT_CONTRIB_SPATCHCACHE_DEBUG=default
+# export GIT_CONTRIB_SPATCHCACHE_TRACE=default
+# export GIT_CONTRIB_SPATCHCACHE_CACHEWHENSTDERR=true
+# export GIT_CONTRIB_SPATCHCACHE_SPATCH=default
+# export GIT_CONTRIB_SPATCHCACHE_DEPENDFORMAT=default
+# export GIT_CONTRIB_SPATCHCACHE_SETCMD=default
+# export GIT_CONTRIB_SPATCHCACHE_GETCMD=default
+
+set -e
+
+env_or_config () {
+ env="$1"
+ shift
+ if test "$env" = "default"
+ then
+ # Avoid expensive "git config" invocation
+ return
+ elif test -n "$env"
+ then
+ echo "$env"
+ else
+ git config $@ || :
+ fi
+}
+
+## Our own configuration & options
+debug=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_DEBUG" --bool "spatchCache.debug")
+if test "$debug" != "true"
+then
+ debug=
+fi
+if test -n "$debug"
+then
+ set -x
+fi
+
+trace=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_TRACE" --bool "spatchCache.trace")
+if test "$trace" != "true"
+then
+ trace=
+fi
+if test -n "$debug"
+then
+ # debug implies trace
+ trace=true
+fi
+
+cacheWhenStderr=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_CACHEWHENSTDERR" --bool "spatchCache.cacheWhenStderr")
+if test "$cacheWhenStderr" != "true"
+then
+ cacheWhenStderr=
+fi
+
+trace_it () {
+ if test -z "$trace"
+ then
+ return
+ fi
+ echo "$@" >&2
+}
+
+spatch=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_SPATCH" --path "spatchCache.spatch")
+if test -n "$spatch"
+then
+ if test -n "$debug"
+ then
+ trace_it "custom spatchCache.spatch='$spatch'"
+ fi
+else
+ spatch=spatch
+fi
+
+dependFormat='$dirname/.depend/${basename%.c}.o.d'
+dependFormatCfg=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_DEPENDFORMAT" "spatchCache.dependFormat")
+if test -n "$dependFormatCfg"
+then
+ dependFormat="$dependFormatCfg"
+fi
+
+set=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_SETCMD" "spatchCache.setCmd")
+get=$(env_or_config "$GIT_CONTRIB_SPATCHCACHE_GETCMD" "spatchCache.getCmd")
+
+## Parse spatch()-like command-line for caching info
+arg_sp=
+arg_file=
+args="$@"
+spatch_opts() {
+ while test $# != 0
+ do
+ arg_file="$1"
+ case "$1" in
+ --sp-file)
+ arg_sp="$2"
+ ;;
+ esac
+ shift
+ done
+}
+spatch_opts "$@"
+if ! test -f "$arg_file"
+then
+ arg_file=
+fi
+
+hash_for_cache() {
+ # Parameters that should affect the cache
+ echo "args=$args"
+ echo "config spatchCache.spatch=$spatch"
+ echo "config spatchCache.debug=$debug"
+ echo "config spatchCache.trace=$trace"
+ echo "config spatchCache.cacheWhenStderr=$cacheWhenStderr"
+ echo
+
+ # Our target file and its dependencies
+ git hash-object "$1" "$2" $(grep -E -o '^[^:]+:$' "$3" | tr -d ':')
+}
+
+# Sanity checks
+if ! test -f "$arg_sp" && ! test -f "$arg_file"
+then
+ echo $0: no idea how to cache "$@" >&2
+ exit 128
+fi
+
+# Main logic
+dirname=$(dirname "$arg_file")
+basename=$(basename "$arg_file")
+eval "dep=$dependFormat"
+
+if ! test -f "$dep"
+then
+ trace_it "$0: CANTCACHE have no '$dep' for '$arg_file'!"
+ exec "$spatch" "$@"
+fi
+
+if test -n "$debug"
+then
+ trace_it "$0: The full cache input for '$arg_sp' '$arg_file' '$dep'"
+ hash_for_cache "$arg_sp" "$arg_file" "$dep" >&2
+fi
+sum=$(hash_for_cache "$arg_sp" "$arg_file" "$dep" | git hash-object --stdin)
+
+trace_it "$0: processing '$arg_file' with '$arg_sp' rule, and got hash '$sum' for it + '$dep'"
+
+getret=
+if test -z "$get"
+then
+ if test $(redis-cli SISMEMBER spatch-cache "$sum") = 1
+ then
+ getret=0
+ else
+ getret=1
+ fi
+else
+ $get "$sum"
+ getret=$?
+fi
+
+if test "$getret" = 0
+then
+ trace_it "$0: HIT for '$arg_file' with '$arg_sp'"
+ exit 0
+else
+ trace_it "$0: MISS: for '$arg_file' with '$arg_sp'"
+fi
+
+out="$(mktemp)"
+err="$(mktemp)"
+
+set +e
+"$spatch" "$@" >"$out" 2>>"$err"
+ret=$?
+cat "$out"
+cat "$err" >&2
+set -e
+
+nocache=
+if test $ret != 0
+then
+ nocache="exited non-zero: $ret"
+elif test -s "$out"
+then
+ nocache="had patch output"
+elif test -z "$cacheWhenStderr" && test -s "$err"
+then
+ nocache="had stderr (use --very-quiet or spatchCache.cacheWhenStderr=true?)"
+fi
+
+if test -n "$nocache"
+then
+ trace_it "$0: NOCACHE ($nocache): for '$arg_file' with '$arg_sp'"
+ exit "$ret"
+fi
+
+trace_it "$0: SET: for '$arg_file' with '$arg_sp'"
+
+setret=
+if test -z "$set"
+then
+ if test $(redis-cli SADD spatch-cache "$sum") = 1
+ then
+ setret=0
+ else
+ setret=1
+ fi
+else
+ "$set" "$sum"
+ setret=$?
+fi
+
+if test "$setret" != 0
+then
+ echo "FAILED to set '$sum' in cache!" >&2
+ exit 128
+fi
+
+exit "$ret"
diff --git a/contrib/coccinelle/strbuf.cocci b/contrib/coccinelle/strbuf.cocci
index 0970d98ad7..5f06105df6 100644
--- a/contrib/coccinelle/strbuf.cocci
+++ b/contrib/coccinelle/strbuf.cocci
@@ -1,4 +1,4 @@
-@ strbuf_addf_with_format_only @
+@@
expression E;
constant fmt !~ "%";
@@
diff --git a/contrib/coccinelle/swap.cocci b/contrib/coccinelle/swap.cocci
index a0934d1fda..522177afb6 100644
--- a/contrib/coccinelle/swap.cocci
+++ b/contrib/coccinelle/swap.cocci
@@ -1,4 +1,4 @@
-@ swap_with_declaration @
+@@
type T;
identifier tmp;
T a, b;
diff --git a/contrib/coccinelle/the_repository.pending.cocci b/contrib/coccinelle/the_repository.pending.cocci
index 072ea0d922..747d382ff5 100644
--- a/contrib/coccinelle/the_repository.pending.cocci
+++ b/contrib/coccinelle/the_repository.pending.cocci
@@ -20,7 +20,6 @@ expression E;
@@
expression E;
-expression F;
@@
- has_object_file_with_flags(
+ repo_has_object_file_with_flags(the_repository,
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index f6e2fbdcfa..930359bcef 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -58,6 +58,11 @@
#
# When set to "1" suggest all options, including options which are
# typically hidden (e.g. '--allow-empty' for 'git commit').
+#
+# GIT_COMPLETION_IGNORE_CASE
+#
+# When set to "1", suggest refs that match case insensitively (e.g.,
+# completing "FOO" on "git checkout f<TAB>").
case "$COMP_WORDBREAKS" in
*:*) : great ;;
@@ -644,8 +649,15 @@ __git_complete_index_file ()
__git_heads ()
{
local pfx="${1-}" cur_="${2-}" sfx="${3-}"
+ local ignore_case=""
+
+ if test "${GIT_COMPLETION_IGNORE_CASE-}" = "1"
+ then
+ ignore_case="--ignore-case"
+ fi
__git for-each-ref --format="${pfx//\%/%%}%(refname:strip=2)$sfx" \
+ $ignore_case \
"refs/heads/$cur_*" "refs/heads/$cur_*/**"
}
@@ -657,8 +669,15 @@ __git_heads ()
__git_remote_heads ()
{
local pfx="${1-}" cur_="${2-}" sfx="${3-}"
+ local ignore_case=""
+
+ if test "${GIT_COMPLETION_IGNORE_CASE-}" = "1"
+ then
+ ignore_case="--ignore-case"
+ fi
__git for-each-ref --format="${pfx//\%/%%}%(refname:strip=2)$sfx" \
+ $ignore_case \
"refs/remotes/$cur_*" "refs/remotes/$cur_*/**"
}
@@ -667,8 +686,15 @@ __git_remote_heads ()
__git_tags ()
{
local pfx="${1-}" cur_="${2-}" sfx="${3-}"
+ local ignore_case=""
+
+ if test "${GIT_COMPLETION_IGNORE_CASE-}" = "1"
+ then
+ ignore_case="--ignore-case"
+ fi
__git for-each-ref --format="${pfx//\%/%%}%(refname:strip=2)$sfx" \
+ $ignore_case \
"refs/tags/$cur_*" "refs/tags/$cur_*/**"
}
@@ -682,12 +708,19 @@ __git_dwim_remote_heads ()
{
local pfx="${1-}" cur_="${2-}" sfx="${3-}"
local fer_pfx="${pfx//\%/%%}" # "escape" for-each-ref format specifiers
+ local ignore_case=""
+
+ if test "${GIT_COMPLETION_IGNORE_CASE-}" = "1"
+ then
+ ignore_case="--ignore-case"
+ fi
# employ the heuristic used by git checkout and git switch
# Try to find a remote branch that matches the completion word
# but only output if the branch name is unique
__git for-each-ref --format="$fer_pfx%(refname:strip=3)$sfx" \
--sort="refname:strip=3" \
+ $ignore_case \
"refs/remotes/*/$cur_*" "refs/remotes/*/$cur_*/**" | \
uniq -u
}
@@ -712,7 +745,9 @@ __git_refs ()
local format refs
local pfx="${3-}" cur_="${4-$cur}" sfx="${5-}"
local match="${4-}"
+ local umatch="${4-}"
local fer_pfx="${pfx//\%/%%}" # "escape" for-each-ref format specifiers
+ local ignore_case=""
__git_find_repo_path
dir="$__git_repo_path"
@@ -735,12 +770,20 @@ __git_refs ()
fi
fi
+ if test "${GIT_COMPLETION_IGNORE_CASE-}" = "1"
+ then
+ ignore_case="--ignore-case"
+ # use tr instead of ${match^^} to preserve bash 3.2 compatibility
+ umatch=$(echo "$match" | tr a-z A-Z 2> /dev/null || echo "$match")
+ fi
+
if [ "$list_refs_from" = path ]; then
if [[ "$cur_" == ^* ]]; then
pfx="$pfx^"
fer_pfx="$fer_pfx^"
cur_=${cur_#^}
match=${match#^}
+ umatch=${umatch#^}
fi
case "$cur_" in
refs|refs/*)
@@ -751,7 +794,7 @@ __git_refs ()
*)
for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD CHERRY_PICK_HEAD; do
case "$i" in
- $match*)
+ $match*|$umatch*)
if [ -e "$dir/$i" ]; then
echo "$pfx$i$sfx"
fi
@@ -765,6 +808,7 @@ __git_refs ()
;;
esac
__git_dir="$dir" __git for-each-ref --format="$fer_pfx%($format)$sfx" \
+ $ignore_case \
"${refs[@]}"
if [ -n "$track" ]; then
__git_dwim_remote_heads "$pfx" "$match" "$sfx"
@@ -784,15 +828,16 @@ __git_refs ()
*)
if [ "$list_refs_from" = remote ]; then
case "HEAD" in
- $match*) echo "${pfx}HEAD$sfx" ;;
+ $match*|$umatch*) echo "${pfx}HEAD$sfx" ;;
esac
__git for-each-ref --format="$fer_pfx%(refname:strip=3)$sfx" \
+ $ignore_case \
"refs/remotes/$remote/$match*" \
"refs/remotes/$remote/$match*/**"
else
local query_symref
case "HEAD" in
- $match*) query_symref="HEAD" ;;
+ $match*|$umatch*) query_symref="HEAD" ;;
esac
__git ls-remote "$remote" $query_symref \
"refs/tags/$match*" "refs/heads/$match*" \
diff --git a/csum-file.c b/csum-file.c
index 59ef3398ca..3243473c3d 100644
--- a/csum-file.c
+++ b/csum-file.c
@@ -45,7 +45,8 @@ void hashflush(struct hashfile *f)
unsigned offset = f->offset;
if (offset) {
- the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
+ if (!f->skip_hash)
+ the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
flush(f, f->buffer, offset);
f->offset = 0;
}
@@ -64,7 +65,12 @@ int finalize_hashfile(struct hashfile *f, unsigned char *result,
int fd;
hashflush(f);
- the_hash_algo->final_fn(f->buffer, &f->ctx);
+
+ if (f->skip_hash)
+ memset(f->buffer, 0, the_hash_algo->rawsz);
+ else
+ the_hash_algo->final_fn(f->buffer, &f->ctx);
+
if (result)
hashcpy(result, f->buffer);
if (flags & CSUM_HASH_IN_STREAM)
@@ -108,7 +114,8 @@ void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
* the hashfile's buffer. In this block,
* f->offset is necessarily zero.
*/
- the_hash_algo->update_fn(&f->ctx, buf, nr);
+ if (!f->skip_hash)
+ the_hash_algo->update_fn(&f->ctx, buf, nr);
flush(f, buf, nr);
} else {
/*
@@ -153,6 +160,7 @@ static struct hashfile *hashfd_internal(int fd, const char *name,
f->tp = tp;
f->name = name;
f->do_crc = 0;
+ f->skip_hash = 0;
the_hash_algo->init_fn(&f->ctx);
f->buffer_len = buffer_len;
diff --git a/csum-file.h b/csum-file.h
index 0d29f528fb..29468067f8 100644
--- a/csum-file.h
+++ b/csum-file.h
@@ -20,6 +20,13 @@ struct hashfile {
size_t buffer_len;
unsigned char *buffer;
unsigned char *check_buffer;
+
+ /**
+ * If set to 1, skip_hash indicates that we should
+ * not actually compute the hash for this hashfile and
+ * instead only use it for buffered writes.
+ */
+ unsigned int skip_hash;
};
/* Checkpoint */
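
The new skip_hash bit turns a hashfile into a plain buffered writer: hashwrite()/hashflush() skip the update calls and finalize_hashfile() writes an all-zero trailer instead of a real checksum. A minimal sketch of a caller, assuming the existing csum-file API; the helper name and the FSYNC component/flag choices are illustrative, not part of this patch:

#include "cache.h"
#include "csum-file.h"

/* Write "len" bytes through the hashfile API without checksumming them. */
static void write_unchecksummed(int fd, const char *name,
				const void *data, unsigned int len)
{
	unsigned char trailer[GIT_MAX_RAWSZ];
	struct hashfile *f = hashfd(fd, name);

	f->skip_hash = 1;	/* buffered writes only; trailer is zeroed */
	hashwrite(f, data, len);
	finalize_hashfile(f, trailer, FSYNC_COMPONENT_NONE, CSUM_CLOSE);
}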
diff --git a/delta-islands.c b/delta-islands.c
index 26f9e99e1a..90c0d6958f 100644
--- a/delta-islands.c
+++ b/delta-islands.c
@@ -26,8 +26,6 @@ static kh_oid_map_t *island_marks;
static unsigned island_counter;
static unsigned island_counter_core;
-static kh_str_t *remote_islands;
-
struct remote_island {
uint64_t hash;
struct oid_array oids;
@@ -312,29 +310,55 @@ void resolve_tree_islands(struct repository *r,
free(todo);
}
-static regex_t *island_regexes;
-static unsigned int island_regexes_alloc, island_regexes_nr;
+struct island_load_data {
+ kh_str_t *remote_islands;
+ regex_t *rx;
+ size_t nr;
+ size_t alloc;
+};
static const char *core_island_name;
-static int island_config_callback(const char *k, const char *v, void *cb UNUSED)
+static void free_config_regexes(struct island_load_data *ild)
{
+ for (size_t i = 0; i < ild->nr; i++)
+ regfree(&ild->rx[i]);
+ free(ild->rx);
+}
+
+static void free_remote_islands(kh_str_t *remote_islands)
+{
+ const char *island_name;
+ struct remote_island *rl;
+
+ kh_foreach(remote_islands, island_name, rl, {
+ free((void *)island_name);
+ oid_array_clear(&rl->oids);
+ free(rl);
+ });
+ kh_destroy_str(remote_islands);
+}
+
+static int island_config_callback(const char *k, const char *v, void *cb)
+{
+ struct island_load_data *ild = cb;
+
if (!strcmp(k, "pack.island")) {
struct strbuf re = STRBUF_INIT;
if (!v)
return config_error_nonbool(k);
- ALLOC_GROW(island_regexes, island_regexes_nr + 1, island_regexes_alloc);
+ ALLOC_GROW(ild->rx, ild->nr + 1, ild->alloc);
if (*v != '^')
strbuf_addch(&re, '^');
strbuf_addstr(&re, v);
- if (regcomp(&island_regexes[island_regexes_nr], re.buf, REG_EXTENDED))
+ if (regcomp(&ild->rx[ild->nr], re.buf, REG_EXTENDED))
die(_("failed to load island regex for '%s': %s"), k, re.buf);
strbuf_release(&re);
- island_regexes_nr++;
+ ild->nr++;
return 0;
}
@@ -344,7 +368,8 @@ static int island_config_callback(const char *k, const char *v, void *cb UNUSED)
return 0;
}
-static void add_ref_to_island(const char *island_name, const struct object_id *oid)
+static void add_ref_to_island(kh_str_t *remote_islands, const char *island_name,
+ const struct object_id *oid)
{
uint64_t sha_core;
struct remote_island *rl = NULL;
@@ -365,8 +390,10 @@ static void add_ref_to_island(const char *island_name, const struct object_id *o
}
static int find_island_for_ref(const char *refname, const struct object_id *oid,
- int flags UNUSED, void *data UNUSED)
+ int flags UNUSED, void *cb)
{
+ struct island_load_data *ild = cb;
+
/*
* We should advertise 'ARRAY_SIZE(matches) - 2' as the max,
* so we can diagnose below a config with more capture groups
@@ -377,8 +404,8 @@ static int find_island_for_ref(const char *refname, const struct object_id *oid,
struct strbuf island_name = STRBUF_INIT;
/* walk backwards to get last-one-wins ordering */
- for (i = island_regexes_nr - 1; i >= 0; i--) {
- if (!regexec(&island_regexes[i], refname,
+ for (i = ild->nr - 1; i >= 0; i--) {
+ if (!regexec(&ild->rx[i], refname,
ARRAY_SIZE(matches), matches, 0))
break;
}
@@ -403,12 +430,12 @@ static int find_island_for_ref(const char *refname, const struct object_id *oid,
strbuf_add(&island_name, refname + match->rm_so, match->rm_eo - match->rm_so);
}
- add_ref_to_island(island_name.buf, oid);
+ add_ref_to_island(ild->remote_islands, island_name.buf, oid);
strbuf_release(&island_name);
return 0;
}
-static struct remote_island *get_core_island(void)
+static struct remote_island *get_core_island(kh_str_t *remote_islands)
{
if (core_island_name) {
khiter_t pos = kh_get_str(remote_islands, core_island_name);
@@ -419,7 +446,7 @@ static struct remote_island *get_core_island(void)
return NULL;
}
-static void deduplicate_islands(struct repository *r)
+static void deduplicate_islands(kh_str_t *remote_islands, struct repository *r)
{
struct remote_island *island, *core = NULL, **list;
unsigned int island_count, dst, src, ref, i = 0;
@@ -445,7 +472,7 @@ static void deduplicate_islands(struct repository *r)
}
island_bitmap_size = (island_count / 32) + 1;
- core = get_core_island();
+ core = get_core_island(remote_islands);
for (i = 0; i < island_count; ++i) {
mark_remote_island_1(r, list[i], core && list[i]->hash == core->hash);
@@ -456,12 +483,16 @@ static void deduplicate_islands(struct repository *r)
void load_delta_islands(struct repository *r, int progress)
{
+ struct island_load_data ild = { 0 };
+
island_marks = kh_init_oid_map();
- remote_islands = kh_init_str();
- git_config(island_config_callback, NULL);
- for_each_ref(find_island_for_ref, NULL);
- deduplicate_islands(r);
+ git_config(island_config_callback, &ild);
+ ild.remote_islands = kh_init_str();
+ for_each_ref(find_island_for_ref, &ild);
+ free_config_regexes(&ild);
+ deduplicate_islands(ild.remote_islands, r);
+ free_remote_islands(ild.remote_islands);
if (progress)
fprintf(stderr, _("Marked %d islands, done.\n"), island_counter);
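
The island_load_data change is an instance of a broader cleanup: configuration state that used to live in file-scope statics is now threaded through the git_config() callback's cb pointer and freed by the caller. A minimal, self-contained sketch of that pattern; the struct, callback, and "example.key" name are hypothetical and not part of this patch:

#include "cache.h"
#include "config.h"

struct example_state {
	int seen;
};

static int example_cb(const char *k, const char *v, void *cb)
{
	struct example_state *state = cb;

	if (!strcmp(k, "example.key") && v)
		state->seen++;
	return 0;
}

static int count_example_keys(void)
{
	struct example_state state = { 0 };

	git_config(example_cb, &state);	/* no global state involved */
	return state.seen;
}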
diff --git a/diff-lib.c b/diff-lib.c
index 2edea41a23..b48f155736 100644
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -14,6 +14,7 @@
#include "dir.h"
#include "fsmonitor.h"
#include "commit-reach.h"
+#include "config.h"
/*
* diff-files
@@ -53,7 +54,7 @@ static int check_removed(const struct index_state *istate, const struct cache_en
* a directory --- the blob was removed!
*/
if (!S_ISGITLINK(ce->ce_mode) &&
- resolve_gitlink_ref(ce->name, "HEAD", &sub))
+ resolve_gitlink_ref(ce->name, "HEAD", &sub, NULL))
return 1;
}
return 0;
@@ -65,26 +66,46 @@ static int check_removed(const struct index_state *istate, const struct cache_en
* Return 1 when changes are detected, 0 otherwise. If the DIRTY_SUBMODULES
* option is set, the caller does not only want to know if a submodule is
* modified at all but wants to know all the conditions that are met (new
- * commits, untracked content and/or modified content).
+ * commits, untracked content and/or modified content). If the
+ * defer_submodule_status bit is set, dirty_submodule will be left for the
+ * caller to set. defer_submodule_status can also be set to 0 in this
+ * function if there is no need to check whether the submodule is modified.
*/
static int match_stat_with_submodule(struct diff_options *diffopt,
const struct cache_entry *ce,
struct stat *st, unsigned ce_option,
- unsigned *dirty_submodule)
+ unsigned *dirty_submodule, int *defer_submodule_status,
+ unsigned *ignore_untracked)
{
int changed = ie_match_stat(diffopt->repo->index, ce, st, ce_option);
- if (S_ISGITLINK(ce->ce_mode)) {
- struct diff_flags orig_flags = diffopt->flags;
- if (!diffopt->flags.override_submodule_config)
- set_diffopt_flags_from_submodule_config(diffopt, ce->name);
- if (diffopt->flags.ignore_submodules)
- changed = 0;
- else if (!diffopt->flags.ignore_dirty_submodules &&
- (!changed || diffopt->flags.dirty_submodules))
+ struct diff_flags orig_flags;
+ int defer = 0;
+
+ if (!S_ISGITLINK(ce->ce_mode))
+ goto ret;
+
+ orig_flags = diffopt->flags;
+ if (!diffopt->flags.override_submodule_config)
+ set_diffopt_flags_from_submodule_config(diffopt, ce->name);
+ if (diffopt->flags.ignore_submodules) {
+ changed = 0;
+ goto cleanup;
+ }
+ if (!diffopt->flags.ignore_dirty_submodules &&
+ (!changed || diffopt->flags.dirty_submodules)) {
+ if (defer_submodule_status && *defer_submodule_status) {
+ defer = 1;
+ *ignore_untracked = diffopt->flags.ignore_untracked_in_submodules;
+ } else {
*dirty_submodule = is_submodule_modified(ce->name,
- diffopt->flags.ignore_untracked_in_submodules);
- diffopt->flags = orig_flags;
+ diffopt->flags.ignore_untracked_in_submodules);
+ }
}
+cleanup:
+ diffopt->flags = orig_flags;
+ret:
+ if (defer_submodule_status)
+ *defer_submodule_status = defer;
return changed;
}
@@ -96,6 +117,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
? CE_MATCH_RACY_IS_DIRTY : 0);
uint64_t start = getnanotime();
struct index_state *istate = revs->diffopt.repo->index;
+ struct string_list submodules = STRING_LIST_INIT_NODUP;
diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");
@@ -220,6 +242,8 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
newmode = ce->ce_mode;
} else {
struct stat st;
+ unsigned ignore_untracked = 0;
+ int defer_submodule_status = !!revs->repo;
changed = check_removed(istate, ce, &st);
if (changed) {
@@ -241,8 +265,25 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
}
changed = match_stat_with_submodule(&revs->diffopt, ce, &st,
- ce_option, &dirty_submodule);
+ ce_option, &dirty_submodule,
+ &defer_submodule_status,
+ &ignore_untracked);
newmode = ce_mode_from_stat(ce, st.st_mode);
+ if (defer_submodule_status) {
+ struct submodule_status_util tmp = {
+ .changed = changed,
+ .dirty_submodule = 0,
+ .ignore_untracked = ignore_untracked,
+ .newmode = newmode,
+ .ce = ce,
+ };
+ struct string_list_item *item;
+
+ item = string_list_append(&submodules, ce->name);
+ item->util = xmalloc(sizeof(tmp));
+ memcpy(item->util, &tmp, sizeof(tmp));
+ continue;
+ }
}
if (!changed && !dirty_submodule) {
@@ -261,6 +302,40 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
ce->name, 0, dirty_submodule);
}
+ if (submodules.nr > 0) {
+ int parallel_jobs;
+ if (git_config_get_int("submodule.diffjobs", &parallel_jobs))
+ parallel_jobs = 1;
+ else if (!parallel_jobs)
+ parallel_jobs = online_cpus();
+ else if (parallel_jobs < 0)
+ die(_("submodule.diffjobs cannot be negative"));
+
+ if (get_submodules_status(revs->repo, &submodules, parallel_jobs))
+ die(_("submodule status failed"));
+ for (size_t i = 0; i < submodules.nr; i++) {
+ struct submodule_status_util *util = submodules.items[i].util;
+ struct cache_entry *ce = util->ce;
+ unsigned int oldmode;
+ const struct object_id *old_oid, *new_oid;
+
+ if (!util->changed && !util->dirty_submodule) {
+ ce_mark_uptodate(ce);
+ mark_fsmonitor_valid(istate, ce);
+ if (!revs->diffopt.flags.find_copies_harder)
+ continue;
+ }
+ oldmode = ce->ce_mode;
+ old_oid = &ce->oid;
+ new_oid = util->changed ? null_oid() : &ce->oid;
+ diff_change(&revs->diffopt, oldmode, util->newmode,
+ old_oid, new_oid,
+ !is_null_oid(old_oid),
+ !is_null_oid(new_oid),
+ ce->name, 0, util->dirty_submodule);
+ }
+ }
+ string_list_clear(&submodules, 1);
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
trace_performance_since(start, "diff-files");
@@ -308,7 +383,7 @@ static int get_stat_data(const struct index_state *istate,
return -1;
}
changed = match_stat_with_submodule(diffopt, ce, &st,
- 0, dirty_submodule);
+ 0, dirty_submodule, NULL, NULL);
if (changed) {
mode = ce_mode_from_stat(ce, st.st_mode);
oid = null_oid();
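
The run_diff_files() change defers submodule status checks by queuing each gitlink on a string_list whose util slot carries the state needed to emit the diff entry later, then resolving the whole batch (optionally in parallel) after the index walk. A stripped-down sketch of that queue-then-drain pattern, with hypothetical stand-ins for submodule_status_util and get_submodules_status():

#include "cache.h"
#include "string-list.h"

struct queued_item {
	unsigned changed;
	unsigned dirty;
};

static void queue_path(struct string_list *queue, const char *path,
		       unsigned changed)
{
	struct queued_item tmp = { .changed = changed };
	struct string_list_item *item = string_list_append(queue, path);

	item->util = xmalloc(sizeof(tmp));
	memcpy(item->util, &tmp, sizeof(tmp));
}

static void drain_queue(struct string_list *queue)
{
	for (size_t i = 0; i < queue->nr; i++) {
		struct queued_item *it = queue->items[i].util;

		if (!it->changed && !it->dirty)
			continue;
		/* emit a diff entry for queue->items[i].string here */
	}
	string_list_clear(queue, 1);	/* also frees the util structs */
}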
diff --git a/dir.c b/dir.c
index d604d1bab9..703e9d72f2 100644
--- a/dir.c
+++ b/dir.c
@@ -3251,7 +3251,7 @@ static int remove_dir_recurse(struct strbuf *path, int flag, int *kept_up)
struct object_id submodule_head;
if ((flag & REMOVE_DIR_KEEP_NESTED_GIT) &&
- !resolve_gitlink_ref(path->buf, "HEAD", &submodule_head)) {
+ !resolve_gitlink_ref(path->buf, "HEAD", &submodule_head, NULL)) {
/* Do not descend and nuke a nested git work tree. */
if (kept_up)
*kept_up = 1;
@@ -3581,8 +3581,12 @@ static void free_untracked(struct untracked_cache_dir *ucd)
void free_untracked_cache(struct untracked_cache *uc)
{
- if (uc)
- free_untracked(uc->root);
+ if (!uc)
+ return;
+
+ free(uc->exclude_per_dir_to_free);
+ strbuf_release(&uc->ident);
+ free_untracked(uc->root);
free(uc);
}
@@ -3739,7 +3743,7 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
next + offset + hashsz);
uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
exclude_per_dir = (const char *)next + exclude_per_dir_offset;
- uc->exclude_per_dir = xstrdup(exclude_per_dir);
+ uc->exclude_per_dir = uc->exclude_per_dir_to_free = xstrdup(exclude_per_dir);
/* NUL after exclude_per_dir is covered by sizeof(*ouc) */
next += exclude_per_dir_offset + strlen(exclude_per_dir) + 1;
if (next >= end)
diff --git a/dir.h b/dir.h
index 674747d93a..8acfc04418 100644
--- a/dir.h
+++ b/dir.h
@@ -188,6 +188,7 @@ struct untracked_cache {
struct oid_stat ss_info_exclude;
struct oid_stat ss_excludes_file;
const char *exclude_per_dir;
+ char *exclude_per_dir_to_free;
struct strbuf ident;
/*
* dir_struct#flags must match dir_flags or the untracked
diff --git a/git-bisect.sh b/git-bisect.sh
deleted file mode 100755
index 405cf76f2a..0000000000
--- a/git-bisect.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/sh
-
-USAGE='[help|start|bad|good|new|old|terms|skip|next|reset|visualize|view|replay|log|run]'
-LONG_USAGE='git bisect help
- print this long help message.
-git bisect start [--term-{new,bad}=<term> --term-{old,good}=<term>]
- [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<pathspec>...]
- reset bisect state and start bisection.
-git bisect (bad|new) [<rev>]
- mark <rev> a known-bad revision/
- a revision after change in a given property.
-git bisect (good|old) [<rev>...]
- mark <rev>... known-good revisions/
- revisions before change in a given property.
-git bisect terms [--term-good | --term-bad]
- show the terms used for old and new commits (default: bad, good)
-git bisect skip [(<rev>|<range>)...]
- mark <rev>... untestable revisions.
-git bisect next
- find next bisection to test and check it out.
-git bisect reset [<commit>]
- finish bisection search and go back to commit.
-git bisect (visualize|view)
- show bisect status in gitk.
-git bisect replay <logfile>
- replay bisection log.
-git bisect log
- show bisect log.
-git bisect run <cmd>...
- use <cmd>... to automatically bisect.
-
-Please use "git help bisect" to get the full man page.'
-
-OPTIONS_SPEC=
-. git-sh-setup
-
-TERM_BAD=bad
-TERM_GOOD=good
-
-get_terms () {
- if test -s "$GIT_DIR/BISECT_TERMS"
- then
- {
- read TERM_BAD
- read TERM_GOOD
- } <"$GIT_DIR/BISECT_TERMS"
- fi
-}
-
-case "$#" in
-0)
- usage ;;
-*)
- cmd="$1"
- get_terms
- shift
- case "$cmd" in
- help)
- git bisect -h ;;
- start)
- git bisect--helper --bisect-start "$@" ;;
- bad|good|new|old|"$TERM_BAD"|"$TERM_GOOD")
- git bisect--helper --bisect-state "$cmd" "$@" ;;
- skip)
- git bisect--helper --bisect-skip "$@" || exit;;
- next)
- # Not sure we want "next" at the UI level anymore.
- git bisect--helper --bisect-next "$@" || exit ;;
- visualize|view)
- git bisect--helper --bisect-visualize "$@" || exit;;
- reset)
- git bisect--helper --bisect-reset "$@" ;;
- replay)
- git bisect--helper --bisect-replay "$@" || exit;;
- log)
- git bisect--helper --bisect-log || exit ;;
- run)
- git bisect--helper --bisect-run "$@" || exit;;
- terms)
- git bisect--helper --bisect-terms "$@" || exit;;
- *)
- usage ;;
- esac
-esac
diff --git a/git-submodule.sh b/git-submodule.sh
index 5e5d21c010..9a50f2e912 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -343,7 +343,6 @@ cmd_update()
${recursive:+--recursive} \
${init:+--init} \
${nofetch:+--no-fetch} \
- ${wt_prefix:+--prefix "$wt_prefix"} \
${rebase:+--rebase} \
${merge:+--merge} \
${checkout:+--checkout} \
@@ -557,7 +556,7 @@ cmd_sync()
cmd_absorbgitdirs()
{
- git submodule--helper absorbgitdirs --prefix "$wt_prefix" "$@"
+ git ${wt_prefix:+-C "$wt_prefix"} submodule--helper absorbgitdirs "$@"
}
# This loop parses the command line arguments to find the
diff --git a/git.c b/git.c
index 6662548986..277a8cce84 100644
--- a/git.c
+++ b/git.c
@@ -492,7 +492,7 @@ static struct cmd_struct commands[] = {
{ "annotate", cmd_annotate, RUN_SETUP },
{ "apply", cmd_apply, RUN_SETUP_GENTLY },
{ "archive", cmd_archive, RUN_SETUP_GENTLY },
- { "bisect--helper", cmd_bisect__helper, RUN_SETUP },
+ { "bisect", cmd_bisect, RUN_SETUP },
{ "blame", cmd_blame, RUN_SETUP },
{ "branch", cmd_branch, RUN_SETUP | DELAY_PAGER_CONFIG },
{ "bugreport", cmd_bugreport, RUN_SETUP_GENTLY },
@@ -610,7 +610,7 @@ static struct cmd_struct commands[] = {
{ "stash", cmd_stash, RUN_SETUP | NEED_WORK_TREE },
{ "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
{ "stripspace", cmd_stripspace },
- { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX | NO_PARSEOPT },
+ { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX },
{ "switch", cmd_switch, RUN_SETUP | NEED_WORK_TREE },
{ "symbolic-ref", cmd_symbolic_ref, RUN_SETUP },
{ "tag", cmd_tag, RUN_SETUP | DELAY_PAGER_CONFIG },
diff --git a/http.c b/http.c
index 5d0502f51f..8a5ba3f477 100644
--- a/http.c
+++ b/http.c
@@ -560,13 +560,15 @@ static void set_curl_keepalive(CURL *c)
}
#endif
-static void redact_sensitive_header(struct strbuf *header)
+/* Return 1 if redactions have been made, 0 otherwise. */
+static int redact_sensitive_header(struct strbuf *header, size_t offset)
{
+ int ret = 0;
const char *sensitive_header;
if (trace_curl_redact &&
- (skip_iprefix(header->buf, "Authorization:", &sensitive_header) ||
- skip_iprefix(header->buf, "Proxy-Authorization:", &sensitive_header))) {
+ (skip_iprefix(header->buf + offset, "Authorization:", &sensitive_header) ||
+ skip_iprefix(header->buf + offset, "Proxy-Authorization:", &sensitive_header))) {
/* The first token is the type, which is OK to log */
while (isspace(*sensitive_header))
sensitive_header++;
@@ -575,8 +577,9 @@ static void redact_sensitive_header(struct strbuf *header)
/* Everything else is opaque and possibly sensitive */
strbuf_setlen(header, sensitive_header - header->buf);
strbuf_addstr(header, " <redacted>");
+ ret = 1;
} else if (trace_curl_redact &&
- skip_iprefix(header->buf, "Cookie:", &sensitive_header)) {
+ skip_iprefix(header->buf + offset, "Cookie:", &sensitive_header)) {
struct strbuf redacted_header = STRBUF_INIT;
const char *cookie;
@@ -612,6 +615,26 @@ static void redact_sensitive_header(struct strbuf *header)
strbuf_setlen(header, sensitive_header - header->buf);
strbuf_addbuf(header, &redacted_header);
+ ret = 1;
+ }
+ return ret;
+}
+
+/* Redact headers in info */
+static void redact_sensitive_info_header(struct strbuf *header)
+{
+ const char *sensitive_header;
+
+ /*
+ * curl's h2h3 prints headers in info, e.g.:
+ * h2h3 [<header-name>: <header-val>]
+ */
+ if (trace_curl_redact &&
+ skip_iprefix(header->buf, "h2h3 [", &sensitive_header)) {
+ if (redact_sensitive_header(header, sensitive_header - header->buf)) {
+ /* redaction ate our closing bracket */
+ strbuf_addch(header, ']');
+ }
}
}
@@ -629,7 +652,7 @@ static void curl_dump_header(const char *text, unsigned char *ptr, size_t size,
for (header = headers; *header; header++) {
if (hide_sensitive_header)
- redact_sensitive_header(*header);
+ redact_sensitive_header(*header, 0);
strbuf_insertstr((*header), 0, text);
strbuf_insertstr((*header), strlen(text), ": ");
strbuf_rtrim((*header));
@@ -668,6 +691,18 @@ static void curl_dump_data(const char *text, unsigned char *ptr, size_t size)
strbuf_release(&out);
}
+static void curl_dump_info(char *data, size_t size)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_add(&buf, data, size);
+
+ redact_sensitive_info_header(&buf);
+ trace_printf_key(&trace_curl, "== Info: %s", buf.buf);
+
+ strbuf_release(&buf);
+}
+
static int curl_trace(CURL *handle, curl_infotype type, char *data, size_t size, void *userp)
{
const char *text;
@@ -675,7 +710,7 @@ static int curl_trace(CURL *handle, curl_infotype type, char *data, size_t size,
switch (type) {
case CURLINFO_TEXT:
- trace_printf_key(&trace_curl, "== Info: %s", data);
+ curl_dump_info(data, size);
break;
case CURLINFO_HEADER_OUT:
text = "=> Send header";
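
For reference, here is what the new info-line redaction is meant to do, sketched as a hypothetical caller inside http.c (curl_dump_info() is static there, and the credential value below is made up); the expected trace line follows directly from the trace_printf_key() format above:

static void trace_info_line_example(void)
{
	char line[] = "h2h3 [Authorization: Basic dXNlcjpwYXNz]\n";

	/*
	 * With trace_curl_redact enabled this is logged as:
	 *   == Info: h2h3 [Authorization: Basic <redacted>]
	 * i.e. the scheme token is kept, the credential is replaced,
	 * and the closing bracket eaten by the redaction is restored.
	 */
	curl_dump_info(line, strlen(line));
}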
diff --git a/list-objects-filter-options.c b/list-objects-filter-options.c
index 5339660238..ee01bcd2cc 100644
--- a/list-objects-filter-options.c
+++ b/list-objects-filter-options.c
@@ -290,10 +290,6 @@ int opt_parse_list_objects_filter(const struct option *opt,
const char *arg, int unset)
{
struct list_objects_filter_options *filter_options = opt->value;
- opt_lof_init init = (opt_lof_init)opt->defval;
-
- if (init)
- filter_options = init(opt->value);
if (unset || !arg)
list_objects_filter_set_no_filter(filter_options);
diff --git a/list-objects-filter-options.h b/list-objects-filter-options.h
index 7eeadab2dd..1fe393f447 100644
--- a/list-objects-filter-options.h
+++ b/list-objects-filter-options.h
@@ -111,27 +111,13 @@ void parse_list_objects_filter(
* The opt->value to opt_parse_list_objects_filter() is either a
* "struct list_objects_filter_option *" when using
* OPT_PARSE_LIST_OBJECTS_FILTER().
- *
- * Or, if using no "struct option" field is used by the callback,
- * except the "defval" which is expected to be an "opt_lof_init"
- * function, which is called with the "opt->value" and must return a
- * pointer to the ""struct list_objects_filter_option *" to be used.
- *
- * The OPT_PARSE_LIST_OBJECTS_FILTER_INIT() can be used e.g. the
- * "struct list_objects_filter_option" is embedded in a "struct
- * rev_info", which the "defval" could be tasked with lazily
- * initializing. See cmd_pack_objects() for an example.
*/
int opt_parse_list_objects_filter(const struct option *opt,
const char *arg, int unset);
-typedef struct list_objects_filter_options *(*opt_lof_init)(void *);
-#define OPT_PARSE_LIST_OBJECTS_FILTER_INIT(fo, init) \
- { OPTION_CALLBACK, 0, "filter", (fo), N_("args"), \
- N_("object filtering"), 0, opt_parse_list_objects_filter, \
- (intptr_t)(init) }
#define OPT_PARSE_LIST_OBJECTS_FILTER(fo) \
- OPT_PARSE_LIST_OBJECTS_FILTER_INIT((fo), NULL)
+ OPT_CALLBACK(0, "filter", (fo), N_("args"), \
+ N_("object filtering"), opt_parse_list_objects_filter)
/*
* Translates abbreviated numbers in the filter's filter_spec into their
diff --git a/ls-refs.c b/ls-refs.c
index fa0d01b47c..fb6769742c 100644
--- a/ls-refs.c
+++ b/ls-refs.c
@@ -6,6 +6,7 @@
#include "ls-refs.h"
#include "pkt-line.h"
#include "config.h"
+#include "string-list.h"
static int config_read;
static int advertise_unborn;
@@ -73,6 +74,7 @@ struct ls_refs_data {
unsigned symrefs;
struct strvec prefixes;
struct strbuf buf;
+ struct string_list hidden_refs;
unsigned unborn : 1;
};
@@ -84,7 +86,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
strbuf_reset(&data->buf);
- if (ref_is_hidden(refname_nons, refname))
+ if (ref_is_hidden(refname_nons, refname, &data->hidden_refs))
return 0;
if (!ref_match(&data->prefixes, refname_nons))
@@ -137,14 +139,15 @@ static void send_possibly_unborn_head(struct ls_refs_data *data)
}
static int ls_refs_config(const char *var, const char *value,
- void *data UNUSED)
+ void *cb_data)
{
+ struct ls_refs_data *data = cb_data;
/*
* We only serve fetches over v2 for now, so respect only "uploadpack"
* config. This may need to eventually be expanded to "receive", but we
* don't yet know how that information will be passed to ls-refs.
*/
- return parse_hide_refs_config(var, value, "uploadpack");
+ return parse_hide_refs_config(var, value, "uploadpack", &data->hidden_refs);
}
int ls_refs(struct repository *r, struct packet_reader *request)
@@ -154,9 +157,10 @@ int ls_refs(struct repository *r, struct packet_reader *request)
memset(&data, 0, sizeof(data));
strvec_init(&data.prefixes);
strbuf_init(&data.buf, 0);
+ string_list_init_dup(&data.hidden_refs);
ensure_config_read();
- git_config(ls_refs_config, NULL);
+ git_config(ls_refs_config, &data);
while (packet_reader_read(request) == PACKET_READ_NORMAL) {
const char *arg = request->line;
@@ -195,6 +199,7 @@ int ls_refs(struct repository *r, struct packet_reader *request)
packet_fflush(stdout);
strvec_clear(&data.prefixes);
strbuf_release(&data.buf);
+ string_list_clear(&data.hidden_refs, 0);
return 0;
}
diff --git a/midx.c b/midx.c
index 7cfad04a24..03d947a5d3 100644
--- a/midx.c
+++ b/midx.c
@@ -1510,7 +1510,7 @@ static int write_midx_internal(const char *object_dir,
}
write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
- write_chunkfile(cf, &ctx);
+ write_chunkfile(cf, 0, &ctx);
finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA,
CSUM_FSYNC | CSUM_HASH_IN_STREAM);
diff --git a/object-file.c b/object-file.c
index 957790098f..c118e62a55 100644
--- a/object-file.c
+++ b/object-file.c
@@ -2527,7 +2527,7 @@ int index_path(struct index_state *istate, struct object_id *oid,
strbuf_release(&sb);
break;
case S_IFDIR:
- return resolve_gitlink_ref(path, "HEAD", oid);
+ return resolve_gitlink_ref(path, "HEAD", oid, NULL);
default:
return error(_("%s: unsupported file type"), path);
}
diff --git a/object.c b/object.c
index 8a74eb85e9..fad1a5af4a 100644
--- a/object.c
+++ b/object.c
@@ -286,9 +286,8 @@ struct object *parse_object_with_flags(struct repository *r,
return &commit->object;
}
- if ((obj && obj->type == OBJ_BLOB && repo_has_object_file(r, oid)) ||
- (!obj && repo_has_object_file(r, oid) &&
- oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
+ if ((!obj || (obj && obj->type == OBJ_BLOB)) &&
+ oid_object_info(r, oid, NULL) == OBJ_BLOB) {
if (!skip_hash && stream_object_signature(r, repl) < 0) {
error(_("hash mismatch %s"), oid_to_hex(oid));
return NULL;
diff --git a/pack-bitmap.c b/pack-bitmap.c
index 440407f1be..aaa2d9a104 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -354,8 +354,8 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
if (bitmap_git->pack || bitmap_git->midx) {
struct strbuf buf = STRBUF_INIT;
get_midx_filename(&buf, midx->object_dir);
- /* ignore extra bitmap file; we can only handle one */
- warning(_("ignoring extra bitmap file: '%s'"), buf.buf);
+ trace2_data_string("bitmap", the_repository,
+ "ignoring extra midx bitmap file", buf.buf);
close(fd);
strbuf_release(&buf);
return -1;
@@ -411,9 +411,6 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
struct stat st;
char *bitmap_name;
- if (open_pack_index(packfile))
- return -1;
-
bitmap_name = pack_bitmap_filename(packfile);
fd = git_open(bitmap_name);
@@ -432,8 +429,8 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
}
if (bitmap_git->pack || bitmap_git->midx) {
- /* ignore extra bitmap file; we can only handle one */
- warning(_("ignoring extra bitmap file: '%s'"), packfile->pack_name);
+ trace2_data_string("bitmap", the_repository,
+ "ignoring extra bitmap file", packfile->pack_name);
close(fd);
return -1;
}
@@ -458,6 +455,8 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
return -1;
}
+ trace2_data_string("bitmap", the_repository, "opened bitmap file",
+ packfile->pack_name);
return 0;
}
diff --git a/read-cache.c b/read-cache.c
index 3202402927..57293825a8 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -285,7 +285,7 @@ static int ce_compare_gitlink(const struct cache_entry *ce)
*
* If so, we consider it always to match.
*/
- if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
+ if (resolve_gitlink_ref(ce->name, "HEAD", &oid, NULL) < 0)
return 0;
return !oideq(&oid, &ce->oid);
}
@@ -781,7 +781,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
namelen = strlen(path);
if (S_ISDIR(st_mode)) {
- if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ if (resolve_gitlink_ref(path, "HEAD", &oid, NULL) < 0)
return error(_("'%s' does not have a commit checked out"), path);
while (namelen && path[namelen-1] == '/')
namelen--;
@@ -1817,6 +1817,8 @@ static int verify_hdr(const struct cache_header *hdr, unsigned long size)
git_hash_ctx c;
unsigned char hash[GIT_MAX_RAWSZ];
int hdr_version;
+ int all_zeroes = 1;
+ unsigned char *start, *end;
if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
return error(_("bad signature 0x%08x"), hdr->hdr_signature);
@@ -1827,10 +1829,23 @@ static int verify_hdr(const struct cache_header *hdr, unsigned long size)
if (!verify_index_checksum)
return 0;
+ end = (unsigned char *)hdr + size;
+ start = end - the_hash_algo->rawsz;
+ while (start < end) {
+ if (*start != 0) {
+ all_zeroes = 0;
+ break;
+ }
+ start++;
+ }
+
+ if (all_zeroes)
+ return 0;
+
the_hash_algo->init_fn(&c);
the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
the_hash_algo->final_fn(hash, &c);
- if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
+ if (!hasheq(hash, end - the_hash_algo->rawsz))
return error(_("bad index file sha1 signature"));
return 0;
}
@@ -2558,6 +2573,11 @@ int discard_index(struct index_state *istate)
free_untracked_cache(istate->untracked);
istate->untracked = NULL;
+ if (istate->sparse_checkout_patterns) {
+ clear_pattern_list(istate->sparse_checkout_patterns);
+ FREE_AND_NULL(istate->sparse_checkout_patterns);
+ }
+
if (istate->ce_mem_pool) {
mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
FREE_AND_NULL(istate->ce_mem_pool);
@@ -2917,9 +2937,14 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
int ieot_entries = 1;
struct index_entry_offset_table *ieot = NULL;
int nr, nr_threads;
+ int compute_hash;
f = hashfd(tempfile->fd, tempfile->filename.buf);
+ if (!git_config_get_maybe_bool("index.computehash", &compute_hash) &&
+ !compute_hash)
+ f->skip_hash = 1;
+
for (i = removed = extended = 0; i < entries; i++) {
if (cache[i]->ce_flags & CE_REMOVE)
removed++;
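
Together with the csum-file change earlier in this patch, the effect is: when index.computehash is false, do_write_index() sets skip_hash and the index is written with an all-zero trailing checksum, and verify_hdr() above treats such a trailer as "nothing to verify". A small sketch of that trailer check in isolation; the helper name and the mmapped/size parameters are illustrative only:

#include "cache.h"

/* Return 1 if the index trailer was written with hashing skipped. */
static int index_trailer_is_zeroed(const unsigned char *mmapped, size_t size)
{
	const unsigned char *p = mmapped + size - the_hash_algo->rawsz;
	const unsigned char *end = mmapped + size;

	for (; p < end; p++)
		if (*p)
			return 0;
	return 1;
}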
diff --git a/ref-filter.c b/ref-filter.c
index 9dc2cd1451..caf10ab23e 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -1358,6 +1358,7 @@ static void find_subpos(const char *buf,
/* parse signature first; we might not even have a subject line */
parse_signature(buf, end - buf, &payload, &signature);
+ strbuf_release(&payload);
/* skip past header until we hit empty line */
while (*buf && *buf != '\n') {
diff --git a/refs.c b/refs.c
index 1491ae937e..23ddc95909 100644
--- a/refs.c
+++ b/refs.c
@@ -1414,9 +1414,8 @@ char *shorten_unambiguous_ref(const char *refname, int strict)
refname, strict);
}
-static struct string_list *hide_refs;
-
-int parse_hide_refs_config(const char *var, const char *value, const char *section)
+int parse_hide_refs_config(const char *var, const char *value, const char *section,
+ struct string_list *hide_refs)
{
const char *key;
if (!strcmp("transfer.hiderefs", var) ||
@@ -1431,21 +1430,16 @@ int parse_hide_refs_config(const char *var, const char *value, const char *secti
len = strlen(ref);
while (len && ref[len - 1] == '/')
ref[--len] = '\0';
- if (!hide_refs) {
- CALLOC_ARRAY(hide_refs, 1);
- hide_refs->strdup_strings = 1;
- }
- string_list_append(hide_refs, ref);
+ string_list_append_nodup(hide_refs, ref);
}
return 0;
}
-int ref_is_hidden(const char *refname, const char *refname_full)
+int ref_is_hidden(const char *refname, const char *refname_full,
+ const struct string_list *hide_refs)
{
int i;
- if (!hide_refs)
- return 0;
for (i = hide_refs->nr - 1; i >= 0; i--) {
const char *match = hide_refs->items[i].string;
const char *subject;
@@ -1904,19 +1898,21 @@ const char *resolve_ref_unsafe(const char *refname, int resolve_flags,
}
int resolve_gitlink_ref(const char *submodule, const char *refname,
- struct object_id *oid)
+ struct object_id *oid, const char **target_out)
{
struct ref_store *refs;
int flags;
+ const char *target;
refs = get_submodule_ref_store(submodule);
if (!refs)
return -1;
-
- if (!refs_resolve_ref_unsafe(refs, refname, 0, oid, &flags) ||
- is_null_oid(oid))
+ target = refs_resolve_ref_unsafe(refs, refname, 0, oid, &flags);
+ if (!target || is_null_oid(oid))
return -1;
+ if (target_out)
+ *target_out = target;
return 0;
}
@@ -1982,6 +1978,17 @@ static struct ref_store *lookup_ref_store_map(struct hashmap *map,
return entry ? entry->refs : NULL;
}
+static int add_ref_format_flags(enum ref_format_flags flags, int caps) {
+ if (flags & REF_FORMAT_FILES)
+ caps |= REF_STORE_FORMAT_FILES;
+ if (flags & REF_FORMAT_PACKED)
+ caps |= REF_STORE_FORMAT_PACKED;
+ if (flags & REF_FORMAT_PACKED_V2)
+ caps |= REF_STORE_FORMAT_PACKED_V2;
+
+ return caps;
+}
+
/*
* Create, record, and return a ref_store instance for the specified
* gitdir.
@@ -1991,9 +1998,17 @@ static struct ref_store *ref_store_init(struct repository *repo,
unsigned int flags)
{
const char *be_name = "files";
- struct ref_storage_be *be = find_ref_storage_backend(be_name);
+ struct ref_storage_be *be;
struct ref_store *refs;
+ flags = add_ref_format_flags(repo->ref_format, flags);
+
+ if (!(flags & REF_STORE_FORMAT_FILES) &&
+ packed_refs_enabled(flags))
+ be_name = "packed";
+
+ be = find_ref_storage_backend(be_name);
+
if (!be)
BUG("reference backend %s is unknown", be_name);
@@ -2009,7 +2024,8 @@ struct ref_store *get_main_ref_store(struct repository *r)
if (!r->gitdir)
BUG("attempting to get main_ref_store outside of repository");
- r->refs_private = ref_store_init(r, r->gitdir, REF_STORE_ALL_CAPS);
+ r->refs_private = ref_store_init(r, r->gitdir,
+ REF_STORE_ALL_CAPS);
r->refs_private = maybe_debug_wrap_ref_store(r->gitdir, r->refs_private);
return r->refs_private;
}
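
With the hide_refs static gone, every caller now owns its hidden-ref list and passes it to both parse_hide_refs_config() and ref_is_hidden(), as ls-refs.c does above. A minimal sketch of the calling convention; the hide_ctx struct, callback name, and single-shot helper are hypothetical, while "uploadpack" mirrors the ls-refs usage:

#include "cache.h"
#include "config.h"
#include "refs.h"
#include "string-list.h"

struct hide_ctx {
	struct string_list hidden_refs;
};

static int hide_cb(const char *var, const char *value, void *cb_data)
{
	struct hide_ctx *ctx = cb_data;

	return parse_hide_refs_config(var, value, "uploadpack",
				      &ctx->hidden_refs);
}

static int is_hidden_example(const char *refname_nons, const char *refname)
{
	struct hide_ctx ctx = { .hidden_refs = STRING_LIST_INIT_DUP };
	int hidden;

	git_config(hide_cb, &ctx);
	hidden = ref_is_hidden(refname_nons, refname, &ctx.hidden_refs);
	string_list_clear(&ctx.hidden_refs, 0);
	return hidden;
}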
diff --git a/refs.h b/refs.h
index 8958717a17..ee8675d4aa 100644
--- a/refs.h
+++ b/refs.h
@@ -137,9 +137,12 @@ int peel_iterated_oid(const struct object_id *base, struct object_id *peeled);
* submodule (which must be non-NULL). If the resolution is
* successful, return 0 and set oid to the name of the object;
* otherwise, return a non-zero value.
+ *
+ * FIXME: Return "target" just like refs_resolve_ref_unsafe(). This will be
+ * safe to do once we merge resolve_gitlink_ref() into master.
*/
int resolve_gitlink_ref(const char *submodule, const char *refname,
- struct object_id *oid);
+ struct object_id *oid, const char **target);
/*
* Return true iff abbrev_name is a possible abbreviation for
@@ -808,7 +811,8 @@ int update_ref(const char *msg, const char *refname,
const struct object_id *new_oid, const struct object_id *old_oid,
unsigned int flags, enum action_on_err onerr);
-int parse_hide_refs_config(const char *var, const char *value, const char *);
+int parse_hide_refs_config(const char *var, const char *value, const char *,
+ struct string_list *);
/*
* Check whether a ref is hidden. If no namespace is set, both the first and
@@ -818,7 +822,7 @@ int parse_hide_refs_config(const char *var, const char *value, const char *);
* the ref is outside that namespace, the first parameter is NULL. The second
* parameter always points to the full ref name.
*/
-int ref_is_hidden(const char *, const char *);
+int ref_is_hidden(const char *, const char *, const struct string_list *);
/* Is this a per-worktree ref living in the refs/ namespace? */
int is_per_worktree_ref(const char *refname);
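
Existing callers that only need the object id pass NULL for the new out-parameter (as in the dir.c, object-file.c, and read-cache.c hunks in this patch); a caller that also wants the resolved ref name can do something like the following sketch, whose helper and output are illustrative only:

#include "cache.h"
#include "refs.h"

static void show_submodule_head(const char *path)
{
	struct object_id oid;
	const char *target;

	if (resolve_gitlink_ref(path, "HEAD", &oid, &target) < 0)
		return;
	/* "target" is what HEAD resolved to, e.g. "refs/heads/main" */
	fprintf(stderr, "%s: HEAD -> %s (%s)\n",
		path, target, oid_to_hex(&oid));
}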
diff --git a/refs/files-backend.c b/refs/files-backend.c
index b89954355d..4a18aed620 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -1198,6 +1198,12 @@ static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
struct strbuf err = STRBUF_INIT;
struct ref_transaction *transaction;
+ if (!packed_refs_enabled(refs->store_flags)) {
+ warning(_("refusing to create '%s' file because '%s' is not set"),
+ "packed-refs", "extensions.refFormat=packed");
+ return -1;
+ }
+
transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
if (!transaction)
return -1;
@@ -3274,7 +3280,7 @@ static int files_init_db(struct ref_store *ref_store, struct strbuf *err UNUSED)
}
struct ref_storage_be refs_be_files = {
- .next = NULL,
+ .next = &refs_be_packed,
.name = "files",
.init = files_ref_store_create,
.init_db = files_init_db,
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index c1c71d183e..e84f669c42 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -36,121 +36,6 @@ static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif
-struct packed_ref_store;
-
-/*
- * A `snapshot` represents one snapshot of a `packed-refs` file.
- *
- * Normally, this will be a mmapped view of the contents of the
- * `packed-refs` file at the time the snapshot was created. However,
- * if the `packed-refs` file was not sorted, this might point at heap
- * memory holding the contents of the `packed-refs` file with its
- * records sorted by refname.
- *
- * `snapshot` instances are reference counted (via
- * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
- * an instance from disappearing while an iterator is still iterating
- * over it. Instances are garbage collected when their `referrers`
- * count goes to zero.
- *
- * The most recent `snapshot`, if available, is referenced by the
- * `packed_ref_store`. Its freshness is checked whenever
- * `get_snapshot()` is called; if the existing snapshot is obsolete, a
- * new snapshot is taken.
- */
-struct snapshot {
- /*
- * A back-pointer to the packed_ref_store with which this
- * snapshot is associated:
- */
- struct packed_ref_store *refs;
-
- /* Is the `packed-refs` file currently mmapped? */
- int mmapped;
-
- /*
- * The contents of the `packed-refs` file:
- *
- * - buf -- a pointer to the start of the memory
- * - start -- a pointer to the first byte of actual references
- * (i.e., after the header line, if one is present)
- * - eof -- a pointer just past the end of the reference
- * contents
- *
- * If the `packed-refs` file was already sorted, `buf` points
- * at the mmapped contents of the file. If not, it points at
- * heap-allocated memory containing the contents, sorted. If
- * there were no contents (e.g., because the file didn't
- * exist), `buf`, `start`, and `eof` are all NULL.
- */
- char *buf, *start, *eof;
-
- /*
- * What is the peeled state of the `packed-refs` file that
- * this snapshot represents? (This is usually determined from
- * the file's header.)
- */
- enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;
-
- /*
- * Count of references to this instance, including the pointer
- * from `packed_ref_store::snapshot`, if any. The instance
- * will not be freed as long as the reference count is
- * nonzero.
- */
- unsigned int referrers;
-
- /*
- * The metadata of the `packed-refs` file from which this
- * snapshot was created, used to tell if the file has been
- * replaced since we read it.
- */
- struct stat_validity validity;
-};
-
-/*
- * A `ref_store` representing references stored in a `packed-refs`
- * file. It implements the `ref_store` interface, though it has some
- * limitations:
- *
- * - It cannot store symbolic references.
- *
- * - It cannot store reflogs.
- *
- * - It does not support reference renaming (though it could).
- *
- * On the other hand, it can be locked outside of a reference
- * transaction. In that case, it remains locked even after the
- * transaction is done and the new `packed-refs` file is activated.
- */
-struct packed_ref_store {
- struct ref_store base;
-
- unsigned int store_flags;
-
- /* The path of the "packed-refs" file: */
- char *path;
-
- /*
- * A snapshot of the values read from the `packed-refs` file,
- * if it might still be current; otherwise, NULL.
- */
- struct snapshot *snapshot;
-
- /*
- * Lock used for the "packed-refs" file. Note that this (and
- * thus the enclosing `packed_ref_store`) must not be freed.
- */
- struct lock_file lock;
-
- /*
- * Temporary file used when rewriting new contents to the
- * "packed-refs" file. Note that this (and thus the enclosing
- * `packed_ref_store`) must not be freed.
- */
- struct tempfile *tempfile;
-};
-
/*
* Increment the reference count of `*snapshot`.
*/
@@ -164,7 +49,7 @@ static void acquire_snapshot(struct snapshot *snapshot)
* memory and close the file, or free the memory. Then set the buffer
* pointers to NULL.
*/
-static void clear_snapshot_buffer(struct snapshot *snapshot)
+void clear_snapshot_buffer(struct snapshot *snapshot)
{
if (snapshot->mmapped) {
if (munmap(snapshot->buf, snapshot->eof - snapshot->buf))
@@ -181,7 +66,7 @@ static void clear_snapshot_buffer(struct snapshot *snapshot)
* Decrease the reference count of `*snapshot`. If it goes to zero,
* free `*snapshot` and return true; otherwise return false.
*/
-static int release_snapshot(struct snapshot *snapshot)
+int release_snapshot(struct snapshot *snapshot)
{
if (!--snapshot->referrers) {
stat_validity_clear(&snapshot->validity);
@@ -245,224 +130,6 @@ static void clear_snapshot(struct packed_ref_store *refs)
}
}
-static NORETURN void die_unterminated_line(const char *path,
- const char *p, size_t len)
-{
- if (len < 80)
- die("unterminated line in %s: %.*s", path, (int)len, p);
- else
- die("unterminated line in %s: %.75s...", path, p);
-}
-
-static NORETURN void die_invalid_line(const char *path,
- const char *p, size_t len)
-{
- const char *eol = memchr(p, '\n', len);
-
- if (!eol)
- die_unterminated_line(path, p, len);
- else if (eol - p < 80)
- die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
- else
- die("unexpected line in %s: %.75s...", path, p);
-
-}
-
-struct snapshot_record {
- const char *start;
- size_t len;
-};
-
-static int cmp_packed_ref_records(const void *v1, const void *v2)
-{
- const struct snapshot_record *e1 = v1, *e2 = v2;
- const char *r1 = e1->start + the_hash_algo->hexsz + 1;
- const char *r2 = e2->start + the_hash_algo->hexsz + 1;
-
- while (1) {
- if (*r1 == '\n')
- return *r2 == '\n' ? 0 : -1;
- if (*r1 != *r2) {
- if (*r2 == '\n')
- return 1;
- else
- return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
- }
- r1++;
- r2++;
- }
-}
-
-/*
- * Compare a snapshot record at `rec` to the specified NUL-terminated
- * refname.
- */
-static int cmp_record_to_refname(const char *rec, const char *refname)
-{
- const char *r1 = rec + the_hash_algo->hexsz + 1;
- const char *r2 = refname;
-
- while (1) {
- if (*r1 == '\n')
- return *r2 ? -1 : 0;
- if (!*r2)
- return 1;
- if (*r1 != *r2)
- return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
- r1++;
- r2++;
- }
-}
-
-/*
- * `snapshot->buf` is not known to be sorted. Check whether it is, and
- * if not, sort it into new memory and munmap/free the old storage.
- */
-static void sort_snapshot(struct snapshot *snapshot)
-{
- struct snapshot_record *records = NULL;
- size_t alloc = 0, nr = 0;
- int sorted = 1;
- const char *pos, *eof, *eol;
- size_t len, i;
- char *new_buffer, *dst;
-
- pos = snapshot->start;
- eof = snapshot->eof;
-
- if (pos == eof)
- return;
-
- len = eof - pos;
-
- /*
- * Initialize records based on a crude estimate of the number
- * of references in the file (we'll grow it below if needed):
- */
- ALLOC_GROW(records, len / 80 + 20, alloc);
-
- while (pos < eof) {
- eol = memchr(pos, '\n', eof - pos);
- if (!eol)
- /* The safety check should prevent this. */
- BUG("unterminated line found in packed-refs");
- if (eol - pos < the_hash_algo->hexsz + 2)
- die_invalid_line(snapshot->refs->path,
- pos, eof - pos);
- eol++;
- if (eol < eof && *eol == '^') {
- /*
- * Keep any peeled line together with its
- * reference:
- */
- const char *peeled_start = eol;
-
- eol = memchr(peeled_start, '\n', eof - peeled_start);
- if (!eol)
- /* The safety check should prevent this. */
- BUG("unterminated peeled line found in packed-refs");
- eol++;
- }
-
- ALLOC_GROW(records, nr + 1, alloc);
- records[nr].start = pos;
- records[nr].len = eol - pos;
- nr++;
-
- if (sorted &&
- nr > 1 &&
- cmp_packed_ref_records(&records[nr - 2],
- &records[nr - 1]) >= 0)
- sorted = 0;
-
- pos = eol;
- }
-
- if (sorted)
- goto cleanup;
-
- /* We need to sort the memory. First we sort the records array: */
- QSORT(records, nr, cmp_packed_ref_records);
-
- /*
- * Allocate a new chunk of memory, and copy the old memory to
- * the new in the order indicated by `records` (not bothering
- * with the header line):
- */
- new_buffer = xmalloc(len);
- for (dst = new_buffer, i = 0; i < nr; i++) {
- memcpy(dst, records[i].start, records[i].len);
- dst += records[i].len;
- }
-
- /*
- * Now munmap the old buffer and use the sorted buffer in its
- * place:
- */
- clear_snapshot_buffer(snapshot);
- snapshot->buf = snapshot->start = new_buffer;
- snapshot->eof = new_buffer + len;
-
-cleanup:
- free(records);
-}
-
-/*
- * Return a pointer to the start of the record that contains the
- * character `*p` (which must be within the buffer). If no other
- * record start is found, return `buf`.
- */
-static const char *find_start_of_record(const char *buf, const char *p)
-{
- while (p > buf && (p[-1] != '\n' || p[0] == '^'))
- p--;
- return p;
-}
-
-/*
- * Return a pointer to the start of the record following the record
- * that contains `*p`. If none is found before `end`, return `end`.
- */
-static const char *find_end_of_record(const char *p, const char *end)
-{
- while (++p < end && (p[-1] != '\n' || p[0] == '^'))
- ;
- return p;
-}
-
-/*
- * We want to be able to compare mmapped reference records quickly,
- * without totally parsing them. We can do so because the records are
- * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
- * + 1) bytes past the beginning of the record.
- *
- * But what if the `packed-refs` file contains garbage? We're willing
- * to tolerate not detecting the problem, as long as we don't produce
- * totally garbled output (we can't afford to check the integrity of
- * the whole file during every Git invocation). But we do want to be
- * sure that we never read past the end of the buffer in memory and
- * perform an illegal memory access.
- *
- * Guarantee that minimum level of safety by verifying that the last
- * record in the file is LF-terminated, and that it has at least
- * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
- * these checks fails.
- */
-static void verify_buffer_safe(struct snapshot *snapshot)
-{
- const char *start = snapshot->start;
- const char *eof = snapshot->eof;
- const char *last_line;
-
- if (start == eof)
- return;
-
- last_line = find_start_of_record(start, eof - 1);
- if (*(eof - 1) != '\n' || eof - last_line < the_hash_algo->hexsz + 2)
- die_invalid_line(snapshot->refs->path,
- last_line, eof - last_line);
-}
-
#define SMALL_FILE_SIZE (32*1024)
/*
@@ -475,9 +142,11 @@ static int load_contents(struct snapshot *snapshot)
{
int fd;
struct stat st;
- size_t size;
ssize_t bytes_read;
+ if (!packed_refs_enabled(snapshot->refs->store_flags))
+ return 0;
+
fd = open(snapshot->refs->path, O_RDONLY);
if (fd < 0) {
if (errno == ENOENT) {
@@ -498,91 +167,30 @@ static int load_contents(struct snapshot *snapshot)
if (fstat(fd, &st) < 0)
die_errno("couldn't stat %s", snapshot->refs->path);
- size = xsize_t(st.st_size);
+ snapshot->buflen = xsize_t(st.st_size);
- if (!size) {
+ if (!snapshot->buflen) {
close(fd);
return 0;
- } else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
- snapshot->buf = xmalloc(size);
- bytes_read = read_in_full(fd, snapshot->buf, size);
- if (bytes_read < 0 || bytes_read != size)
+ } else if (mmap_strategy == MMAP_NONE || snapshot->buflen <= SMALL_FILE_SIZE) {
+ snapshot->buf = xmalloc(snapshot->buflen);
+ bytes_read = read_in_full(fd, snapshot->buf, snapshot->buflen);
+ if (bytes_read < 0 || bytes_read != snapshot->buflen)
die_errno("couldn't read %s", snapshot->refs->path);
snapshot->mmapped = 0;
} else {
- snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+ snapshot->buf = xmmap(NULL, snapshot->buflen, PROT_READ, MAP_PRIVATE, fd, 0);
snapshot->mmapped = 1;
}
close(fd);
snapshot->start = snapshot->buf;
- snapshot->eof = snapshot->buf + size;
+ snapshot->eof = snapshot->buf + snapshot->buflen;
return 1;
}
/*
- * Find the place in `snapshot->buf` where the start of the record for
- * `refname` starts. If `mustexist` is true and the reference doesn't
- * exist, then return NULL. If `mustexist` is false and the reference
- * doesn't exist, then return the point where that reference would be
- * inserted, or `snapshot->eof` (which might be NULL) if it would be
- * inserted at the end of the file. In the latter mode, `refname`
- * doesn't have to be a proper reference name; for example, one could
- * search for "refs/replace/" to find the start of any replace
- * references.
- *
- * The record is sought using a binary search, so `snapshot->buf` must
- * be sorted.
- */
-static const char *find_reference_location(struct snapshot *snapshot,
- const char *refname, int mustexist)
-{
- /*
- * This is not *quite* a garden-variety binary search, because
- * the data we're searching is made up of records, and we
- * always need to find the beginning of a record to do a
- * comparison. A "record" here is one line for the reference
- * itself and zero or one peel lines that start with '^'. Our
- * loop invariant is described in the next two comments.
- */
-
- /*
- * A pointer to the character at the start of a record whose
- * preceding records all have reference names that come
- * *before* `refname`.
- */
- const char *lo = snapshot->start;
-
- /*
- * A pointer to a the first character of a record whose
- * reference name comes *after* `refname`.
- */
- const char *hi = snapshot->eof;
-
- while (lo != hi) {
- const char *mid, *rec;
- int cmp;
-
- mid = lo + (hi - lo) / 2;
- rec = find_start_of_record(lo, mid);
- cmp = cmp_record_to_refname(rec, refname);
- if (cmp < 0) {
- lo = find_end_of_record(mid, hi);
- } else if (cmp > 0) {
- hi = rec;
- } else {
- return rec;
- }
- }
-
- if (mustexist)
- return NULL;
- else
- return lo;
-}
-
-/*
* Create a newly-allocated `snapshot` of the `packed-refs` file in
* its current state and return it. The return value will already have
* its reference count incremented.
@@ -623,72 +231,52 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
snapshot->refs = refs;
acquire_snapshot(snapshot);
snapshot->peeled = PEELED_NONE;
+ snapshot->version = 1;
if (!load_contents(snapshot))
return snapshot;
- /* If the file has a header line, process it: */
- if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
- char *tmp, *p, *eol;
- struct string_list traits = STRING_LIST_INIT_NODUP;
-
- eol = memchr(snapshot->buf, '\n',
- snapshot->eof - snapshot->buf);
- if (!eol)
- die_unterminated_line(refs->path,
- snapshot->buf,
- snapshot->eof - snapshot->buf);
-
- tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
-
- if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
- die_invalid_line(refs->path,
- snapshot->buf,
- snapshot->eof - snapshot->buf);
-
- string_list_split_in_place(&traits, p, ' ', -1);
-
- if (unsorted_string_list_has_string(&traits, "fully-peeled"))
- snapshot->peeled = PEELED_FULLY;
- else if (unsorted_string_list_has_string(&traits, "peeled"))
- snapshot->peeled = PEELED_TAGS;
+ if ((refs->store_flags & REF_STORE_FORMAT_PACKED) &&
+ !detect_packed_format_v2_header(refs, snapshot)) {
+ parse_packed_format_v1_header(refs, snapshot, &sorted);
+ snapshot->version = 1;
+ verify_buffer_safe_v1(snapshot);
- sorted = unsorted_string_list_has_string(&traits, "sorted");
+ if (!sorted) {
+ sort_snapshot_v1(snapshot);
- /* perhaps other traits later as well */
-
- /* The "+ 1" is for the LF character. */
- snapshot->start = eol + 1;
-
- string_list_clear(&traits, 0);
- free(tmp);
- }
-
- verify_buffer_safe(snapshot);
+ /*
+ * Reordering the records might have moved a short one
+ * to the end of the buffer, so verify the buffer's
+ * safety again:
+ */
+ verify_buffer_safe_v1(snapshot);
+ }
- if (!sorted) {
- sort_snapshot(snapshot);
+ if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
+ /*
+ * We don't want to leave the file mmapped, so we are
+ * forced to make a copy now:
+ */
+ char *buf_copy = xmalloc(snapshot->buflen);
+
+ memcpy(buf_copy, snapshot->start, snapshot->buflen);
+ clear_snapshot_buffer(snapshot);
+ snapshot->buf = snapshot->start = buf_copy;
+ snapshot->eof = buf_copy + snapshot->buflen;
+ }
- /*
- * Reordering the records might have moved a short one
- * to the end of the buffer, so verify the buffer's
- * safety again:
- */
- verify_buffer_safe(snapshot);
+ return snapshot;
}
- if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
+ if (refs->store_flags & REF_STORE_FORMAT_PACKED_V2) {
/*
- * We don't want to leave the file mmapped, so we are
- * forced to make a copy now:
+	 * At this point, assume the file is in v2 format.
+ *
+ * fill_snapshot_v2() will die() if parsing fails.
*/
- size_t size = snapshot->eof - snapshot->start;
- char *buf_copy = xmalloc(size);
-
- memcpy(buf_copy, snapshot->start, size);
- clear_snapshot_buffer(snapshot);
- snapshot->buf = snapshot->start = buf_copy;
- snapshot->eof = buf_copy + size;
+ fill_snapshot_v2(snapshot);
+ snapshot->version = 2;
}
return snapshot;
@@ -732,54 +320,26 @@ static int packed_read_raw_ref(struct ref_store *ref_store, const char *refname,
struct packed_ref_store *refs =
packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
struct snapshot *snapshot = get_snapshot(refs);
- const char *rec;
-
- *type = 0;
- rec = find_reference_location(snapshot, refname, 1);
-
- if (!rec) {
+ if (!snapshot) {
/* refname is not a packed reference. */
*failure_errno = ENOENT;
return -1;
}
- if (get_oid_hex(rec, oid))
- die_invalid_line(refs->path, rec, snapshot->eof - rec);
-
- *type = REF_ISPACKED;
- return 0;
-}
-
-/*
- * This value is set in `base.flags` if the peeled value of the
- * current reference is known. In that case, `peeled` contains the
- * correct peeled value for the reference, which might be `null_oid`
- * if the reference is not a tag or if it is broken.
- */
-#define REF_KNOWS_PEELED 0x40
-
-/*
- * An iterator over a snapshot of a `packed-refs` file.
- */
-struct packed_ref_iterator {
- struct ref_iterator base;
-
- struct snapshot *snapshot;
-
- /* The current position in the snapshot's buffer: */
- const char *pos;
+ switch (snapshot->version) {
+ case 1:
+ return packed_read_raw_ref_v1(refs, snapshot, refname,
+ oid, type, failure_errno);
- /* The end of the part of the buffer that will be iterated over: */
- const char *eof;
+ case 2:
+ return packed_read_raw_ref_v2(refs, snapshot, refname,
+ oid, type, failure_errno);
- /* Scratch space for current values: */
- struct object_id oid, peeled;
- struct strbuf refname_buf;
-
- struct repository *repo;
- unsigned int flags;
-};
+ default:
+ return -1;
+ }
+}
/*
* Move the iterator to the next record in the snapshot, without
@@ -790,68 +350,16 @@ struct packed_ref_iterator {
*/
static int next_record(struct packed_ref_iterator *iter)
{
- const char *p = iter->pos, *eol;
-
- strbuf_reset(&iter->refname_buf);
+ switch (iter->version) {
+ case 1:
+ return next_record_v1(iter);
- if (iter->pos == iter->eof)
- return ITER_DONE;
-
- iter->base.flags = REF_ISPACKED;
-
- if (iter->eof - p < the_hash_algo->hexsz + 2 ||
- parse_oid_hex(p, &iter->oid, &p) ||
- !isspace(*p++))
- die_invalid_line(iter->snapshot->refs->path,
- iter->pos, iter->eof - iter->pos);
-
- eol = memchr(p, '\n', iter->eof - p);
- if (!eol)
- die_unterminated_line(iter->snapshot->refs->path,
- iter->pos, iter->eof - iter->pos);
-
- strbuf_add(&iter->refname_buf, p, eol - p);
- iter->base.refname = iter->refname_buf.buf;
-
- if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
- if (!refname_is_safe(iter->base.refname))
- die("packed refname is dangerous: %s",
- iter->base.refname);
- oidclr(&iter->oid);
- iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
- }
- if (iter->snapshot->peeled == PEELED_FULLY ||
- (iter->snapshot->peeled == PEELED_TAGS &&
- starts_with(iter->base.refname, "refs/tags/")))
- iter->base.flags |= REF_KNOWS_PEELED;
-
- iter->pos = eol + 1;
-
- if (iter->pos < iter->eof && *iter->pos == '^') {
- p = iter->pos + 1;
- if (iter->eof - p < the_hash_algo->hexsz + 1 ||
- parse_oid_hex(p, &iter->peeled, &p) ||
- *p++ != '\n')
- die_invalid_line(iter->snapshot->refs->path,
- iter->pos, iter->eof - iter->pos);
- iter->pos = p;
+ case 2:
+ return next_record_v2(iter);
- /*
- * Regardless of what the file header said, we
- * definitely know the value of *this* reference. But
- * we suppress it if the reference is broken:
- */
- if ((iter->base.flags & REF_ISBROKEN)) {
- oidclr(&iter->peeled);
- iter->base.flags &= ~REF_KNOWS_PEELED;
- } else {
- iter->base.flags |= REF_KNOWS_PEELED;
- }
- } else {
- oidclr(&iter->peeled);
+ default:
+ return -1;
}
-
- return ITER_OK;
}
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
@@ -926,6 +434,7 @@ static struct ref_iterator *packed_ref_iterator_begin(
struct packed_ref_iterator *iter;
struct ref_iterator *ref_iterator;
unsigned int required_flags = REF_STORE_READ;
+ size_t v2_row = 0;
if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
required_flags |= REF_STORE_ODB;
@@ -938,10 +447,21 @@ static struct ref_iterator *packed_ref_iterator_begin(
*/
snapshot = get_snapshot(refs);
- if (prefix && *prefix)
- start = find_reference_location(snapshot, prefix, 0);
- else
- start = snapshot->start;
+ if (!snapshot || snapshot->version < 0 || snapshot->version > 2)
+ return empty_ref_iterator_begin();
+
+ if (prefix && *prefix) {
+ if (snapshot->version == 1)
+ start = find_reference_location_v1(snapshot, prefix, 0);
+ else
+ start = find_reference_location_v2(snapshot, prefix, 0,
+ &v2_row);
+ } else {
+ if (snapshot->version == 1)
+ start = snapshot->start;
+ else
+ start = snapshot->refs_chunk;
+ }
if (start == snapshot->eof)
return empty_ref_iterator_begin();
@@ -952,6 +472,10 @@ static struct ref_iterator *packed_ref_iterator_begin(
iter->snapshot = snapshot;
acquire_snapshot(snapshot);
+ iter->version = snapshot->version;
+ iter->row = v2_row;
+
+ init_iterator_prefix_info(prefix, iter);
iter->pos = start;
iter->eof = snapshot->eof;
@@ -969,23 +493,6 @@ static struct ref_iterator *packed_ref_iterator_begin(
return ref_iterator;
}
-/*
- * Write an entry to the packed-refs file for the specified refname.
- * If peeled is non-NULL, write it as the entry's peeled value. On
- * error, return a nonzero value and leave errno set at the value left
- * by the failing call to `fprintf()`.
- */
-static int write_packed_entry(FILE *fh, const char *refname,
- const struct object_id *oid,
- const struct object_id *peeled)
-{
- if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||
- (peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))
- return -1;
-
- return 0;
-}
-
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
struct packed_ref_store *refs =
@@ -1067,17 +574,6 @@ int packed_refs_is_locked(struct ref_store *ref_store)
return is_lock_file_locked(&refs->lock);
}
-/*
- * The packed-refs header line that we write out. Perhaps other traits
- * will be added later.
- *
- * Note that earlier versions of Git used to parse these traits by
- * looking for " trait " in the line. For this reason, the space after
- * the colon and the trailing space are required.
- */
-static const char PACKED_REFS_HEADER[] =
- "# pack-refs with: peeled fully-peeled sorted \n";
-
static int packed_init_db(struct ref_store *ref_store UNUSED,
struct strbuf *err UNUSED)
{
@@ -1085,56 +581,20 @@ static int packed_init_db(struct ref_store *ref_store UNUSED,
return 0;
}
-/*
- * Write the packed refs from the current snapshot to the packed-refs
- * tempfile, incorporating any changes from `updates`. `updates` must
- * be a sorted string list whose keys are the refnames and whose util
- * values are `struct ref_update *`. On error, rollback the tempfile,
- * write an error message to `err`, and return a nonzero value.
- *
- * The packfile must be locked before calling this function and will
- * remain locked when it is done.
- */
-static int write_with_updates(struct packed_ref_store *refs,
- struct string_list *updates,
- struct strbuf *err)
+static void add_write_error(struct packed_ref_store *refs, struct strbuf *err)
{
- struct ref_iterator *iter = NULL;
- size_t i;
- int ok;
- FILE *out;
- struct strbuf sb = STRBUF_INIT;
- char *packed_refs_path;
-
- if (!is_lock_file_locked(&refs->lock))
- BUG("write_with_updates() called while unlocked");
-
- /*
- * If packed-refs is a symlink, we want to overwrite the
- * symlinked-to file, not the symlink itself. Also, put the
- * staging file next to it:
- */
- packed_refs_path = get_locked_file_path(&refs->lock);
- strbuf_addf(&sb, "%s.new", packed_refs_path);
- free(packed_refs_path);
- refs->tempfile = create_tempfile(sb.buf);
- if (!refs->tempfile) {
- strbuf_addf(err, "unable to create file %s: %s",
- sb.buf, strerror(errno));
- strbuf_release(&sb);
- return -1;
- }
- strbuf_release(&sb);
-
- out = fdopen_tempfile(refs->tempfile, "w");
- if (!out) {
- strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
- strerror(errno));
- goto error;
- }
+ strbuf_addf(err, "error writing to %s: %s",
+ get_tempfile_path(refs->tempfile), strerror(errno));
+}
- if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
- goto write_error;
+int merge_iterator_and_updates(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err,
+ write_ref_fn write_fn,
+ void *write_data)
+{
+ struct ref_iterator *iter = NULL;
+ int ok, i;
/*
* We iterate in parallel through the current list of refs and
@@ -1227,10 +687,13 @@ static int write_with_updates(struct packed_ref_store *refs,
struct object_id peeled;
int peel_error = ref_iterator_peel(iter, &peeled);
- if (write_packed_entry(out, iter->refname,
- iter->oid,
- peel_error ? NULL : &peeled))
- goto write_error;
+ if (write_fn(iter->refname,
+ iter->oid,
+ peel_error ? NULL : &peeled,
+ write_data)) {
+ add_write_error(refs, err);
+ goto error;
+ }
if ((ok = ref_iterator_advance(iter)) != ITER_OK)
iter = NULL;
@@ -1248,15 +711,133 @@ static int write_with_updates(struct packed_ref_store *refs,
int peel_error = peel_object(&update->new_oid,
&peeled);
- if (write_packed_entry(out, update->refname,
- &update->new_oid,
- peel_error ? NULL : &peeled))
- goto write_error;
+ if (write_fn(update->refname,
+ &update->new_oid,
+ peel_error ? NULL : &peeled,
+ write_data)) {
+ add_write_error(refs, err);
+ goto error;
+ }
i++;
}
}
+error:
+ if (iter)
+ ref_iterator_abort(iter);
+ return ok;
+}
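merge_iterator_and_updates() now isolates the merge of existing packed refs with pending updates from any particular on-disk format: every surviving entry is pushed through the write_ref_fn callback declared in packed-backend.h. A minimal illustrative callback (a sketch that assumes Git's internal headers; not part of this series) which only counts what it is handed:

static int count_packed_entries(const char *refname,
				const struct object_id *oid,
				const struct object_id *peeled,
				void *write_data)
{
	size_t *nr = write_data;

	/* A real callback would serialize refname, oid and peeled here. */
	(*nr)++;
	return 0;	/* nonzero makes the merge loop report a write error */
}

write_packed_entry_v1() and write_packed_entry_v2() in the new format files are the two real implementations of this contract.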
+
+static int write_with_updates_v1(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err)
+{
+ FILE *out;
+
+ out = fdopen_tempfile(refs->tempfile, "w");
+ if (!out) {
+ strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
+ strerror(errno));
+ goto error;
+ }
+
+ if (write_packed_file_header_v1(out) < 0) {
+ add_write_error(refs, err);
+ goto error;
+ }
+
+ return merge_iterator_and_updates(refs, updates, err,
+ write_packed_entry_v1, out);
+
+error:
+ return -1;
+}
+
+static int write_with_updates_v2(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err)
+{
+ struct write_packed_refs_v2_context *ctx = create_v2_context(refs, updates, err);
+ int ok = -1;
+
+ if ((ok = write_packed_refs_v2(ctx)) < 0)
+ add_write_error(refs, err);
+
+ free_v2_context(ctx);
+ return ok;
+}
+
+/*
+ * Write the packed refs from the current snapshot to the packed-refs
+ * tempfile, incorporating any changes from `updates`. `updates` must
+ * be a sorted string list whose keys are the refnames and whose util
+ * values are `struct ref_update *`. On error, rollback the tempfile,
+ * write an error message to `err`, and return a nonzero value.
+ *
+ * The packfile must be locked before calling this function and will
+ * remain locked when it is done.
+ */
+static int write_with_updates(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err)
+{
+ int ok;
+ struct strbuf sb = STRBUF_INIT;
+ char *packed_refs_path;
+ int version;
+
+ if (!is_lock_file_locked(&refs->lock))
+ BUG("write_with_updates() called while unlocked");
+
+ /*
+ * If packed-refs is a symlink, we want to overwrite the
+ * symlinked-to file, not the symlink itself. Also, put the
+ * staging file next to it:
+ */
+ packed_refs_path = get_locked_file_path(&refs->lock);
+ strbuf_addf(&sb, "%s.new", packed_refs_path);
+ free(packed_refs_path);
+ refs->tempfile = create_tempfile(sb.buf);
+ if (!refs->tempfile) {
+ strbuf_addf(err, "unable to create file %s: %s",
+ sb.buf, strerror(errno));
+ strbuf_release(&sb);
+ return -1;
+ }
+ strbuf_release(&sb);
+
+ if (!(version = git_env_ulong("GIT_TEST_PACKED_REFS_VERSION", 0)) &&
+ git_config_get_int("refs.packedrefsversion", &version)) {
+ /*
+ * Set the default depending on the current extension
+ * list. Default to version 1 if available, but allow a
+ * default of 2 if only "packed-v2" exists.
+ */
+ if (refs->store_flags & REF_STORE_FORMAT_PACKED)
+ version = 1;
+ else if (refs->store_flags & REF_STORE_FORMAT_PACKED_V2)
+ version = 2;
+ else
+ BUG("writing a packed-refs file without an extension");
+ }
+
+ switch (version) {
+ case 1:
+ ok = write_with_updates_v1(refs, updates, err);
+ break;
+
+ case 2:
+		/* Map the v2 writer's 0/-1 result onto iterator-style codes. */
+ ok = write_with_updates_v2(refs, updates, err) ? -2 : ITER_DONE;
+ break;
+
+ default:
+ strbuf_addf(err, "unknown packed-refs version: %d",
+ version);
+ goto error;
+ }
+
if (ok != ITER_DONE) {
strbuf_addstr(err, "unable to write packed-refs file: "
"error iterating over old contents");
@@ -1275,14 +856,7 @@ static int write_with_updates(struct packed_ref_store *refs,
return 0;
-write_error:
- strbuf_addf(err, "error writing to %s: %s",
- get_tempfile_path(refs->tempfile), strerror(errno));
-
error:
- if (iter)
- ref_iterator_abort(iter);
-
delete_tempfile(&refs->tempfile);
return -1;
}
diff --git a/refs/packed-backend.h b/refs/packed-backend.h
index 9dd8a344c3..1936bb5c76 100644
--- a/refs/packed-backend.h
+++ b/refs/packed-backend.h
@@ -1,6 +1,10 @@
#ifndef REFS_PACKED_BACKEND_H
#define REFS_PACKED_BACKEND_H
+#include "../cache.h"
+#include "refs-internal.h"
+#include "../lockfile.h"
+
struct repository;
struct ref_transaction;
@@ -36,4 +40,281 @@ int packed_refs_is_locked(struct ref_store *ref_store);
int is_packed_transaction_needed(struct ref_store *ref_store,
struct ref_transaction *transaction);
+struct packed_ref_store;
+
+/*
+ * A `snapshot` represents one snapshot of a `packed-refs` file.
+ *
+ * Normally, this will be a mmapped view of the contents of the
+ * `packed-refs` file at the time the snapshot was created. However,
+ * if the `packed-refs` file was not sorted, this might point at heap
+ * memory holding the contents of the `packed-refs` file with its
+ * records sorted by refname.
+ *
+ * `snapshot` instances are reference counted (via
+ * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
+ * an instance from disappearing while an iterator is still iterating
+ * over it. Instances are garbage collected when their `referrers`
+ * count goes to zero.
+ *
+ * The most recent `snapshot`, if available, is referenced by the
+ * `packed_ref_store`. Its freshness is checked whenever
+ * `get_snapshot()` is called; if the existing snapshot is obsolete, a
+ * new snapshot is taken.
+ */
+struct snapshot {
+ /*
+ * A back-pointer to the packed_ref_store with which this
+ * snapshot is associated:
+ */
+ struct packed_ref_store *refs;
+
+ /* Is the `packed-refs` file currently mmapped? */
+ int mmapped;
+
+ /* which file format version is this file? */
+ int version;
+
+ /*
+ * The contents of the `packed-refs` file:
+ *
+ * - buf -- a pointer to the start of the memory
+ * - start -- a pointer to the first byte of actual references
+ * (i.e., after the header line, if one is present)
+ * - eof -- a pointer just past the end of the reference
+ * contents
+ *
+ * If the `packed-refs` file was already sorted, `buf` points
+ * at the mmapped contents of the file. If not, it points at
+ * heap-allocated memory containing the contents, sorted. If
+ * there were no contents (e.g., because the file didn't
+ * exist), `buf`, `start`, and `eof` are all NULL.
+ */
+ char *buf, *start, *eof;
+
+ /*
+ * What is the peeled state of the `packed-refs` file that
+ * this snapshot represents? (This is usually determined from
+ * the file's header.)
+ */
+ enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;
+
+ /*************************
+ * packed-refs v2 values *
+ *************************/
+ size_t nr;
+ size_t prefixes_nr;
+ size_t buflen;
+ const unsigned char *offset_chunk;
+ const char *refs_chunk;
+ const unsigned char *prefix_offsets_chunk;
+ const char *prefix_chunk;
+
+ /*
+ * Count of references to this instance, including the pointer
+ * from `packed_ref_store::snapshot`, if any. The instance
+ * will not be freed as long as the reference count is
+ * nonzero.
+ */
+ unsigned int referrers;
+
+ /*
+ * The metadata of the `packed-refs` file from which this
+ * snapshot was created, used to tell if the file has been
+ * replaced since we read it.
+ */
+ struct stat_validity validity;
+};
+
+int release_snapshot(struct snapshot *snapshot);
+
+/*
+ * If the buffer in `snapshot` is active, then either munmap the
+ * memory and close the file, or free the memory. Then set the buffer
+ * pointers to NULL.
+ */
+void clear_snapshot_buffer(struct snapshot *snapshot);
+
+/*
+ * A `ref_store` representing references stored in a `packed-refs`
+ * file. It implements the `ref_store` interface, though it has some
+ * limitations:
+ *
+ * - It cannot store symbolic references.
+ *
+ * - It cannot store reflogs.
+ *
+ * - It does not support reference renaming (though it could).
+ *
+ * On the other hand, it can be locked outside of a reference
+ * transaction. In that case, it remains locked even after the
+ * transaction is done and the new `packed-refs` file is activated.
+ */
+struct packed_ref_store {
+ struct ref_store base;
+
+ unsigned int store_flags;
+
+ /* The path of the "packed-refs" file: */
+ char *path;
+
+ /*
+ * A snapshot of the values read from the `packed-refs` file,
+ * if it might still be current; otherwise, NULL.
+ */
+ struct snapshot *snapshot;
+
+ /*
+ * Lock used for the "packed-refs" file. Note that this (and
+ * thus the enclosing `packed_ref_store`) must not be freed.
+ */
+ struct lock_file lock;
+
+ /*
+ * Temporary file used when rewriting new contents to the
+ * "packed-refs" file. Note that this (and thus the enclosing
+ * `packed_ref_store`) must not be freed.
+ */
+ struct tempfile *tempfile;
+};
+
+/*
+ * This value is set in `base.flags` if the peeled value of the
+ * current reference is known. In that case, `peeled` contains the
+ * correct peeled value for the reference, which might be `null_oid`
+ * if the reference is not a tag or if it is broken.
+ */
+#define REF_KNOWS_PEELED 0x40
+
+/*
+ * An iterator over a snapshot of a `packed-refs` file.
+ */
+struct packed_ref_iterator {
+ struct ref_iterator base;
+ struct snapshot *snapshot;
+ struct repository *repo;
+ unsigned int flags;
+ int version;
+
+ /* Scratch space for current values: */
+ struct object_id oid, peeled;
+ struct strbuf refname_buf;
+
+ /* The current position in the snapshot's buffer: */
+ const char *pos;
+
+ /***********************************
+ * packed-refs v1 iterator values. *
+ ***********************************/
+
+ /* The end of the part of the buffer that will be iterated over: */
+ const char *eof;
+
+ /***********************************
+ * packed-refs v2 iterator values. *
+ ***********************************/
+ size_t nr;
+ size_t row;
+ size_t prefix_row_end;
+ size_t prefix_i;
+ const char *cur_prefix;
+};
+
+typedef int (*write_ref_fn)(const char *refname,
+ const struct object_id *oid,
+ const struct object_id *peeled,
+ void *write_data);
+
+int merge_iterator_and_updates(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err,
+ write_ref_fn write_fn,
+ void *write_data);
+
+/**
+ * Parse the buffer at the given snapshot to verify that it is a
+ * packed-refs file in version 1 format. Update the snapshot->peeled
+ * value according to the header information. Update the given
+ * 'sorted' value with whether or not the packed-refs file is sorted.
+ */
+int parse_packed_format_v1_header(struct packed_ref_store *refs,
+ struct snapshot *snapshot,
+ int *sorted);
+
+/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+const char *find_reference_location_v1(struct snapshot *snapshot,
+ const char *refname, int mustexist);
+
+int packed_read_raw_ref_v1(struct packed_ref_store *refs, struct snapshot *snapshot,
+ const char *refname, struct object_id *oid,
+ unsigned int *type, int *failure_errno);
+
+void verify_buffer_safe_v1(struct snapshot *snapshot);
+void sort_snapshot_v1(struct snapshot *snapshot);
+int write_packed_file_header_v1(FILE *out);
+int next_record_v1(struct packed_ref_iterator *iter);
+int write_packed_entry_v1(const char *refname,
+ const struct object_id *oid,
+ const struct object_id *peeled,
+ void *write_data);
+
+/**
+ * Detect whether the buffer in the given snapshot begins with the
+ * packed-refs v2 signature. Returns 1 if the signature is present,
+ * 0 otherwise.
+ */
+int detect_packed_format_v2_header(struct packed_ref_store *refs,
+ struct snapshot *snapshot);
+/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+const char *find_reference_location_v2(struct snapshot *snapshot,
+ const char *refname, int mustexist,
+ size_t *pos);
+
+int packed_read_raw_ref_v2(struct packed_ref_store *refs, struct snapshot *snapshot,
+ const char *refname, struct object_id *oid,
+ unsigned int *type, int *failure_errno);
+int next_record_v2(struct packed_ref_iterator *iter);
+void fill_snapshot_v2(struct snapshot *snapshot);
+
+struct write_packed_refs_v2_context;
+struct write_packed_refs_v2_context *create_v2_context(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err);
+int write_packed_refs_v2(struct write_packed_refs_v2_context *ctx);
+void free_v2_context(struct write_packed_refs_v2_context *ctx);
+
+void init_iterator_prefix_info(const char *prefix,
+ struct packed_ref_iterator *iter);
+
#endif /* REFS_PACKED_BACKEND_H */
diff --git a/refs/packed-format-v1.c b/refs/packed-format-v1.c
new file mode 100644
index 0000000000..2d071567c0
--- /dev/null
+++ b/refs/packed-format-v1.c
@@ -0,0 +1,456 @@
+#include "../cache.h"
+#include "../config.h"
+#include "../refs.h"
+#include "refs-internal.h"
+#include "packed-backend.h"
+#include "../iterator.h"
+#include "../lockfile.h"
+#include "../chdir-notify.h"
+
+static NORETURN void die_unterminated_line(const char *path,
+ const char *p, size_t len)
+{
+ if (len < 80)
+ die("unterminated line in %s: %.*s", path, (int)len, p);
+ else
+ die("unterminated line in %s: %.75s...", path, p);
+}
+
+static NORETURN void die_invalid_line(const char *path,
+ const char *p, size_t len)
+{
+ const char *eol = memchr(p, '\n', len);
+
+ if (!eol)
+ die_unterminated_line(path, p, len);
+ else if (eol - p < 80)
+ die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
+ else
+ die("unexpected line in %s: %.75s...", path, p);
+}
+
+struct snapshot_record {
+ const char *start;
+ size_t len;
+};
+
+static int cmp_packed_ref_records(const void *v1, const void *v2)
+{
+ const struct snapshot_record *e1 = v1, *e2 = v2;
+ const char *r1 = e1->start + the_hash_algo->hexsz + 1;
+ const char *r2 = e2->start + the_hash_algo->hexsz + 1;
+
+ while (1) {
+ if (*r1 == '\n')
+ return *r2 == '\n' ? 0 : -1;
+ if (*r1 != *r2) {
+ if (*r2 == '\n')
+ return 1;
+ else
+ return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
+ }
+ r1++;
+ r2++;
+ }
+}
+
+/*
+ * Compare a snapshot record at `rec` to the specified NUL-terminated
+ * refname.
+ */
+static int cmp_record_to_refname(const char *rec, const char *refname)
+{
+ const char *r1 = rec + the_hash_algo->hexsz + 1;
+ const char *r2 = refname;
+
+ while (1) {
+ if (*r1 == '\n')
+ return *r2 ? -1 : 0;
+ if (!*r2)
+ return 1;
+ if (*r1 != *r2)
+ return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
+ r1++;
+ r2++;
+ }
+}
+
+/*
+ * `snapshot->buf` is not known to be sorted. Check whether it is, and
+ * if not, sort it into new memory and munmap/free the old storage.
+ */
+void sort_snapshot_v1(struct snapshot *snapshot)
+{
+ struct snapshot_record *records = NULL;
+ size_t alloc = 0, nr = 0;
+ int sorted = 1;
+ const char *pos, *eof, *eol;
+ size_t len, i;
+ char *new_buffer, *dst;
+
+ pos = snapshot->start;
+ eof = snapshot->eof;
+
+ if (pos == eof)
+ return;
+
+ len = eof - pos;
+
+ /*
+ * Initialize records based on a crude estimate of the number
+ * of references in the file (we'll grow it below if needed):
+ */
+ ALLOC_GROW(records, len / 80 + 20, alloc);
+
+ while (pos < eof) {
+ eol = memchr(pos, '\n', eof - pos);
+ if (!eol)
+ /* The safety check should prevent this. */
+ BUG("unterminated line found in packed-refs");
+ if (eol - pos < the_hash_algo->hexsz + 2)
+ die_invalid_line(snapshot->refs->path,
+ pos, eof - pos);
+ eol++;
+ if (eol < eof && *eol == '^') {
+ /*
+ * Keep any peeled line together with its
+ * reference:
+ */
+ const char *peeled_start = eol;
+
+ eol = memchr(peeled_start, '\n', eof - peeled_start);
+ if (!eol)
+ /* The safety check should prevent this. */
+ BUG("unterminated peeled line found in packed-refs");
+ eol++;
+ }
+
+ ALLOC_GROW(records, nr + 1, alloc);
+ records[nr].start = pos;
+ records[nr].len = eol - pos;
+ nr++;
+
+ if (sorted &&
+ nr > 1 &&
+ cmp_packed_ref_records(&records[nr - 2],
+ &records[nr - 1]) >= 0)
+ sorted = 0;
+
+ pos = eol;
+ }
+
+ if (sorted)
+ goto cleanup;
+
+ /* We need to sort the memory. First we sort the records array: */
+ QSORT(records, nr, cmp_packed_ref_records);
+
+ /*
+ * Allocate a new chunk of memory, and copy the old memory to
+ * the new in the order indicated by `records` (not bothering
+ * with the header line):
+ */
+ new_buffer = xmalloc(len);
+ for (dst = new_buffer, i = 0; i < nr; i++) {
+ memcpy(dst, records[i].start, records[i].len);
+ dst += records[i].len;
+ }
+
+ /*
+ * Now munmap the old buffer and use the sorted buffer in its
+ * place:
+ */
+ clear_snapshot_buffer(snapshot);
+ snapshot->buf = snapshot->start = new_buffer;
+ snapshot->eof = new_buffer + len;
+
+cleanup:
+ free(records);
+}
+
+/*
+ * Return a pointer to the start of the record that contains the
+ * character `*p` (which must be within the buffer). If no other
+ * record start is found, return `buf`.
+ */
+static const char *find_start_of_record(const char *buf, const char *p)
+{
+ while (p > buf && (p[-1] != '\n' || p[0] == '^'))
+ p--;
+ return p;
+}
+
+/*
+ * Return a pointer to the start of the record following the record
+ * that contains `*p`. If none is found before `end`, return `end`.
+ */
+static const char *find_end_of_record(const char *p, const char *end)
+{
+ while (++p < end && (p[-1] != '\n' || p[0] == '^'))
+ ;
+ return p;
+}
+
+/*
+ * We want to be able to compare mmapped reference records quickly,
+ * without totally parsing them. We can do so because the records are
+ * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
+ * + 1) bytes past the beginning of the record.
+ *
+ * But what if the `packed-refs` file contains garbage? We're willing
+ * to tolerate not detecting the problem, as long as we don't produce
+ * totally garbled output (we can't afford to check the integrity of
+ * the whole file during every Git invocation). But we do want to be
+ * sure that we never read past the end of the buffer in memory and
+ * perform an illegal memory access.
+ *
+ * Guarantee that minimum level of safety by verifying that the last
+ * record in the file is LF-terminated, and that it has at least
+ * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
+ * these checks fails.
+ */
+void verify_buffer_safe_v1(struct snapshot *snapshot)
+{
+ const char *start = snapshot->start;
+ const char *eof = snapshot->eof;
+ const char *last_line;
+
+ if (start == eof)
+ return;
+
+ last_line = find_start_of_record(start, eof - 1);
+ if (*(eof - 1) != '\n' || eof - last_line < the_hash_algo->hexsz + 2)
+ die_invalid_line(snapshot->refs->path,
+ last_line, eof - last_line);
+}
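In concrete terms, with SHA-1 object names the check above insists that the final record end in LF and carry at least 41 bytes before it -- the 40 hex digits plus the separating byte -- which is exactly enough to keep cmp_record_to_refname() and the oid parsing from walking past eof, even if the rest of the file is garbage.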
+
+/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+const char *find_reference_location_v1(struct snapshot *snapshot,
+ const char *refname, int mustexist)
+{
+ /*
+ * This is not *quite* a garden-variety binary search, because
+ * the data we're searching is made up of records, and we
+ * always need to find the beginning of a record to do a
+ * comparison. A "record" here is one line for the reference
+ * itself and zero or one peel lines that start with '^'. Our
+ * loop invariant is described in the next two comments.
+ */
+
+ /*
+ * A pointer to the character at the start of a record whose
+ * preceding records all have reference names that come
+ * *before* `refname`.
+ */
+ const char *lo = snapshot->start;
+
+ /*
+	 * A pointer to the first character of a record whose
+ * reference name comes *after* `refname`.
+ */
+ const char *hi = snapshot->eof;
+
+ while (lo != hi) {
+ const char *mid, *rec;
+ int cmp;
+
+ mid = lo + (hi - lo) / 2;
+ rec = find_start_of_record(lo, mid);
+ cmp = cmp_record_to_refname(rec, refname);
+ if (cmp < 0) {
+ lo = find_end_of_record(mid, hi);
+ } else if (cmp > 0) {
+ hi = rec;
+ } else {
+ return rec;
+ }
+ }
+
+ if (mustexist)
+ return NULL;
+ else
+ return lo;
+}
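The search above never compares from the middle of a record: every probe first rewinds to a record boundary, and peel lines starting with '^' travel with the preceding record. The same technique in a self-contained toy form (plain newline-terminated names, no peel lines or object names; not Git code):

#include <stdio.h>
#include <string.h>

static const char *start_of_record(const char *buf, const char *p)
{
	while (p > buf && p[-1] != '\n')
		p--;
	return p;
}

static const char *find_record(const char *buf, const char *eof,
			       const char *name)
{
	const char *lo = buf, *hi = eof;

	while (lo != hi) {
		const char *mid = lo + (hi - lo) / 2;
		const char *rec = start_of_record(lo, mid);
		size_t len = strcspn(rec, "\n");
		int cmp = strncmp(rec, name, len);

		if (!cmp && !name[len])
			return rec;			/* exact match */
		if (cmp <= 0)
			lo = memchr(rec, '\n', eof - rec) + 1;
		else
			hi = rec;
	}
	return NULL;
}

int main(void)
{
	const char buf[] =
		"refs/heads/main\nrefs/tags/v1.0\nrefs/tags/v2.0\n";
	const char *eof = buf + sizeof(buf) - 1;

	printf("%s\n", find_record(buf, eof, "refs/tags/v1.0") ?
	       "found" : "missing");	/* found */
	printf("%s\n", find_record(buf, eof, "refs/heads/nope") ?
	       "found" : "missing");	/* missing */
	return 0;
}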
+
+int parse_packed_format_v1_header(struct packed_ref_store *refs,
+ struct snapshot *snapshot,
+ int *sorted)
+{
+ *sorted = 0;
+ /* If the file has a header line, process it: */
+ if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
+ char *tmp, *p, *eol;
+ struct string_list traits = STRING_LIST_INIT_NODUP;
+
+ eol = memchr(snapshot->buf, '\n',
+ snapshot->eof - snapshot->buf);
+ if (!eol)
+ die_unterminated_line(refs->path,
+ snapshot->buf,
+ snapshot->eof - snapshot->buf);
+
+ tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
+
+ if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
+ die_invalid_line(refs->path,
+ snapshot->buf,
+ snapshot->eof - snapshot->buf);
+
+ string_list_split_in_place(&traits, p, ' ', -1);
+
+ if (unsorted_string_list_has_string(&traits, "fully-peeled"))
+ snapshot->peeled = PEELED_FULLY;
+ else if (unsorted_string_list_has_string(&traits, "peeled"))
+ snapshot->peeled = PEELED_TAGS;
+
+ *sorted = unsorted_string_list_has_string(&traits, "sorted");
+
+ /* perhaps other traits later as well */
+
+ /* The "+ 1" is for the LF character. */
+ snapshot->start = eol + 1;
+
+ string_list_clear(&traits, 0);
+ free(tmp);
+ }
+
+ return 0;
+}
+
+int packed_read_raw_ref_v1(struct packed_ref_store *refs, struct snapshot *snapshot,
+ const char *refname, struct object_id *oid,
+ unsigned int *type, int *failure_errno)
+{
+ const char *rec;
+
+ *type = 0;
+
+ rec = find_reference_location_v1(snapshot, refname, 1);
+
+ if (!rec) {
+ /* refname is not a packed reference. */
+ *failure_errno = ENOENT;
+ return -1;
+ }
+
+ if (get_oid_hex(rec, oid))
+ die_invalid_line(refs->path, rec, snapshot->eof - rec);
+
+ *type = REF_ISPACKED;
+ return 0;
+}
+
+int next_record_v1(struct packed_ref_iterator *iter)
+{
+ const char *p = iter->pos, *eol;
+
+ strbuf_reset(&iter->refname_buf);
+
+ if (iter->pos == iter->eof)
+ return ITER_DONE;
+
+ iter->base.flags = REF_ISPACKED;
+
+ if (iter->eof - p < the_hash_algo->hexsz + 2 ||
+ parse_oid_hex(p, &iter->oid, &p) ||
+ !isspace(*p++))
+ die_invalid_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+
+ eol = memchr(p, '\n', iter->eof - p);
+ if (!eol)
+ die_unterminated_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+
+ strbuf_add(&iter->refname_buf, p, eol - p);
+ iter->base.refname = iter->refname_buf.buf;
+
+ if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!refname_is_safe(iter->base.refname))
+ die("packed refname is dangerous: %s",
+ iter->base.refname);
+ oidclr(&iter->oid);
+ iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
+ }
+ if (iter->snapshot->peeled == PEELED_FULLY ||
+ (iter->snapshot->peeled == PEELED_TAGS &&
+ starts_with(iter->base.refname, "refs/tags/")))
+ iter->base.flags |= REF_KNOWS_PEELED;
+
+ iter->pos = eol + 1;
+
+ if (iter->pos < iter->eof && *iter->pos == '^') {
+ p = iter->pos + 1;
+ if (iter->eof - p < the_hash_algo->hexsz + 1 ||
+ parse_oid_hex(p, &iter->peeled, &p) ||
+ *p++ != '\n')
+ die_invalid_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+ iter->pos = p;
+
+ /*
+ * Regardless of what the file header said, we
+ * definitely know the value of *this* reference. But
+ * we suppress it if the reference is broken:
+ */
+ if ((iter->base.flags & REF_ISBROKEN)) {
+ oidclr(&iter->peeled);
+ iter->base.flags &= ~REF_KNOWS_PEELED;
+ } else {
+ iter->base.flags |= REF_KNOWS_PEELED;
+ }
+ } else {
+ oidclr(&iter->peeled);
+ }
+
+ return ITER_OK;
+}
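Put together with the header constant and write_packed_entry_v1() below, a complete v1 file looks like this (dummy object names, for illustration only):

# pack-refs with: peeled fully-peeled sorted 
1111111111111111111111111111111111111111 refs/heads/main
2222222222222222222222222222222222222222 refs/tags/v1.0
^3333333333333333333333333333333333333333

Each record is "<hex-oid> SP <refname> LF", an optional '^' line carries the peeled value of the annotated tag directly above it, and the header keeps its trailing space for the sake of older trait parsers.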
+
+/*
+ * The packed-refs header line that we write out. Perhaps other traits
+ * will be added later.
+ *
+ * Note that earlier versions of Git used to parse these traits by
+ * looking for " trait " in the line. For this reason, the space after
+ * the colon and the trailing space are required.
+ */
+static const char PACKED_REFS_HEADER[] =
+ "# pack-refs with: peeled fully-peeled sorted \n";
+
+int write_packed_file_header_v1(FILE *out)
+{
+ return fprintf(out, "%s", PACKED_REFS_HEADER);
+}
+
+/*
+ * Write an entry to the packed-refs file for the specified refname.
+ * If peeled is non-NULL, write it as the entry's peeled value. On
+ * error, return a nonzero value and leave errno set at the value left
+ * by the failing call to `fprintf()`.
+ */
+int write_packed_entry_v1(const char *refname,
+ const struct object_id *oid,
+ const struct object_id *peeled,
+ void *write_data)
+{
+ FILE *fh = write_data;
+
+ if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||
+ (peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))
+ return -1;
+
+ return 0;
+}
diff --git a/refs/packed-format-v2.c b/refs/packed-format-v2.c
new file mode 100644
index 0000000000..ada34bf9bf
--- /dev/null
+++ b/refs/packed-format-v2.c
@@ -0,0 +1,624 @@
+#include "../cache.h"
+#include "../config.h"
+#include "../refs.h"
+#include "refs-internal.h"
+#include "packed-backend.h"
+#include "../iterator.h"
+#include "../lockfile.h"
+#include "../chdir-notify.h"
+#include "../chunk-format.h"
+#include "../csum-file.h"
+
+#define OFFSET_IS_PEELED (((uint64_t)1) << 63)
+
+#define PACKED_REFS_SIGNATURE 0x50524546 /* "PREF" */
+#define CHREFS_CHUNKID_OFFSETS 0x524F4646 /* "ROFF" */
+#define CHREFS_CHUNKID_REFS 0x52454653 /* "REFS" */
+#define CHREFS_CHUNKID_PREFIX_DATA 0x50465844 /* "PFXD" */
+#define CHREFS_CHUNKID_PREFIX_OFFSETS 0x5046584F /* "PFXO" */
+
+static const char *get_nth_prefix(struct snapshot *snapshot,
+ size_t n, size_t *len)
+{
+ uint64_t offset, next_offset;
+
+ if (n >= snapshot->prefixes_nr)
+ BUG("asking for prefix %"PRIu64" outside of bounds (%"PRIu64")",
+ (uint64_t)n, (uint64_t)snapshot->prefixes_nr);
+
+ if (n)
+ offset = get_be32(snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * (n - 1));
+ else
+ offset = 0;
+
+ if (len) {
+ next_offset = get_be32(snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * n);
+
+ /* Prefix includes null terminator. */
+ *len = next_offset - offset - 1;
+ }
+
+ return snapshot->prefix_chunk + offset;
+}
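As a worked illustration (values assumed, not taken from a real repository): with the prefixes "refs/heads/" and "refs/tags/" covering two branch rows followed by one tag row, the PFXD chunk stores the two NUL-terminated strings back to back and the PFXO chunk stores the pairs (12, 2) and (23, 3), each pair being the cumulative end offset of the prefix string and the index one past its last ref row. get_nth_prefix() above therefore finds prefix n starting at the previous pair's first value (0 for n = 0) and derives its length -- 11 and 10 here -- by subtracting and dropping the NUL.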
+
+/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+static const char *find_prefix_location(struct snapshot *snapshot,
+ const char *refname, size_t *pos)
+{
+ size_t lo = 0, hi = snapshot->prefixes_nr;
+
+ while (lo != hi) {
+ const char *rec;
+ int cmp;
+ size_t len;
+ size_t mid = lo + (hi - lo) / 2;
+
+ rec = get_nth_prefix(snapshot, mid, &len);
+ cmp = strncmp(rec, refname, len);
+ if (cmp < 0) {
+ lo = mid + 1;
+ } else if (cmp > 0) {
+ hi = mid;
+ } else {
+ /* we have a prefix match! */
+ *pos = mid;
+ return rec;
+ }
+ }
+
+ *pos = lo;
+ if (lo < snapshot->prefixes_nr)
+ return get_nth_prefix(snapshot, lo, NULL);
+ else
+ return NULL;
+}
+
+int detect_packed_format_v2_header(struct packed_ref_store *refs,
+ struct snapshot *snapshot)
+{
+ /*
+	 * packed-refs v1 might not have a header at all, so detect the
+	 * format by checking whether the v2 signature is present.
+ */
+ return get_be32(snapshot->buf) == PACKED_REFS_SIGNATURE;
+}
+
+static const char *get_nth_ref(struct snapshot *snapshot,
+ size_t n)
+{
+ uint64_t offset;
+
+ if (n >= snapshot->nr)
+ BUG("asking for position %"PRIu64" outside of bounds (%"PRIu64")",
+ (uint64_t)n, (uint64_t)snapshot->nr);
+
+ if (n)
+ offset = get_be64(snapshot->offset_chunk + (n-1) * sizeof(uint64_t))
+ & ~OFFSET_IS_PEELED;
+ else
+ offset = 0;
+
+ return snapshot->refs_chunk + offset;
+}
+
+/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+const char *find_reference_location_v2(struct snapshot *snapshot,
+ const char *refname, int mustexist,
+ size_t *pos)
+{
+ size_t lo = 0, hi = snapshot->nr;
+
+ if (snapshot->prefix_chunk) {
+ size_t prefix_row;
+ const char *prefix;
+ int found = 1;
+
+ prefix = find_prefix_location(snapshot, refname, &prefix_row);
+
+ if (!prefix || !starts_with(refname, prefix)) {
+ if (mustexist)
+ return NULL;
+ found = 0;
+ }
+
+ /* The second 4-byte column of the prefix offsets */
+ if (prefix_row) {
+ /* if prefix_row == 0, then lo = 0, which is already true. */
+ lo = get_be32(snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * (prefix_row - 1) + sizeof(uint32_t));
+ }
+
+ if (!found) {
+ const char *ret;
+ /* Terminate early with this lo position as the insertion point. */
+ if (pos)
+ *pos = lo;
+
+ if (lo >= snapshot->nr)
+ return NULL;
+
+ ret = get_nth_ref(snapshot, lo);
+ return ret;
+ }
+
+ hi = get_be32(snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * prefix_row + sizeof(uint32_t));
+
+ if (prefix)
+ refname += strlen(prefix);
+ }
+
+ while (lo != hi) {
+ const char *rec;
+ int cmp;
+ size_t mid = lo + (hi - lo) / 2;
+
+ rec = get_nth_ref(snapshot, mid);
+ cmp = strcmp(rec, refname);
+ if (cmp < 0) {
+ lo = mid + 1;
+ } else if (cmp > 0) {
+ hi = mid;
+ } else {
+ if (pos)
+ *pos = mid;
+ return rec;
+ }
+ }
+
+ if (mustexist) {
+ return NULL;
+ } else {
+ const char *ret;
+ /*
+ * We are likely doing a prefix match, so use the current
+ * 'lo' position as the indicator.
+ */
+ if (pos)
+ *pos = lo;
+ if (lo >= snapshot->nr)
+ return NULL;
+
+ ret = get_nth_ref(snapshot, lo);
+ return ret;
+ }
+}
+
+int packed_read_raw_ref_v2(struct packed_ref_store *refs, struct snapshot *snapshot,
+ const char *refname, struct object_id *oid,
+ unsigned int *type, int *failure_errno)
+{
+ const char *rec;
+
+ *type = 0;
+
+ rec = find_reference_location_v2(snapshot, refname, 1, NULL);
+
+ if (!rec) {
+ /* refname is not a packed reference. */
+ *failure_errno = ENOENT;
+ return -1;
+ }
+
+ hashcpy(oid->hash, (const unsigned char *)rec + strlen(rec) + 1);
+ oid->algo = hash_algo_by_ptr(the_hash_algo);
+
+ *type = REF_ISPACKED;
+ return 0;
+}
+
+static int packed_refs_read_offsets(const unsigned char *chunk_start,
+ size_t chunk_size, void *data)
+{
+ struct snapshot *snapshot = data;
+
+ snapshot->offset_chunk = chunk_start;
+ snapshot->nr = chunk_size / sizeof(uint64_t);
+ return 0;
+}
+
+static int packed_refs_read_prefix_offsets(const unsigned char *chunk_start,
+ size_t chunk_size, void *data)
+{
+ struct snapshot *snapshot = data;
+
+ snapshot->prefix_offsets_chunk = chunk_start;
+ snapshot->prefixes_nr = chunk_size / sizeof(uint64_t);
+ return 0;
+}
+
+void fill_snapshot_v2(struct snapshot *snapshot)
+{
+ uint32_t file_signature, file_version, hash_version;
+ struct chunkfile *cf;
+
+ file_signature = get_be32(snapshot->buf);
+ if (file_signature != PACKED_REFS_SIGNATURE)
+ die(_("%s file signature %X does not match signature %X"),
+		    "packed-refs", file_signature, PACKED_REFS_SIGNATURE);
+
+ file_version = get_be32(snapshot->buf + sizeof(uint32_t));
+ if (file_version != 2)
+ die(_("format version %u does not match expected file version %u"),
+ file_version, 2);
+
+ hash_version = get_be32(snapshot->buf + 2 * sizeof(uint32_t));
+ if (hash_version != the_hash_algo->format_id)
+ die(_("hash version %X does not match expected hash version %X"),
+ hash_version, the_hash_algo->format_id);
+
+ cf = init_chunkfile(NULL);
+
+ if (read_trailing_table_of_contents(cf, (const unsigned char *)snapshot->buf, snapshot->buflen)) {
+ release_snapshot(snapshot);
+ snapshot = NULL;
+ goto cleanup;
+ }
+
+ read_chunk(cf, CHREFS_CHUNKID_OFFSETS, packed_refs_read_offsets, snapshot);
+ pair_chunk(cf, CHREFS_CHUNKID_REFS, (const unsigned char**)&snapshot->refs_chunk);
+
+ read_chunk(cf, CHREFS_CHUNKID_PREFIX_OFFSETS, packed_refs_read_prefix_offsets, snapshot);
+ pair_chunk(cf, CHREFS_CHUNKID_PREFIX_DATA, (const unsigned char**)&snapshot->prefix_chunk);
+
+ /* TODO: add error checks for invalid chunk combinations. */
+
+cleanup:
+ free_chunkfile(cf);
+}
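fill_snapshot_v2() expects a fixed 12-byte preamble before the chunked data: the PREF signature, the format version, and the hash format id, each stored as a network-order 32-bit word. A standalone sketch that decodes such a preamble (the SHA-1 format id 0x73686131 is assumed for the sample bytes; not Git code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	/* "PREF", version 2, then the hash format id ("sha1" here) */
	const unsigned char hdr[12] = {
		0x50, 0x52, 0x45, 0x46,
		0x00, 0x00, 0x00, 0x02,
		0x73, 0x68, 0x61, 0x31,
	};

	/* prints: signature 50524546 version 2 hash 73686131 */
	printf("signature %08" PRIx32 " version %" PRIu32 " hash %08" PRIx32 "\n",
	       be32(hdr), be32(hdr + 4), be32(hdr + 8));
	return 0;
}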
+
+/*
+ * Move the iterator to the next record in the snapshot, without
+ * respect for whether the record is actually required by the current
+ * iteration. Adjust the fields in `iter` and return `ITER_OK` or
+ * `ITER_DONE`. This function does not free the iterator in the case
+ * of `ITER_DONE`.
+ */
+int next_record_v2(struct packed_ref_iterator *iter)
+{
+ uint64_t offset;
+ const char *pos = iter->pos;
+ strbuf_reset(&iter->refname_buf);
+
+ if (iter->row == iter->snapshot->nr)
+ return ITER_DONE;
+
+ iter->base.flags = REF_ISPACKED;
+
+ if (iter->cur_prefix)
+ strbuf_addstr(&iter->refname_buf, iter->cur_prefix);
+ strbuf_addstr(&iter->refname_buf, pos);
+ iter->base.refname = iter->refname_buf.buf;
+ pos += strlen(pos) + 1;
+
+ hashcpy(iter->oid.hash, (const unsigned char *)pos);
+ iter->oid.algo = hash_algo_by_ptr(the_hash_algo);
+ pos += the_hash_algo->rawsz;
+
+ if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!refname_is_safe(iter->base.refname))
+ die("packed refname is dangerous: %s",
+ iter->base.refname);
+ oidclr(&iter->oid);
+ iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
+ }
+
+ /* We always know the peeled value! */
+ iter->base.flags |= REF_KNOWS_PEELED;
+
+ offset = get_be64(iter->snapshot->offset_chunk + sizeof(uint64_t) * iter->row);
+ if (offset & OFFSET_IS_PEELED) {
+ hashcpy(iter->peeled.hash, (const unsigned char *)pos);
+ iter->peeled.algo = hash_algo_by_ptr(the_hash_algo);
+ } else {
+ oidclr(&iter->peeled);
+ }
+
+	/*
+	 * TODO: somehow all tags are getting OFFSET_IS_PEELED even
+	 * though some are not annotated tags.
+	 */
+ iter->pos = iter->snapshot->refs_chunk + (offset & (~OFFSET_IS_PEELED));
+
+ iter->row++;
+
+ if (iter->row == iter->prefix_row_end && iter->snapshot->prefix_chunk) {
+ size_t prefix_pos = get_be32(iter->snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * iter->prefix_i);
+ iter->cur_prefix = iter->snapshot->prefix_chunk + prefix_pos;
+ iter->prefix_i++;
+ iter->prefix_row_end = get_be32(iter->snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * iter->prefix_i + sizeof(uint32_t));
+ }
+
+ return ITER_OK;
+}
+
+void init_iterator_prefix_info(const char *prefix,
+ struct packed_ref_iterator *iter)
+{
+ struct snapshot *snapshot = iter->snapshot;
+
+ if (snapshot->version != 2 || !snapshot->prefix_chunk) {
+ iter->prefix_row_end = snapshot->nr;
+ return;
+ }
+
+ if (prefix)
+ iter->cur_prefix = find_prefix_location(snapshot, prefix, &iter->prefix_i);
+ else {
+ iter->cur_prefix = snapshot->prefix_chunk;
+ iter->prefix_i = 0;
+ }
+
+ iter->prefix_row_end = get_be32(snapshot->prefix_offsets_chunk +
+ 2 * sizeof(uint32_t) * iter->prefix_i +
+ sizeof(uint32_t));
+}
+
+struct write_packed_refs_v2_context {
+ struct packed_ref_store *refs;
+ struct string_list *updates;
+ struct strbuf *err;
+
+ struct hashfile *f;
+ struct chunkfile *cf;
+
+ /*
+	 * As we stream the ref names into the refs chunk, store these
+	 * values in memory, one array entry per ref.
+ */
+ uint64_t *offsets;
+ size_t nr;
+ size_t offsets_alloc;
+
+ int write_prefixes;
+ const char *cur_prefix;
+ size_t cur_prefix_len;
+
+ char **prefixes;
+ uint32_t *prefix_offsets;
+ uint32_t *prefix_rows;
+ size_t prefix_nr;
+ size_t prefixes_alloc;
+ size_t prefix_offsets_alloc;
+ size_t prefix_rows_alloc;
+};
+
+struct write_packed_refs_v2_context *create_v2_context(struct packed_ref_store *refs,
+ struct string_list *updates,
+ struct strbuf *err)
+{
+ struct write_packed_refs_v2_context *ctx;
+ int do_skip_hash;
+ CALLOC_ARRAY(ctx, 1);
+
+ ctx->refs = refs;
+ ctx->updates = updates;
+ ctx->err = err;
+
+ if (!fdopen_tempfile(refs->tempfile, "w")) {
+ strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
+ strerror(errno));
+ return ctx;
+ }
+
+ ctx->f = hashfd(refs->tempfile->fd, refs->tempfile->filename.buf);
+
+	/* Default to skipping the hash when the config does not say otherwise. */
+ if (git_config_get_maybe_bool("refs.hashpackedrefs", &do_skip_hash) ||
+ do_skip_hash)
+ ctx->f->skip_hash = 1;
+
+ ctx->cf = init_chunkfile(ctx->f);
+
+ return ctx;
+}
+
+static int write_packed_entry_v2(const char *refname,
+ const struct object_id *oid,
+ const struct object_id *peeled,
+ void *write_data)
+{
+ struct write_packed_refs_v2_context *ctx = write_data;
+ size_t reflen = strlen(refname) + 1;
+ size_t i = ctx->nr;
+
+ ALLOC_GROW(ctx->offsets, i + 1, ctx->offsets_alloc);
+
+ if (ctx->write_prefixes) {
+ if (ctx->cur_prefix && starts_with(refname, ctx->cur_prefix)) {
+ /* skip ahead! */
+ refname += ctx->cur_prefix_len;
+ reflen -= ctx->cur_prefix_len;
+ } else {
+ size_t len;
+ const char *slash, *slashslash = NULL;
+ if (ctx->prefix_nr) {
+ /* close out the old prefix. */
+ ctx->prefix_rows[ctx->prefix_nr - 1] = ctx->nr;
+ }
+
+ /* Find the new prefix. */
+ slash = strchr(refname, '/');
+ if (slash)
+ slashslash = strchr(slash + 1, '/');
+ /* If there are two slashes, use that. */
+ slash = slashslash ? slashslash : slash;
+ /*
+ * If there is at least one slash, use that,
+ * and include the slash in the string.
+ * Otherwise, use the end of the ref.
+ */
+ slash = slash ? slash + 1 : refname + strlen(refname);
+
+ len = slash - refname;
+ ALLOC_GROW(ctx->prefixes, ctx->prefix_nr + 1, ctx->prefixes_alloc);
+ ALLOC_GROW(ctx->prefix_offsets, ctx->prefix_nr + 1, ctx->prefix_offsets_alloc);
+ ALLOC_GROW(ctx->prefix_rows, ctx->prefix_nr + 1, ctx->prefix_rows_alloc);
+
+ if (ctx->prefix_nr)
+ ctx->prefix_offsets[ctx->prefix_nr] = ctx->prefix_offsets[ctx->prefix_nr - 1] + len + 1;
+ else
+ ctx->prefix_offsets[ctx->prefix_nr] = len + 1;
+
+ ctx->prefixes[ctx->prefix_nr] = xstrndup(refname, len);
+ ctx->cur_prefix = ctx->prefixes[ctx->prefix_nr];
+ ctx->prefix_nr++;
+
+ refname += len;
+ reflen -= len;
+ ctx->cur_prefix_len = len;
+ }
+
+ /* Update the last row continually. */
+ ctx->prefix_rows[ctx->prefix_nr - 1] = i + 1;
+ }
+
+
+ /* Write entire ref, including null terminator. */
+ hashwrite(ctx->f, refname, reflen);
+ hashwrite(ctx->f, oid->hash, the_hash_algo->rawsz);
+ if (peeled)
+ hashwrite(ctx->f, peeled->hash, the_hash_algo->rawsz);
+
+ if (i)
+ ctx->offsets[i] = (ctx->offsets[i - 1] & (~OFFSET_IS_PEELED));
+ else
+ ctx->offsets[i] = 0;
+ ctx->offsets[i] += reflen + the_hash_algo->rawsz;
+
+ if (peeled) {
+ ctx->offsets[i] += the_hash_algo->rawsz;
+ ctx->offsets[i] |= OFFSET_IS_PEELED;
+ }
+
+ ctx->nr++;
+ return 0;
+}
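The running offsets computed above are easiest to see with concrete numbers. A small standalone check (assuming SHA-1, i.e. 20-byte raw hashes, and prefix compression disabled so the full refname is stored; not Git code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OFFSET_IS_PEELED (((uint64_t)1) << 63)

int main(void)
{
	const uint64_t rawsz = 20;	/* SHA-1 raw hash size */
	uint64_t off0, off1;

	/* row 0: "refs/heads/main" + NUL + oid, no peeled value */
	off0 = strlen("refs/heads/main") + 1 + rawsz;

	/* row 1: "refs/tags/v1.0" + NUL + oid + peeled oid */
	off1 = (off0 & ~OFFSET_IS_PEELED)
	     + strlen("refs/tags/v1.0") + 1 + rawsz + rawsz;
	off1 |= OFFSET_IS_PEELED;

	/* prints: 24 800000000000005b (ends of rows 0 and 1; bit 63 = peeled) */
	printf("%" PRIx64 " %" PRIx64 "\n", off0, off1);
	return 0;
}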
+
+static int write_refs_chunk_refs(struct hashfile *f,
+ void *data)
+{
+ struct write_packed_refs_v2_context *ctx = data;
+ int ok;
+
+ trace2_region_enter("refs", "refs-chunk", the_repository);
+ ok = merge_iterator_and_updates(ctx->refs, ctx->updates, ctx->err,
+ write_packed_entry_v2, ctx);
+ trace2_region_leave("refs", "refs-chunk", the_repository);
+
+ return ok != ITER_DONE;
+}
+
+static int write_refs_chunk_offsets(struct hashfile *f,
+ void *data)
+{
+ struct write_packed_refs_v2_context *ctx = data;
+ size_t i;
+
+ trace2_region_enter("refs", "offsets", the_repository);
+ for (i = 0; i < ctx->nr; i++)
+ hashwrite_be64(f, ctx->offsets[i]);
+
+ trace2_region_leave("refs", "offsets", the_repository);
+ return 0;
+}
+
+static int write_refs_chunk_prefix_data(struct hashfile *f,
+ void *data)
+{
+ struct write_packed_refs_v2_context *ctx = data;
+ size_t i;
+
+ trace2_region_enter("refs", "prefix-data", the_repository);
+ for (i = 0; i < ctx->prefix_nr; i++) {
+ size_t len = strlen(ctx->prefixes[i]) + 1;
+ hashwrite(f, ctx->prefixes[i], len);
+
+ /* TODO: assert the prefix lengths match the stored offsets? */
+ }
+
+ trace2_region_leave("refs", "prefix-data", the_repository);
+ return 0;
+}
+
+static int write_refs_chunk_prefix_offsets(struct hashfile *f,
+ void *data)
+{
+ struct write_packed_refs_v2_context *ctx = data;
+ size_t i;
+
+ trace2_region_enter("refs", "prefix-offsets", the_repository);
+ for (i = 0; i < ctx->prefix_nr; i++) {
+ hashwrite_be32(f, ctx->prefix_offsets[i]);
+ hashwrite_be32(f, ctx->prefix_rows[i]);
+ }
+
+ trace2_region_leave("refs", "prefix-offsets", the_repository);
+ return 0;
+}
+
+int write_packed_refs_v2(struct write_packed_refs_v2_context *ctx)
+{
+ unsigned char file_hash[GIT_MAX_RAWSZ];
+
+ ctx->write_prefixes = git_env_bool("GIT_TEST_WRITE_PACKED_REFS_PREFIXES", 1);
+
+ add_chunk(ctx->cf, CHREFS_CHUNKID_REFS, 0, write_refs_chunk_refs);
+ add_chunk(ctx->cf, CHREFS_CHUNKID_OFFSETS, 0, write_refs_chunk_offsets);
+
+ if (ctx->write_prefixes) {
+ add_chunk(ctx->cf, CHREFS_CHUNKID_PREFIX_DATA, 0, write_refs_chunk_prefix_data);
+ add_chunk(ctx->cf, CHREFS_CHUNKID_PREFIX_OFFSETS, 0, write_refs_chunk_prefix_offsets);
+ }
+
+ hashwrite_be32(ctx->f, PACKED_REFS_SIGNATURE);
+ hashwrite_be32(ctx->f, 2);
+ hashwrite_be32(ctx->f, the_hash_algo->format_id);
+
+ if (write_chunkfile(ctx->cf, CHUNKFILE_TRAILING_TOC, ctx))
+ goto failure;
+
+ finalize_hashfile(ctx->f, file_hash, FSYNC_COMPONENT_REFERENCE,
+ CSUM_HASH_IN_STREAM | CSUM_FSYNC);
+
+ return 0;
+
+failure:
+ return -1;
+}
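Taken together, a v2 file as emitted here is: the 12-byte preamble written just above, the chunk payloads in registration order (REFS, ROFF, and, when prefix compression is on, PFXD and PFXO), a table of contents that read_trailing_table_of_contents() later locates at the tail of the file, and finally the hashfile checksum footer, whose computation refs.hashpackedrefs can switch off via skip_hash. The exact trailing-TOC encoding belongs to chunk-format.c and is not repeated here.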
+
+void free_v2_context(struct write_packed_refs_v2_context *ctx)
+{
+ if (ctx->cf)
+ free_chunkfile(ctx->cf);
+ free(ctx);
+}
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
index 69f93b0e2a..39b93fce97 100644
--- a/refs/refs-internal.h
+++ b/refs/refs-internal.h
@@ -521,6 +521,15 @@ struct ref_store;
REF_STORE_ODB | \
REF_STORE_MAIN)
+#define REF_STORE_FORMAT_FILES (1 << 8) /* can use loose ref files */
+#define REF_STORE_FORMAT_PACKED (1 << 9) /* can use v1 packed-refs file */
+#define REF_STORE_FORMAT_PACKED_V2 (1 << 10) /* can use v2 packed-refs file */
+
+static inline int packed_refs_enabled(int flags)
+{
+ return flags & (REF_STORE_FORMAT_PACKED | REF_STORE_FORMAT_PACKED_V2);
+}
+
/*
* Initialize the ref_store for the specified gitdir. These functions
* should call base_ref_store_init() to initialize the shared part of
diff --git a/repo-settings.c b/repo-settings.c
index 3021921c53..3ec0a53ea6 100644
--- a/repo-settings.c
+++ b/repo-settings.c
@@ -73,6 +73,19 @@ void prepare_repo_settings(struct repository *r)
r->settings.core_multi_pack_index = 1;
/*
+ * If the environment variable is set, assume that it was set by an
+ * invocation of git running in a superproject with
+	 * submodule.propagateBranches set and is recursing into this repo
+ * as a submodule. Therefore, we should ignore whatever is set in this
+ * repo's config.
+ */
+ r->settings.submodule_propagate_branches =
+ git_env_bool(GIT_SUBMODULE_PROPAGATE_BRANCHES_ENVIRONMENT, -1);
+ if (r->settings.submodule_propagate_branches == -1)
+ repo_cfg_bool(r, "submodule.propagateBranches",
+ &r->settings.submodule_propagate_branches, 0);
+
+ /*
* Non-boolean config
*/
if (!repo_config_get_int(r, "index.version", &value))
diff --git a/repository.c b/repository.c
index 5d166b692c..96533fc76b 100644
--- a/repository.c
+++ b/repository.c
@@ -182,6 +182,8 @@ int repo_init(struct repository *repo,
repo->repository_format_partial_clone = format.partial_clone;
format.partial_clone = NULL;
+ repo->ref_format = format.ref_format;
+
if (worktree)
repo_set_worktree(repo, worktree);
diff --git a/repository.h b/repository.h
index 6c461c5b9d..9f5ce21f70 100644
--- a/repository.h
+++ b/repository.h
@@ -38,6 +38,7 @@ struct repo_settings {
int fetch_write_commit_graph;
int command_requires_full_index;
int sparse_index;
+ int submodule_propagate_branches;
struct fsmonitor_settings *fsmonitor; /* lazily loaded */
@@ -62,6 +63,12 @@ struct repo_path_cache {
char *shallow;
};
+enum ref_format_flags {
+ REF_FORMAT_FILES = (1 << 0),
+ REF_FORMAT_PACKED = (1 << 1),
+ REF_FORMAT_PACKED_V2 = (1 << 2),
+};
+
struct repository {
/* Environment */
/*
@@ -96,6 +103,7 @@ struct repository {
* the ref object.
*/
struct ref_store *refs_private;
+ enum ref_format_flags ref_format;
/*
* Contains path to often used file names.
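
Because ref_format is a bitmask, several extensions.refFormat values can be in effect at once. A hypothetical helper (not part of this patch) that maps the bitmask to a packed-refs version to write could look like:

static int packed_refs_version_to_write(const struct repository *r)
{
        if (r->ref_format & REF_FORMAT_PACKED_V2)
                return 2;       /* chunked packed-refs */
        if (r->ref_format & REF_FORMAT_PACKED)
                return 1;       /* classic packed-refs */
        return 0;               /* loose ref files only */
}
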
diff --git a/reset.c b/reset.c
index e3383a9334..5ded23611f 100644
--- a/reset.c
+++ b/reset.c
@@ -128,6 +128,7 @@ int reset_head(struct repository *r, const struct reset_head_opts *opts)
unpack_tree_opts.update = 1;
unpack_tree_opts.merge = 1;
unpack_tree_opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
+ unpack_tree_opts.skip_cache_tree_update = 1;
init_checkout_metadata(&unpack_tree_opts.meta, switch_to_branch, oid, NULL);
if (reset_hard)
unpack_tree_opts.reset = UNPACK_RESET_PROTECT_UNTRACKED;
diff --git a/revision.c b/revision.c
index 0760e78936..c86e76e471 100644
--- a/revision.c
+++ b/revision.c
@@ -1,4 +1,5 @@
#include "cache.h"
+#include "config.h"
#include "object-store.h"
#include "tag.h"
#include "blob.h"
@@ -1517,27 +1518,77 @@ static void add_rev_cmdline_list(struct rev_info *revs,
}
}
-struct all_refs_cb {
- int all_flags;
- int warned_bad_reflog;
- struct rev_info *all_revs;
- const char *name_for_errormsg;
- struct worktree *wt;
-};
-
-int ref_excluded(struct string_list *ref_excludes, const char *path)
+int ref_excluded(const struct ref_exclusions *exclusions, const char *path)
{
+ const char *stripped_path = strip_namespace(path);
struct string_list_item *item;
- if (!ref_excludes)
- return 0;
- for_each_string_list_item(item, ref_excludes) {
+ for_each_string_list_item(item, &exclusions->excluded_refs) {
if (!wildmatch(item->string, path, 0))
return 1;
}
+
+ if (ref_is_hidden(stripped_path, path, &exclusions->hidden_refs))
+ return 1;
+
return 0;
}
+void init_ref_exclusions(struct ref_exclusions *exclusions)
+{
+ struct ref_exclusions blank = REF_EXCLUSIONS_INIT;
+ memcpy(exclusions, &blank, sizeof(*exclusions));
+}
+
+void clear_ref_exclusions(struct ref_exclusions *exclusions)
+{
+ string_list_clear(&exclusions->excluded_refs, 0);
+ string_list_clear(&exclusions->hidden_refs, 0);
+ exclusions->hidden_refs_configured = 0;
+}
+
+void add_ref_exclusion(struct ref_exclusions *exclusions, const char *exclude)
+{
+ string_list_append(&exclusions->excluded_refs, exclude);
+}
+
+struct exclude_hidden_refs_cb {
+ struct ref_exclusions *exclusions;
+ const char *section;
+};
+
+static int hide_refs_config(const char *var, const char *value, void *cb_data)
+{
+ struct exclude_hidden_refs_cb *cb = cb_data;
+ cb->exclusions->hidden_refs_configured = 1;
+ return parse_hide_refs_config(var, value, cb->section,
+ &cb->exclusions->hidden_refs);
+}
+
+void exclude_hidden_refs(struct ref_exclusions *exclusions, const char *section)
+{
+ struct exclude_hidden_refs_cb cb;
+
+ if (strcmp(section, "receive") && strcmp(section, "uploadpack"))
+ die(_("unsupported section for hidden refs: %s"), section);
+
+ if (exclusions->hidden_refs_configured)
+ die(_("--exclude-hidden= passed more than once"));
+
+ cb.exclusions = exclusions;
+ cb.section = section;
+
+ git_config(hide_refs_config, &cb);
+}
+
+struct all_refs_cb {
+ int all_flags;
+ int warned_bad_reflog;
+ struct rev_info *all_revs;
+ const char *name_for_errormsg;
+ struct worktree *wt;
+};
+
static int handle_one_ref(const char *path, const struct object_id *oid,
int flag UNUSED,
void *cb_data)
@@ -1545,7 +1596,7 @@ static int handle_one_ref(const char *path, const struct object_id *oid,
struct all_refs_cb *cb = cb_data;
struct object *object;
- if (ref_excluded(cb->all_revs->ref_excludes, path))
+ if (ref_excluded(&cb->all_revs->ref_excludes, path))
return 0;
object = get_reference(cb->all_revs, path, oid, cb->all_flags);
@@ -1563,24 +1614,6 @@ static void init_all_refs_cb(struct all_refs_cb *cb, struct rev_info *revs,
cb->wt = NULL;
}
-void clear_ref_exclusion(struct string_list **ref_excludes_p)
-{
- if (*ref_excludes_p) {
- string_list_clear(*ref_excludes_p, 0);
- free(*ref_excludes_p);
- }
- *ref_excludes_p = NULL;
-}
-
-void add_ref_exclusion(struct string_list **ref_excludes_p, const char *exclude)
-{
- if (!*ref_excludes_p) {
- CALLOC_ARRAY(*ref_excludes_p, 1);
- (*ref_excludes_p)->strdup_strings = 1;
- }
- string_list_append(*ref_excludes_p, exclude);
-}
-
static void handle_refs(struct ref_store *refs,
struct rev_info *revs, unsigned flags,
int (*for_each)(struct ref_store *, each_ref_fn, void *))
@@ -1865,30 +1898,15 @@ void repo_init_revisions(struct repository *r,
struct rev_info *revs,
const char *prefix)
{
- memset(revs, 0, sizeof(*revs));
+ struct rev_info blank = REV_INFO_INIT;
+ memcpy(revs, &blank, sizeof(*revs));
revs->repo = r;
- revs->abbrev = DEFAULT_ABBREV;
- revs->simplify_history = 1;
revs->pruning.repo = r;
- revs->pruning.flags.recursive = 1;
- revs->pruning.flags.quick = 1;
revs->pruning.add_remove = file_add_remove;
revs->pruning.change = file_change;
revs->pruning.change_fn_data = revs;
- revs->sort_order = REV_SORT_IN_GRAPH_ORDER;
- revs->dense = 1;
revs->prefix = prefix;
- revs->max_age = -1;
- revs->max_age_as_filter = -1;
- revs->min_age = -1;
- revs->skip_count = -1;
- revs->max_count = -1;
- revs->max_parents = -1;
- revs->expand_tabs_in_log = -1;
-
- revs->commit_format = CMIT_FMT_DEFAULT;
- revs->expand_tabs_in_log_default = 8;
grep_init(&revs->grep_filter, revs->repo);
revs->grep_filter.status_only = 1;
@@ -1901,6 +1919,7 @@ void repo_init_revisions(struct repository *r,
init_display_notes(&revs->notes_opt);
list_objects_filter_init(&revs->filter);
+ init_ref_exclusions(&revs->ref_excludes);
}
static void add_pending_commit_list(struct rev_info *revs,
@@ -2225,7 +2244,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
!strcmp(arg, "--bisect") || starts_with(arg, "--glob=") ||
!strcmp(arg, "--indexed-objects") ||
!strcmp(arg, "--alternate-refs") ||
- starts_with(arg, "--exclude=") ||
+ starts_with(arg, "--exclude=") || starts_with(arg, "--exclude-hidden=") ||
starts_with(arg, "--branches=") || starts_with(arg, "--tags=") ||
starts_with(arg, "--remotes=") || starts_with(arg, "--no-walk="))
{
@@ -2689,10 +2708,12 @@ static int handle_revision_pseudo_opt(struct rev_info *revs,
init_all_refs_cb(&cb, revs, *flags);
other_head_refs(handle_one_ref, &cb);
}
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (!strcmp(arg, "--branches")) {
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --branches"));
handle_refs(refs, revs, *flags, refs_for_each_branch_ref);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (!strcmp(arg, "--bisect")) {
read_bisect_terms(&term_bad, &term_good);
handle_refs(refs, revs, *flags, for_each_bad_bisect_ref);
@@ -2700,35 +2721,48 @@ static int handle_revision_pseudo_opt(struct rev_info *revs,
for_each_good_bisect_ref);
revs->bisect = 1;
} else if (!strcmp(arg, "--tags")) {
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --tags"));
handle_refs(refs, revs, *flags, refs_for_each_tag_ref);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (!strcmp(arg, "--remotes")) {
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --remotes"));
handle_refs(refs, revs, *flags, refs_for_each_remote_ref);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if ((argcount = parse_long_opt("glob", argv, &optarg))) {
struct all_refs_cb cb;
init_all_refs_cb(&cb, revs, *flags);
for_each_glob_ref(handle_one_ref, optarg, &cb);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
return argcount;
} else if ((argcount = parse_long_opt("exclude", argv, &optarg))) {
add_ref_exclusion(&revs->ref_excludes, optarg);
return argcount;
+ } else if ((argcount = parse_long_opt("exclude-hidden", argv, &optarg))) {
+ exclude_hidden_refs(&revs->ref_excludes, optarg);
+ return argcount;
} else if (skip_prefix(arg, "--branches=", &optarg)) {
struct all_refs_cb cb;
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --branches"));
init_all_refs_cb(&cb, revs, *flags);
for_each_glob_ref_in(handle_one_ref, optarg, "refs/heads/", &cb);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (skip_prefix(arg, "--tags=", &optarg)) {
struct all_refs_cb cb;
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --tags"));
init_all_refs_cb(&cb, revs, *flags);
for_each_glob_ref_in(handle_one_ref, optarg, "refs/tags/", &cb);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (skip_prefix(arg, "--remotes=", &optarg)) {
struct all_refs_cb cb;
+ if (revs->ref_excludes.hidden_refs_configured)
+ return error(_("--exclude-hidden cannot be used together with --remotes"));
init_all_refs_cb(&cb, revs, *flags);
for_each_glob_ref_in(handle_one_ref, optarg, "refs/remotes/", &cb);
- clear_ref_exclusion(&revs->ref_excludes);
+ clear_ref_exclusions(&revs->ref_excludes);
} else if (!strcmp(arg, "--reflog")) {
add_reflogs_to_pending(revs, *flags);
} else if (!strcmp(arg, "--indexed-objects")) {
@@ -3020,6 +3054,7 @@ void release_revisions(struct rev_info *revs)
date_mode_release(&revs->date_mode);
release_revisions_mailmap(revs->mailmap);
free_grep_patterns(&revs->grep_filter);
+ graph_clear(revs->graph);
/* TODO (need to handle "no_free"): diff_free(&revs->diffopt) */
diff_free(&revs->pruning);
reflog_walk_info_release(revs->reflog_info);
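
Taken together, these hunks replace the old bare string_list plumbing with a small API: initialize, add wildmatch patterns, optionally pull in the hide-refs config for one section, query, and clear. A usage sketch with example patterns:

        struct ref_exclusions excl = REF_EXCLUSIONS_INIT;

        add_ref_exclusion(&excl, "refs/heads/topic/*");  /* --exclude=<glob> */
        exclude_hidden_refs(&excl, "uploadpack");         /* --exclude-hidden=uploadpack */

        if (ref_excluded(&excl, "refs/heads/topic/foo"))
                ;       /* skip this ref */

        clear_ref_exclusions(&excl);

Note that exclude_hidden_refs() dies for any section other than "receive" or "uploadpack", and when it is called twice on the same struct, matching the "passed more than once" error above.
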
diff --git a/revision.h b/revision.h
index afe1b77985..30febad09a 100644
--- a/revision.h
+++ b/revision.h
@@ -81,6 +81,35 @@ struct rev_cmdline_info {
} *rev;
};
+struct ref_exclusions {
+ /*
+ * Excluded refs is a list of wildmatch patterns. If any of the
+ * patterns matches, the reference will be excluded.
+ */
+ struct string_list excluded_refs;
+
+ /*
+ * Hidden refs is a list of patterns for refs that are to be hidden
+ * via `ref_is_hidden()`.
+ */
+ struct string_list hidden_refs;
+
+ /*
+ * Indicates whether hidden refs have been configured. This is to
+ * distinguish between no hidden refs existing and hidden refs not
+ * being parsed.
+ */
+ char hidden_refs_configured;
+};
+
+/**
+ * Initialize a `struct ref_exclusions` with a macro.
+ */
+#define REF_EXCLUSIONS_INIT { \
+ .excluded_refs = STRING_LIST_INIT_DUP, \
+ .hidden_refs = STRING_LIST_INIT_DUP, \
+}
+
struct oidset;
struct topo_walk_info;
@@ -103,7 +132,7 @@ struct rev_info {
struct list_objects_filter_options filter;
/* excluding from --branches, --refs, etc. expansion */
- struct string_list *ref_excludes;
+ struct ref_exclusions ref_excludes;
/* Basic information */
const char *prefix;
@@ -357,7 +386,23 @@ struct rev_info {
* called before release_revisions() the "struct rev_info" can be left
* uninitialized.
*/
-#define REV_INFO_INIT { 0 }
+#define REV_INFO_INIT { \
+ .abbrev = DEFAULT_ABBREV, \
+ .simplify_history = 1, \
+ .pruning.flags.recursive = 1, \
+ .pruning.flags.quick = 1, \
+ .sort_order = REV_SORT_IN_GRAPH_ORDER, \
+ .dense = 1, \
+ .max_age = -1, \
+ .max_age_as_filter = -1, \
+ .min_age = -1, \
+ .skip_count = -1, \
+ .max_count = -1, \
+ .max_parents = -1, \
+ .expand_tabs_in_log = -1, \
+ .commit_format = CMIT_FMT_DEFAULT, \
+ .expand_tabs_in_log_default = 8, \
+}
/**
* Initialize a rev_info structure with default values. The third parameter may
@@ -439,12 +484,14 @@ void mark_trees_uninteresting_sparse(struct repository *r, struct oidset *trees)
void show_object_with_name(FILE *, struct object *, const char *);
/**
- * Helpers to check if a "struct string_list" item matches with
- * wildmatch().
+ * Helpers to check if a reference should be excluded.
*/
-int ref_excluded(struct string_list *, const char *path);
-void clear_ref_exclusion(struct string_list **);
-void add_ref_exclusion(struct string_list **, const char *exclude);
+
+int ref_excluded(const struct ref_exclusions *exclusions, const char *path);
+void init_ref_exclusions(struct ref_exclusions *);
+void clear_ref_exclusions(struct ref_exclusions *);
+void add_ref_exclusion(struct ref_exclusions *, const char *exclude);
+void exclude_hidden_refs(struct ref_exclusions *, const char *section);
/**
* This function can be used if you want to add commit objects as revision
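
With the defaults lifted into the macro, REV_INFO_INIT and repo_init_revisions() now agree on the same initial state, so both entry points remain valid. The usual lifecycle is unchanged:

        struct rev_info revs;

        repo_init_revisions(the_repository, &revs, NULL);
        /* setup_revisions(), prepare_revision_walk(), get_revision(), ... */
        release_revisions(&revs);
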
diff --git a/run-command.c b/run-command.c
index 48b9ba6d6f..02cdb22d7b 100644
--- a/run-command.c
+++ b/run-command.c
@@ -1525,6 +1525,9 @@ static void pp_init(struct parallel_processes *pp,
if (!opts->get_next_task)
BUG("you need to specify a get_next_task function");
+
+ if (opts->duplicate_output && opts->ungroup)
+ BUG("duplicate_output and ungroup are incompatible with each other");
CALLOC_ARRAY(pp->children, n);
if (!opts->ungroup)
@@ -1645,8 +1648,14 @@ static void pp_buffer_stderr(struct parallel_processes *pp,
for (size_t i = 0; i < opts->processes; i++) {
if (pp->children[i].state == GIT_CP_WORKING &&
pp->pfd[i].revents & (POLLIN | POLLHUP)) {
- int n = strbuf_read_once(&pp->children[i].err,
- pp->children[i].process.err, 0);
+ struct strbuf buf = STRBUF_INIT;
+ int n = strbuf_read_once(&buf, pp->children[i].process.err, 0);
+ strbuf_addbuf(&pp->children[i].err, &buf);
+ if (opts->duplicate_output)
+ opts->duplicate_output(&buf, &pp->children[i].err,
+ opts->data,
+ pp->children[i].data);
+ strbuf_release(&buf);
if (n == 0) {
close(pp->children[i].process.err);
pp->children[i].state = GIT_CP_WAIT_CLEANUP;
diff --git a/run-command.h b/run-command.h
index 072db56a4d..709b36a411 100644
--- a/run-command.h
+++ b/run-command.h
@@ -409,6 +409,24 @@ typedef int (*start_failure_fn)(struct strbuf *out,
void *pp_task_cb);
/**
+ * This callback is called whenever output from a child process is buffered
+ *
+ * "struct strbuf *process_out" contains the output from the child process
+ *
+ * See run_processes_parallel() below for a discussion of the "struct
+ * strbuf *out" parameter.
+ *
+ * pp_cb is the callback cookie as passed into run_processes_parallel,
+ * pp_task_cb is the callback cookie as passed into get_next_task_fn.
+ *
+ * This function is incompatible with "ungroup"
+ */
+typedef void (*duplicate_output_fn)(struct strbuf *process_out,
+ struct strbuf *out,
+ void *pp_cb,
+ void *pp_task_cb);
+
+/**
* This callback is called on every child process that finished processing.
*
* See run_processes_parallel() below for a discussion of the "struct
@@ -462,6 +480,12 @@ struct run_process_parallel_opts
start_failure_fn start_failure;
/**
+ * duplicate_output: See duplicate_output_fn() above. This should be
+ * NULL unless process-specific output is needed.
+ */
+ duplicate_output_fn duplicate_output;
+
+ /**
* task_finished: See task_finished_fn() above. This can be
* NULL to omit any special handling.
*/
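
A caller opts in by setting .duplicate_output; .ungroup must stay unset, since pp_init() now treats the combination as a BUG. A minimal sketch (the callback body, next_task and state are assumptions; submodule.c further down in this patch is a real consumer):

static void collect_output(struct strbuf *process_out, struct strbuf *out,
                           void *pp_cb, void *pp_task_cb)
{
        /* process_out holds only the bytes read since the last call;
         * out holds everything buffered for this child so far */
}

        const struct run_process_parallel_opts opts = {
                .processes = 4,
                .get_next_task = next_task,          /* assumed to exist */
                .duplicate_output = collect_output,
                .data = &state,                      /* assumed to exist */
        };
        run_processes_parallel(&opts);
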
diff --git a/scalar.c b/scalar.c
index 03f9e480dd..6c52243cdf 100644
--- a/scalar.c
+++ b/scalar.c
@@ -596,6 +596,24 @@ static int get_scalar_repos(const char *key, const char *value, void *data)
return 0;
}
+static int remove_deleted_enlistment(struct strbuf *path)
+{
+ int res = 0;
+ strbuf_realpath_forgiving(path, path->buf, 1);
+
+ if (run_git("config", "--global",
+ "--unset", "--fixed-value",
+ "scalar.repo", path->buf, NULL) < 0)
+ res = -1;
+
+ if (run_git("config", "--global",
+ "--unset", "--fixed-value",
+ "maintenance.repo", path->buf, NULL) < 0)
+ res = -1;
+
+ return res;
+}
+
static int cmd_reconfigure(int argc, const char **argv)
{
int all = 0;
@@ -635,8 +653,22 @@ static int cmd_reconfigure(int argc, const char **argv)
strbuf_reset(&gitdir);
if (chdir(dir) < 0) {
- warning_errno(_("could not switch to '%s'"), dir);
- res = -1;
+ struct strbuf buf = STRBUF_INIT;
+
+ if (errno != ENOENT) {
+ warning_errno(_("could not switch to '%s'"), dir);
+ res = -1;
+ continue;
+ }
+
+ strbuf_addstr(&buf, dir);
+ if (remove_deleted_enlistment(&buf))
+ res = error(_("could not remove stale "
+ "scalar.repo '%s'"), dir);
+ else
+ warning(_("removing stale scalar.repo '%s'"),
+ dir);
+ strbuf_release(&buf);
} else if (discover_git_directory(&commondir, &gitdir) < 0) {
warning_errno(_("git repository gone in '%s'"), dir);
res = -1;
@@ -722,24 +754,6 @@ static int cmd_run(int argc, const char **argv)
return 0;
}
-static int remove_deleted_enlistment(struct strbuf *path)
-{
- int res = 0;
- strbuf_realpath_forgiving(path, path->buf, 1);
-
- if (run_git("config", "--global",
- "--unset", "--fixed-value",
- "scalar.repo", path->buf, NULL) < 0)
- res = -1;
-
- if (run_git("config", "--global",
- "--unset", "--fixed-value",
- "maintenance.repo", path->buf, NULL) < 0)
- res = -1;
-
- return res;
-}
-
static int cmd_unregister(int argc, const char **argv)
{
struct option options[] = {
diff --git a/sequencer.c b/sequencer.c
index f0f1af4d47..d4608d6a08 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -375,6 +375,7 @@ int sequencer_remove_state(struct replay_opts *opts)
}
free(opts->gpg_sign);
+ free(opts->reflog_action);
free(opts->default_strategy);
free(opts->strategy);
for (i = 0; i < opts->xopts_nr; i++)
@@ -1050,6 +1051,8 @@ static int run_git_commit(const char *defmsg,
gpg_opt, gpg_opt);
}
+ strvec_pushf(&cmd.env, GIT_REFLOG_ACTION "=%s", opts->reflog_message);
+
if (opts->committer_date_is_author_date)
strvec_pushf(&cmd.env, "GIT_COMMITTER_DATE=%s",
opts->ignore_date ?
@@ -1589,8 +1592,8 @@ static int try_to_commit(struct repository *r,
goto out;
}
- if (update_head_with_reflog(current_head, oid,
- getenv("GIT_REFLOG_ACTION"), msg, &err)) {
+ if (update_head_with_reflog(current_head, oid, opts->reflog_message,
+ msg, &err)) {
res = error("%s", err.buf);
goto out;
}
@@ -2894,6 +2897,7 @@ static void read_strategy_opts(struct replay_opts *opts, struct strbuf *buf)
strbuf_reset(buf);
if (!read_oneliner(buf, rebase_path_strategy(), 0))
return;
+ free(opts->strategy);
opts->strategy = strbuf_detach(buf, NULL);
if (!read_oneliner(buf, rebase_path_strategy_opts(), 0))
return;
@@ -3672,17 +3676,28 @@ static int do_label(struct repository *r, const char *name, int len)
return ret;
}
+static const char *sequencer_reflog_action(struct replay_opts *opts)
+{
+ if (!opts->reflog_action) {
+ opts->reflog_action = getenv(GIT_REFLOG_ACTION);
+ opts->reflog_action =
+ xstrdup(opts->reflog_action ? opts->reflog_action
+ : action_name(opts));
+ }
+
+ return opts->reflog_action;
+}
+
__attribute__((format (printf, 3, 4)))
static const char *reflog_message(struct replay_opts *opts,
const char *sub_action, const char *fmt, ...)
{
va_list ap;
static struct strbuf buf = STRBUF_INIT;
- char *reflog_action = getenv(GIT_REFLOG_ACTION);
va_start(ap, fmt);
strbuf_reset(&buf);
- strbuf_addstr(&buf, reflog_action ? reflog_action : action_name(opts));
+ strbuf_addstr(&buf, sequencer_reflog_action(opts));
if (sub_action)
strbuf_addf(&buf, " (%s)", sub_action);
if (fmt) {
@@ -3694,6 +3709,28 @@ static const char *reflog_message(struct replay_opts *opts,
return buf.buf;
}
+static struct commit *lookup_label(struct repository *r, const char *label,
+ int len, struct strbuf *buf)
+{
+ struct commit *commit;
+ struct object_id oid;
+
+ strbuf_reset(buf);
+ strbuf_addf(buf, "refs/rewritten/%.*s", len, label);
+ if (!read_ref(buf->buf, &oid)) {
+ commit = lookup_commit_object(r, &oid);
+ } else {
+ /* fall back to non-rewritten ref or commit */
+ strbuf_splice(buf, 0, strlen("refs/rewritten/"), "", 0);
+ commit = lookup_commit_reference_by_name(buf->buf);
+ }
+
+ if (!commit)
+ error(_("could not resolve '%s'"), buf->buf);
+
+ return commit;
+}
+
static int do_reset(struct repository *r,
const char *name, int len,
struct replay_opts *opts)
@@ -3725,6 +3762,7 @@ static int do_reset(struct repository *r,
oidcpy(&oid, &opts->squash_onto);
} else {
int i;
+ struct commit *commit;
/* Determine the length of the label */
for (i = 0; i < len; i++)
@@ -3732,12 +3770,12 @@ static int do_reset(struct repository *r,
break;
len = i;
- strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
- if (get_oid(ref_name.buf, &oid) &&
- get_oid(ref_name.buf + strlen("refs/rewritten/"), &oid)) {
- ret = error(_("could not read '%s'"), ref_name.buf);
+ commit = lookup_label(r, name, len, &ref_name);
+ if (!commit) {
+ ret = -1;
goto cleanup;
}
+ oid = commit->object.oid;
}
setup_unpack_trees_porcelain(&unpack_tree_opts, "reset");
@@ -3748,6 +3786,7 @@ static int do_reset(struct repository *r,
unpack_tree_opts.merge = 1;
unpack_tree_opts.update = 1;
unpack_tree_opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */
+ unpack_tree_opts.skip_cache_tree_update = 1;
init_checkout_metadata(&unpack_tree_opts.meta, name, &oid, NULL);
if (repo_read_index_unmerged(r)) {
@@ -3784,26 +3823,6 @@ cleanup:
return ret;
}
-static struct commit *lookup_label(const char *label, int len,
- struct strbuf *buf)
-{
- struct commit *commit;
-
- strbuf_reset(buf);
- strbuf_addf(buf, "refs/rewritten/%.*s", len, label);
- commit = lookup_commit_reference_by_name(buf->buf);
- if (!commit) {
- /* fall back to non-rewritten ref or commit */
- strbuf_splice(buf, 0, strlen("refs/rewritten/"), "", 0);
- commit = lookup_commit_reference_by_name(buf->buf);
- }
-
- if (!commit)
- error(_("could not resolve '%s'"), buf->buf);
-
- return commit;
-}
-
static int do_merge(struct repository *r,
struct commit *commit,
const char *arg, int arg_len,
@@ -3851,7 +3870,7 @@ static int do_merge(struct repository *r,
k = strcspn(p, " \t\n");
if (!k)
continue;
- merge_commit = lookup_label(p, k, &ref_name);
+ merge_commit = lookup_label(r, p, k, &ref_name);
if (!merge_commit) {
ret = error(_("unable to parse '%.*s'"), k, p);
goto leave_merge;
@@ -4128,11 +4147,14 @@ static int write_update_refs_state(struct string_list *refs_to_oids)
struct string_list_item *item;
char *path;
- if (!refs_to_oids->nr)
- return 0;
-
path = rebase_path_update_refs(the_repository->gitdir);
+ if (!refs_to_oids->nr) {
+ if (unlink(path) && errno != ENOENT)
+ result = error_errno(_("could not unlink: %s"), path);
+ goto cleanup;
+ }
+
if (safe_create_leading_directories(path)) {
result = error(_("unable to create leading directories of %s"),
path);
@@ -4495,7 +4517,7 @@ static int checkout_onto(struct repository *r, struct replay_opts *opts,
RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
.head_msg = reflog_message(opts, "start", "checkout %s",
onto_name),
- .default_reflog_action = "rebase"
+ .default_reflog_action = sequencer_reflog_action(opts)
};
if (reset_head(r, &ropts)) {
apply_autostash(rebase_path_autostash());
@@ -4564,11 +4586,8 @@ static int pick_commits(struct repository *r,
struct replay_opts *opts)
{
int res = 0, reschedule = 0;
- char *prev_reflog_action;
- /* Note that 0 for 3rd parameter of setenv means set only if not set */
- setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
- prev_reflog_action = xstrdup(getenv(GIT_REFLOG_ACTION));
+ opts->reflog_message = sequencer_reflog_action(opts);
if (opts->allow_ff)
assert(!(opts->signoff || opts->no_commit ||
opts->record_origin || should_edit(opts) ||
@@ -4616,14 +4635,12 @@ static int pick_commits(struct repository *r,
}
if (item->command <= TODO_SQUASH) {
if (is_rebase_i(opts))
- setenv(GIT_REFLOG_ACTION, reflog_message(opts,
- command_to_string(item->command), NULL),
- 1);
+ opts->reflog_message = reflog_message(opts,
+ command_to_string(item->command), NULL);
+
res = do_pick_commit(r, item, opts,
is_final_fixup(todo_list),
&check_todo);
- if (is_rebase_i(opts))
- setenv(GIT_REFLOG_ACTION, prev_reflog_action, 1);
if (is_rebase_i(opts) && res < 0) {
/* Reschedule */
advise(_(rescheduled_advice),
@@ -5046,8 +5063,6 @@ int sequencer_continue(struct repository *r, struct replay_opts *opts)
if (read_populate_opts(opts))
return -1;
if (is_rebase_i(opts)) {
- char *previous_reflog_action;
-
if ((res = read_populate_todo(r, &todo_list, opts)))
goto release_todo_list;
@@ -5058,13 +5073,11 @@ int sequencer_continue(struct repository *r, struct replay_opts *opts)
unlink(rebase_path_dropped());
}
- previous_reflog_action = xstrdup(getenv(GIT_REFLOG_ACTION));
- setenv(GIT_REFLOG_ACTION, reflog_message(opts, "continue", NULL), 1);
+ opts->reflog_message = reflog_message(opts, "continue", NULL);
if (commit_staged_changes(r, opts, &todo_list)) {
res = -1;
goto release_todo_list;
}
- setenv(GIT_REFLOG_ACTION, previous_reflog_action, 1);
} else if (!file_exists(get_todo_path(opts)))
return continue_single_pick(r, opts);
else if ((res = read_populate_todo(r, &todo_list, opts)))
@@ -5112,7 +5125,7 @@ static int single_pick(struct repository *r,
TODO_PICK : TODO_REVERT;
item.commit = cmit;
- setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
+ opts->reflog_message = sequencer_reflog_action(opts);
return do_pick_commit(r, &item, opts, 0, &check_todo);
}
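
The net effect is that the reflog action is resolved once, cached in opts->reflog_action, and no longer round-tripped through setenv()/getenv() on every step. As an illustration, during an interactive rebase with GIT_REFLOG_ACTION unset, a pick step ends up doing:

        opts->reflog_message = reflog_message(opts, "pick", NULL);
        /* yields "rebase (pick)": sequencer_reflog_action() falls back to
         * action_name(opts), and the sub-action is appended in parentheses */
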
diff --git a/sequencer.h b/sequencer.h
index 563fe59933..888c18aad7 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -63,6 +63,9 @@ struct replay_opts {
char **xopts;
size_t xopts_nr, xopts_alloc;
+ /* Reflog */
+ char *reflog_action;
+
/* Used by fixup/squash */
struct strbuf current_fixups;
int current_fixup_count;
@@ -73,6 +76,9 @@ struct replay_opts {
/* Only used by REPLAY_NONE */
struct rev_info *revs;
+
+ /* Private use */
+ const char *reflog_message;
};
#define REPLAY_OPTS_INIT { .edit = -1, .action = -1, .current_fixups = STRBUF_INIT }
diff --git a/setup.c b/setup.c
index cefd5f63c4..a4525732fe 100644
--- a/setup.c
+++ b/setup.c
@@ -577,6 +577,18 @@ static enum extension_result handle_extension(const char *var,
"extensions.objectformat", value);
data->hash_algo = format;
return EXTENSION_OK;
+ } else if (!strcmp(ext, "refformat")) {
+ if (!strcmp(value, "files"))
+ data->ref_format |= REF_FORMAT_FILES;
+ else if (!strcmp(value, "packed"))
+ data->ref_format |= REF_FORMAT_PACKED;
+ else if (!strcmp(value, "packed-v2"))
+ data->ref_format |= REF_FORMAT_PACKED_V2;
+ else
+ return error(_("invalid value for '%s': '%s'"),
+ "extensions.refFormat", value);
+ data->ref_format_count++;
+ return EXTENSION_OK;
}
return EXTENSION_UNKNOWN;
}
@@ -718,6 +730,14 @@ int read_repository_format(struct repository_format *format, const char *path)
git_config_from_file(check_repo_format, path, format);
if (format->version == -1)
clear_repository_format(format);
+
+ /* Set default ref_format if no extensions.refFormat exists. */
+ if (!format->ref_format_count) {
+ format->ref_format = REF_FORMAT_FILES | REF_FORMAT_PACKED;
+ if (git_env_ulong("GIT_TEST_PACKED_REFS_VERSION", 0) == 2)
+ format->ref_format |= REF_FORMAT_PACKED_V2;
+ }
+
return format->version;
}
@@ -1420,6 +1440,9 @@ int discover_git_directory(struct strbuf *commondir,
candidate.partial_clone;
candidate.partial_clone = NULL;
+ /* take ownership of candidate.ref_format */
+ the_repository->ref_format = candidate.ref_format;
+
clear_repository_format(&candidate);
return 0;
}
@@ -1556,6 +1579,8 @@ const char *setup_git_directory_gently(int *nongit_ok)
the_repository->repository_format_partial_clone =
repo_fmt.partial_clone;
repo_fmt.partial_clone = NULL;
+
+ the_repository->ref_format = repo_fmt.ref_format;
}
}
/*
@@ -1645,6 +1670,7 @@ void check_repository_format(struct repository_format *fmt)
repo_set_hash_algo(the_repository, fmt->hash_algo);
the_repository->repository_format_partial_clone =
xstrdup_or_null(fmt->partial_clone);
+ the_repository->ref_format = fmt->ref_format;
clear_repository_format(&repo_fmt);
}
diff --git a/sha1dc_git.h b/sha1dc_git.h
index 41e1c3fd3f..60e3ce8439 100644
--- a/sha1dc_git.h
+++ b/sha1dc_git.h
@@ -17,6 +17,7 @@ void git_SHA1DCInit(SHA1_CTX *);
void git_SHA1DCFinal(unsigned char [20], SHA1_CTX *);
void git_SHA1DCUpdate(SHA1_CTX *ctx, const void *data, unsigned long len);
+#define platform_SHA_IS_SHA1DC /* used by "test-tool sha1-is-sha1dc" */
#define platform_SHA_CTX SHA1_CTX
#define platform_SHA1_Init git_SHA1DCInit
#define platform_SHA1_Update git_SHA1DCUpdate
diff --git a/shared.mak b/shared.mak
index 33f43edbf9..be1f30ff20 100644
--- a/shared.mak
+++ b/shared.mak
@@ -60,6 +60,7 @@ ifndef V
QUIET_AR = @echo ' ' AR $@;
QUIET_LINK = @echo ' ' LINK $@;
QUIET_BUILT_IN = @echo ' ' BUILTIN $@;
+ QUIET_CP = @echo ' ' CP $< $@;
QUIET_LNCP = @echo ' ' LN/CP $@;
QUIET_XGETTEXT = @echo ' ' XGETTEXT $@;
QUIET_MSGINIT = @echo ' ' MSGINIT $@;
@@ -69,8 +70,11 @@ ifndef V
QUIET_SP = @echo ' ' SP $<;
QUIET_HDR = @echo ' ' HDR $(<:hcc=h);
QUIET_RC = @echo ' ' RC $@;
- QUIET_SPATCH = @echo ' ' SPATCH $<;
- QUIET_SPATCH_T = @echo ' ' SPATCH TEST $(@:.build/%=%);
+
+## Used in "Makefile": SPATCH
+ QUIET_SPATCH = @echo ' ' SPATCH $< \>$@;
+ QUIET_SPATCH_TEST = @echo ' ' SPATCH TEST $(@:.build/%=%);
+ QUIET_SPATCH_CAT = @echo ' ' SPATCH CAT $(@:%.patch=%.d/)\*\*.patch \>$@;
## Used in "Documentation/Makefile"
QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@;
diff --git a/sparse-index.c b/sparse-index.c
index e4a54ce194..8c269dab80 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -493,24 +493,42 @@ void clear_skip_worktree_from_present_files(struct index_state *istate)
int dir_found = 1;
int i;
+ int path_count[2] = {0, 0};
+ int restarted = 0;
if (!core_apply_sparse_checkout ||
sparse_expect_files_outside_of_patterns)
return;
+ trace2_region_enter("index", "clear_skip_worktree_from_present_files",
+ istate->repo);
restart:
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce = istate->cache[i];
- if (ce_skip_worktree(ce) &&
- path_found(ce->name, &last_dirname, &dir_len, &dir_found)) {
- if (S_ISSPARSEDIR(ce->ce_mode)) {
- ensure_full_index(istate);
- goto restart;
+ if (ce_skip_worktree(ce)) {
+ path_count[restarted]++;
+ if (path_found(ce->name, &last_dirname, &dir_len, &dir_found)) {
+ if (S_ISSPARSEDIR(ce->ce_mode)) {
+ if (restarted)
+ BUG("ensure-full-index did not fully flatten?");
+ ensure_full_index(istate);
+ restarted = 1;
+ goto restart;
+ }
+ ce->ce_flags &= ~CE_SKIP_WORKTREE;
}
- ce->ce_flags &= ~CE_SKIP_WORKTREE;
}
}
+
+ if (path_count[0])
+ trace2_data_intmax("index", istate->repo,
+ "sparse_path_count", path_count[0]);
+ if (restarted)
+ trace2_data_intmax("index", istate->repo,
+ "sparse_path_count_full", path_count[1]);
+ trace2_region_leave("index", "clear_skip_worktree_from_present_files",
+ istate->repo);
}
/*
diff --git a/submodule.c b/submodule.c
index b958162d28..f350932ef1 100644
--- a/submodule.c
+++ b/submodule.c
@@ -503,6 +503,8 @@ static void print_submodule_diff_summary(struct repository *r, struct rev_info *
void prepare_submodule_repo_env(struct strvec *out)
{
+ if (the_repository->settings.submodule_propagate_branches)
+ strvec_pushf(out, "%s=1", GIT_SUBMODULE_PROPAGATE_BRANCHES_ENVIRONMENT);
prepare_other_repo_env(out, DEFAULT_GIT_DIR_ENVIRONMENT);
}
@@ -1130,6 +1132,12 @@ static int push_submodule(const char *path,
if (for_each_remote_ref_submodule(path, has_remote, NULL) > 0) {
struct child_process cp = CHILD_PROCESS_INIT;
strvec_push(&cp.args, "push");
+ /*
+ * When recursing into a submodule, treat any "only" configurations as "on-
+ * demand", since "only" would not work (we need all submodules to be pushed
+ * in order to be able to push the superproject).
+ */
+ strvec_push(&cp.args, "--recurse-submodules=only-is-on-demand");
if (dry_run)
strvec_push(&cp.args, "--dry-run");
@@ -1363,6 +1371,18 @@ int submodule_touches_in_range(struct repository *r,
return ret;
}
+struct submodule_parallel_status {
+ size_t index_count;
+ int result;
+
+ struct string_list *submodule_names;
+ struct repository *r;
+
+ /* Pending statuses by OIDs */
+ struct status_task **oid_status_tasks;
+ int oid_status_tasks_nr, oid_status_tasks_alloc;
+};
+
struct submodule_parallel_fetch {
/*
* The index of the last index entry processed by
@@ -1445,6 +1465,12 @@ struct fetch_task {
struct oid_array *commits; /* Ensure these commits are fetched */
};
+struct status_task {
+ const char *path;
+ unsigned dirty_submodule;
+ int ignore_untracked;
+};
+
/**
* When a submodule is not defined in .gitmodules, we cannot access it
* via the regular submodule-config. Create a fake submodule, which we can
@@ -1864,6 +1890,45 @@ out:
return spf.result;
}
+static int parse_status_porcelain(char *str, size_t len,
+ unsigned *dirty_submodule,
+ int ignore_untracked)
+{
+ /* regular untracked files */
+ if (str[0] == '?')
+ *dirty_submodule |= DIRTY_SUBMODULE_UNTRACKED;
+
+ if (str[0] == 'u' ||
+ str[0] == '1' ||
+ str[0] == '2') {
+ /* T = line type, XY = status, SSSS = submodule state */
+ if (len < strlen("T XY SSSS"))
+ BUG("invalid status --porcelain=2 line %s",
+ str);
+
+ if (str[5] == 'S' && str[8] == 'U')
+ /* nested untracked file */
+ *dirty_submodule |= DIRTY_SUBMODULE_UNTRACKED;
+
+ if (str[0] == 'u' ||
+ str[0] == '2' ||
+ memcmp(str + 5, "S..U", 4))
+ /* other change */
+ *dirty_submodule |= DIRTY_SUBMODULE_MODIFIED;
+ }
+
+ if ((*dirty_submodule & DIRTY_SUBMODULE_MODIFIED) &&
+ ((*dirty_submodule & DIRTY_SUBMODULE_UNTRACKED) ||
+ ignore_untracked)) {
+ /*
+ * We're not interested in any further information from
+ * the child any more, neither output nor its exit code.
+ */
+ return 1;
+ }
+ return 0;
+}
+
unsigned is_submodule_modified(const char *path, int ignore_untracked)
{
struct child_process cp = CHILD_PROCESS_INIT;
@@ -1900,39 +1965,13 @@ unsigned is_submodule_modified(const char *path, int ignore_untracked)
fp = xfdopen(cp.out, "r");
while (strbuf_getwholeline(&buf, fp, '\n') != EOF) {
- /* regular untracked files */
- if (buf.buf[0] == '?')
- dirty_submodule |= DIRTY_SUBMODULE_UNTRACKED;
-
- if (buf.buf[0] == 'u' ||
- buf.buf[0] == '1' ||
- buf.buf[0] == '2') {
- /* T = line type, XY = status, SSSS = submodule state */
- if (buf.len < strlen("T XY SSSS"))
- BUG("invalid status --porcelain=2 line %s",
- buf.buf);
-
- if (buf.buf[5] == 'S' && buf.buf[8] == 'U')
- /* nested untracked file */
- dirty_submodule |= DIRTY_SUBMODULE_UNTRACKED;
-
- if (buf.buf[0] == 'u' ||
- buf.buf[0] == '2' ||
- memcmp(buf.buf + 5, "S..U", 4))
- /* other change */
- dirty_submodule |= DIRTY_SUBMODULE_MODIFIED;
- }
+ char *str = buf.buf;
+ const size_t len = buf.len;
- if ((dirty_submodule & DIRTY_SUBMODULE_MODIFIED) &&
- ((dirty_submodule & DIRTY_SUBMODULE_UNTRACKED) ||
- ignore_untracked)) {
- /*
- * We're not interested in any further information from
- * the child any more, neither output nor its exit code.
- */
- ignore_cp_exit_code = 1;
+ ignore_cp_exit_code = parse_status_porcelain(str, len, &dirty_submodule,
+ ignore_untracked);
+ if (ignore_cp_exit_code)
break;
- }
}
fclose(fp);
@@ -1943,6 +1982,142 @@ unsigned is_submodule_modified(const char *path, int ignore_untracked)
return dirty_submodule;
}
+static struct status_task *
+get_status_task_from_index(struct submodule_parallel_status *sps,
+ struct strbuf *err)
+{
+ for (; sps->index_count < sps->submodule_names->nr; sps->index_count++) {
+ struct submodule_status_util *util = sps->submodule_names->items[sps->index_count].util;
+ const struct cache_entry *ce = util->ce;
+ struct status_task *task;
+ struct status_task tmp = {
+ .path = ce->name,
+ .dirty_submodule = util->dirty_submodule,
+ .ignore_untracked = util->ignore_untracked,
+ };
+ struct strbuf buf = STRBUF_INIT;
+ const char *git_dir;
+
+ strbuf_addf(&buf, "%s/.git", ce->name);
+ git_dir = read_gitfile(buf.buf);
+ if (!git_dir)
+ git_dir = buf.buf;
+ if (!is_git_directory(git_dir)) {
+ if (is_directory(git_dir))
+ die(_("'%s' not recognized as a git repository"), git_dir);
+ strbuf_release(&buf);
+ /* The submodule is not checked out, so it is not modified */
+ util->dirty_submodule = 0;
+ continue;
+ }
+ strbuf_release(&buf);
+
+ task = xmalloc(sizeof(*task));
+ memcpy(task, &tmp, sizeof(*task));
+ sps->index_count++;
+ return task;
+ }
+ return NULL;
+}
+
+
+static int get_next_submodule_status(struct child_process *cp,
+ struct strbuf *err, void *data,
+ void **task_cb)
+{
+ struct submodule_parallel_status *sps = data;
+ struct status_task *task = get_status_task_from_index(sps, err);
+
+ if (!task)
+ return 0;
+
+ child_process_init(cp);
+ prepare_submodule_repo_env_in_gitdir(&cp->env);
+
+ strvec_init(&cp->args);
+ strvec_pushl(&cp->args, "status", "--porcelain=2", NULL);
+ if (task->ignore_untracked)
+ strvec_push(&cp->args, "-uno");
+
+ prepare_submodule_repo_env(&cp->env);
+ cp->git_cmd = 1;
+ cp->dir = task->path;
+ *task_cb = task;
+ return 1;
+}
+
+static int status_start_failure(struct strbuf *err,
+ void *cb, void *task_cb)
+{
+ struct submodule_parallel_status *sps = cb;
+
+ sps->result = 1;
+ return 0;
+}
+
+static void status_duplicate_output(struct strbuf *process_out,
+ struct strbuf *out,
+ void *cb, void *task_cb)
+{
+ struct status_task *task = task_cb;
+ struct string_list list = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
+
+ string_list_split(&list, process_out->buf, '\n', -1);
+
+ for_each_string_list_item(item, &list) {
+ if (parse_status_porcelain(item->string,
+ strlen(item->string),
+ &task->dirty_submodule,
+ task->ignore_untracked))
+ break;
+ }
+ string_list_clear(&list, 0);
+ strbuf_reset(out);
+}
+
+static int status_finish(int retvalue, struct strbuf *err,
+ void *cb, void *task_cb)
+{
+ struct submodule_parallel_status *sps = cb;
+ struct status_task *task = task_cb;
+ struct string_list_item *it =
+ string_list_lookup(sps->submodule_names, task->path);
+ struct submodule_status_util *util = it->util;
+
+ util->dirty_submodule = task->dirty_submodule;
+ free(task);
+
+ return 0;
+}
+
+int get_submodules_status(struct repository *r,
+ struct string_list *submodules,
+ int max_parallel_jobs)
+{
+ struct submodule_parallel_status sps = {
+ .r = r,
+ .submodule_names = submodules,
+ };
+ const struct run_process_parallel_opts opts = {
+ .tr2_category = "submodule",
+ .tr2_label = "parallel/status",
+
+ .processes = max_parallel_jobs,
+
+ .get_next_task = get_next_submodule_status,
+ .start_failure = status_start_failure,
+ .duplicate_output = status_duplicate_output,
+ .task_finished = status_finish,
+ .data = &sps,
+ };
+
+ string_list_sort(sps.submodule_names);
+ run_processes_parallel(&opts);
+
+ return sps.result;
+}
+
int submodule_uses_gitfile(const char *path)
{
struct child_process cp = CHILD_PROCESS_INIT;
@@ -2139,8 +2314,7 @@ int submodule_move_head(const char *path,
if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) {
if (old_head) {
if (!submodule_uses_gitfile(path))
- absorb_git_dir_into_superproject(path,
- ABSORB_GITDIR_RECURSE_SUBMODULES);
+ absorb_git_dir_into_superproject(path);
} else {
struct strbuf gitdir = STRBUF_INIT;
submodule_name_to_gitdir(&gitdir, the_repository,
@@ -2274,6 +2448,7 @@ static void relocate_single_git_dir_into_superproject(const char *path)
char *old_git_dir = NULL, *real_old_git_dir = NULL, *real_new_git_dir = NULL;
struct strbuf new_gitdir = STRBUF_INIT;
const struct submodule *sub;
+ size_t off = 0;
if (submodule_uses_worktrees(path))
die(_("relocate_gitdir for submodule '%s' with "
@@ -2298,9 +2473,12 @@ static void relocate_single_git_dir_into_superproject(const char *path)
die(_("could not create directory '%s'"), new_gitdir.buf);
real_new_git_dir = real_pathdup(new_gitdir.buf, 1);
- fprintf(stderr, _("Migrating git directory of '%s%s' from\n'%s' to\n'%s'\n"),
+ while (real_old_git_dir[off] && real_new_git_dir[off] &&
+ real_old_git_dir[off] == real_new_git_dir[off])
+ off++;
+ fprintf(stderr, _("Migrating git directory of '%s%s' from '%s' to '%s'\n"),
get_super_prefix_or_empty(), path,
- real_old_git_dir, real_new_git_dir);
+ real_old_git_dir + off, real_new_git_dir + off);
relocate_gitdir(path, real_old_git_dir, real_new_git_dir);
@@ -2310,13 +2488,29 @@ static void relocate_single_git_dir_into_superproject(const char *path)
strbuf_release(&new_gitdir);
}
+static void absorb_git_dir_into_superproject_recurse(const char *path)
+{
+
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.dir = path;
+ cp.git_cmd = 1;
+ cp.no_stdin = 1;
+ strvec_pushf(&cp.args, "--super-prefix=%s%s/",
+ get_super_prefix_or_empty(), path);
+ strvec_pushl(&cp.args, "submodule--helper",
+ "absorbgitdirs", NULL);
+ prepare_submodule_repo_env(&cp.env);
+ if (run_command(&cp))
+ die(_("could not recurse into submodule '%s'"), path);
+}
+
/*
* Migrate the git directory of the submodule given by path from
* having its git directory within the working tree to the git dir nested
* in its superprojects git dir under modules/.
*/
-void absorb_git_dir_into_superproject(const char *path,
- unsigned flags)
+void absorb_git_dir_into_superproject(const char *path)
{
int err_code;
const char *sub_git_dir;
@@ -2365,23 +2559,7 @@ void absorb_git_dir_into_superproject(const char *path,
}
strbuf_release(&gitdir);
- if (flags & ABSORB_GITDIR_RECURSE_SUBMODULES) {
- struct child_process cp = CHILD_PROCESS_INIT;
-
- if (flags & ~ABSORB_GITDIR_RECURSE_SUBMODULES)
- BUG("we don't know how to pass the flags down?");
-
- cp.dir = path;
- cp.git_cmd = 1;
- cp.no_stdin = 1;
- strvec_pushf(&cp.args, "--super-prefix=%s%s/",
- get_super_prefix_or_empty(), path);
- strvec_pushl(&cp.args, "submodule--helper",
- "absorbgitdirs", NULL);
- prepare_submodule_repo_env(&cp.env);
- if (run_command(&cp))
- die(_("could not recurse into submodule '%s'"), path);
- }
+ absorb_git_dir_into_superproject_recurse(path);
}
int get_superproject_working_tree(struct strbuf *buf)
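
For reference, the two kinds of "status --porcelain=2" records the extracted parser reacts to (the lines below are illustrative, not literal output): '?' entries mark untracked files, and ordinary '1' records carry the four-character submodule state at offset 5, where "S..U" flags only a nested untracked file and any other state (or a 'u'/'2' record) counts as a modification.

        unsigned dirty = 0;
        char line1[] = "? untracked-file";
        char line2[] = "1 .M S.M. 160000 160000 160000 <oid> <oid> sub";

        parse_status_porcelain(line1, strlen(line1), &dirty, 0);
        /* sets DIRTY_SUBMODULE_UNTRACKED, returns 0 */
        parse_status_porcelain(line2, strlen(line2), &dirty, 0);
        /* "S.M." != "S..U", so DIRTY_SUBMODULE_MODIFIED is added; with both
         * bits set, the non-zero return lets the caller stop reading */
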
diff --git a/submodule.h b/submodule.h
index 6a9fec6de1..bdcc597d0d 100644
--- a/submodule.h
+++ b/submodule.h
@@ -41,6 +41,12 @@ struct submodule_update_strategy {
.type = SM_UPDATE_UNSPECIFIED, \
}
+struct submodule_status_util {
+ int changed, ignore_untracked;
+ unsigned dirty_submodule, newmode;
+ struct cache_entry *ce;
+};
+
int is_gitmodules_unmerged(struct index_state *istate);
int is_writing_gitmodules_ok(void);
int is_staging_gitmodules_ok(struct index_state *istate);
@@ -94,6 +100,9 @@ int fetch_submodules(struct repository *r,
int command_line_option,
int default_option,
int quiet, int max_parallel_jobs);
+int get_submodules_status(struct repository *r,
+ struct string_list *submodules,
+ int max_parallel_jobs);
unsigned is_submodule_modified(const char *path, int ignore_untracked);
int submodule_uses_gitfile(const char *path);
@@ -164,9 +173,7 @@ void submodule_unset_core_worktree(const struct submodule *sub);
*/
void prepare_submodule_repo_env(struct strvec *env);
-#define ABSORB_GITDIR_RECURSE_SUBMODULES (1<<0)
-void absorb_git_dir_into_superproject(const char *path,
- unsigned flags);
+void absorb_git_dir_into_superproject(const char *path);
/*
* Return the absolute path of the working tree of the superproject, which this
diff --git a/t/Makefile b/t/Makefile
index 882782a519..2c2b252240 100644
--- a/t/Makefile
+++ b/t/Makefile
@@ -94,7 +94,7 @@ check-chainlint:
done \
} >'$(CHAINLINTTMP_SQ)'/expect && \
$(CHAINLINT) --emit-all '$(CHAINLINTTMP_SQ)'/tests | \
- grep -v '^[ ]*$$' >'$(CHAINLINTTMP_SQ)'/actual && \
+ sed -e 's/^[1-9][0-9]* //;/^[ ]*$$/d' >'$(CHAINLINTTMP_SQ)'/actual && \
if test -f ../GIT-BUILD-OPTIONS; then \
. ../GIT-BUILD-OPTIONS; \
fi && \
diff --git a/t/README b/t/README
index 979b2d4833..fc0daef2e4 100644
--- a/t/README
+++ b/t/README
@@ -231,6 +231,9 @@ override the location of the dashed-form subcommands (what
GIT_EXEC_PATH would be used for during normal operation).
GIT_TEST_EXEC_PATH defaults to `$GIT_TEST_INSTALLED/git --exec-path`.
+Similar to GIT_TEST_INSTALLED, GIT_TEST_BUILD_DIR can be pointed to
+another git.git checkout's build directory, to test its built binaries
+against the tests in this checkout.
Skipping Tests
--------------
diff --git a/t/chainlint.pl b/t/chainlint.pl
index 976db4b8a0..4e47e808d0 100755
--- a/t/chainlint.pl
+++ b/t/chainlint.pl
@@ -67,6 +67,7 @@ sub new {
bless {
parser => $parser,
buff => $s,
+ lineno => 1,
heretags => []
} => $class;
}
@@ -75,7 +76,9 @@ sub scan_heredoc_tag {
my $self = shift @_;
${$self->{buff}} =~ /\G(-?)/gc;
my $indented = $1;
- my $tag = $self->scan_token();
+ my $token = $self->scan_token();
+ return "<<$indented" unless $token;
+ my $tag = $token->[0];
$tag =~ s/['"\\]//g;
push(@{$self->{heretags}}, $indented ? "\t$tag" : "$tag");
return "<<$indented$tag";
@@ -95,7 +98,9 @@ sub scan_op {
sub scan_sqstring {
my $self = shift @_;
${$self->{buff}} =~ /\G([^']*'|.*\z)/sgc;
- return "'" . $1;
+ my $s = $1;
+ $self->{lineno} += () = $s =~ /\n/sg;
+ return "'" . $s;
}
sub scan_dqstring {
@@ -113,7 +118,7 @@ sub scan_dqstring {
if ($c eq '\\') {
$s .= '\\', last unless $$b =~ /\G(.)/sgc;
$c = $1;
- next if $c eq "\n"; # line splice
+ $self->{lineno}++, next if $c eq "\n"; # line splice
# backslash escapes only $, `, ", \ in dq-string
$s .= '\\' unless $c =~ /^[\$`"\\]$/;
$s .= $c;
@@ -121,6 +126,7 @@ sub scan_dqstring {
}
die("internal error scanning dq-string '$c'\n");
}
+ $self->{lineno} += () = $s =~ /\n/sg;
return $s;
}
@@ -135,6 +141,7 @@ sub scan_balanced {
$depth--;
last if $depth == 0;
}
+ $self->{lineno} += () = $s =~ /\n/sg;
return $s;
}
@@ -149,7 +156,7 @@ sub scan_dollar {
my $self = shift @_;
my $b = $self->{buff};
return $self->scan_balanced('(', ')') if $$b =~ /\G\((?=\()/gc; # $((...))
- return '(' . join(' ', $self->scan_subst()) . ')' if $$b =~ /\G\(/gc; # $(...)
+ return '(' . join(' ', map {$_->[0]} $self->scan_subst()) . ')' if $$b =~ /\G\(/gc; # $(...)
return $self->scan_balanced('{', '}') if $$b =~ /\G\{/gc; # ${...}
return $1 if $$b =~ /\G(\w+)/gc; # $var
return $1 if $$b =~ /\G([@*#?$!0-9-])/gc; # $*, $1, $$, etc.
@@ -161,8 +168,11 @@ sub swallow_heredocs {
my $b = $self->{buff};
my $tags = $self->{heretags};
while (my $tag = shift @$tags) {
+ my $start = pos($$b);
my $indent = $tag =~ s/^\t// ? '\\s*' : '';
$$b =~ /(?:\G|\n)$indent\Q$tag\E(?:\n|\z)/gc;
+ my $body = substr($$b, $start, pos($$b) - $start);
+ $self->{lineno} += () = $body =~ /\n/sg;
}
}
@@ -170,34 +180,37 @@ sub scan_token {
my $self = shift @_;
my $b = $self->{buff};
my $token = '';
+ my ($start, $startln);
RESTART:
+ $startln = $self->{lineno};
$$b =~ /\G[ \t]+/gc; # skip whitespace (but not newline)
- return "\n" if $$b =~ /\G#[^\n]*(?:\n|\z)/gc; # comment
+ $start = pos($$b) || 0;
+ $self->{lineno}++, return ["\n", $start, pos($$b), $startln, $startln] if $$b =~ /\G#[^\n]*(?:\n|\z)/gc; # comment
while (1) {
# slurp up non-special characters
$token .= $1 if $$b =~ /\G([^\\;&|<>(){}'"\$\s]+)/gc;
# handle special characters
last unless $$b =~ /\G(.)/sgc;
my $c = $1;
- last if $c =~ /^[ \t]$/; # whitespace ends token
+ pos($$b)--, last if $c =~ /^[ \t]$/; # whitespace ends token
pos($$b)--, last if length($token) && $c =~ /^[;&|<>(){}\n]$/;
$token .= $self->scan_sqstring(), next if $c eq "'";
$token .= $self->scan_dqstring(), next if $c eq '"';
$token .= $c . $self->scan_dollar(), next if $c eq '$';
- $self->swallow_heredocs(), $token = $c, last if $c eq "\n";
+ $self->{lineno}++, $self->swallow_heredocs(), $token = $c, last if $c eq "\n";
$token = $self->scan_op($c), last if $c =~ /^[;&|<>]$/;
$token = $c, last if $c =~ /^[(){}]$/;
if ($c eq '\\') {
$token .= '\\', last unless $$b =~ /\G(.)/sgc;
$c = $1;
- next if $c eq "\n" && length($token); # line splice
- goto RESTART if $c eq "\n"; # line splice
+ $self->{lineno}++, next if $c eq "\n" && length($token); # line splice
+ $self->{lineno}++, goto RESTART if $c eq "\n"; # line splice
$token .= '\\' . $c;
next;
}
die("internal error scanning character '$c'\n");
}
- return length($token) ? $token : undef;
+ return length($token) ? [$token, $start, pos($$b), $startln, $self->{lineno}] : undef;
}
# ShellParser parses POSIX shell scripts (with minor extensions for Bash). It
@@ -239,14 +252,14 @@ sub stop_at {
my ($self, $token) = @_;
return 1 unless defined($token);
my $stop = ${$self->{stop}}[-1] if @{$self->{stop}};
- return defined($stop) && $token =~ $stop;
+ return defined($stop) && $token->[0] =~ $stop;
}
sub expect {
my ($self, $expect) = @_;
my $token = $self->next_token();
- return $token if defined($token) && $token eq $expect;
- push(@{$self->{output}}, "?!ERR?! expected '$expect' but found '" . (defined($token) ? $token : "<end-of-input>") . "'\n");
+ return $token if defined($token) && $token->[0] eq $expect;
+ push(@{$self->{output}}, "?!ERR?! expected '$expect' but found '" . (defined($token) ? $token->[0] : "<end-of-input>") . "'\n");
$self->untoken($token) if defined($token);
return ();
}
@@ -255,7 +268,7 @@ sub optional_newlines {
my $self = shift @_;
my @tokens;
while (my $token = $self->peek()) {
- last unless $token eq "\n";
+ last unless $token->[0] eq "\n";
push(@tokens, $self->next_token());
}
return @tokens;
@@ -278,7 +291,7 @@ sub parse_case_pattern {
my @tokens;
while (defined(my $token = $self->next_token())) {
push(@tokens, $token);
- last if $token eq ')';
+ last if $token->[0] eq ')';
}
return @tokens;
}
@@ -293,13 +306,13 @@ sub parse_case {
$self->optional_newlines());
while (1) {
my $token = $self->peek();
- last unless defined($token) && $token ne 'esac';
+ last unless defined($token) && $token->[0] ne 'esac';
push(@tokens,
$self->parse_case_pattern(),
$self->optional_newlines(),
$self->parse(qr/^(?:;;|esac)$/)); # item body
$token = $self->peek();
- last unless defined($token) && $token ne 'esac';
+ last unless defined($token) && $token->[0] ne 'esac';
push(@tokens,
$self->expect(';;'),
$self->optional_newlines());
@@ -315,7 +328,7 @@ sub parse_for {
$self->next_token(), # variable
$self->optional_newlines());
my $token = $self->peek();
- if (defined($token) && $token eq 'in') {
+ if (defined($token) && $token->[0] eq 'in') {
push(@tokens,
$self->expect('in'),
$self->optional_newlines());
@@ -339,11 +352,11 @@ sub parse_if {
$self->optional_newlines(),
$self->parse(qr/^(?:elif|else|fi)$/)); # if/elif body
my $token = $self->peek();
- last unless defined($token) && $token eq 'elif';
+ last unless defined($token) && $token->[0] eq 'elif';
push(@tokens, $self->expect('elif'));
}
my $token = $self->peek();
- if (defined($token) && $token eq 'else') {
+ if (defined($token) && $token->[0] eq 'else') {
push(@tokens,
$self->expect('else'),
$self->optional_newlines(),
@@ -380,7 +393,7 @@ sub parse_bash_array_assignment {
my @tokens = $self->expect('(');
while (defined(my $token = $self->next_token())) {
push(@tokens, $token);
- last if $token eq ')';
+ last if $token->[0] eq ')';
}
return @tokens;
}
@@ -398,29 +411,31 @@ sub parse_cmd {
my $self = shift @_;
my $cmd = $self->next_token();
return () unless defined($cmd);
- return $cmd if $cmd eq "\n";
+ return $cmd if $cmd->[0] eq "\n";
my $token;
my @tokens = $cmd;
- if ($cmd eq '!') {
+ if ($cmd->[0] eq '!') {
push(@tokens, $self->parse_cmd());
return @tokens;
- } elsif (my $f = $compound{$cmd}) {
+ } elsif (my $f = $compound{$cmd->[0]}) {
push(@tokens, $self->$f());
- } elsif (defined($token = $self->peek()) && $token eq '(') {
- if ($cmd !~ /\w=$/) {
+ } elsif (defined($token = $self->peek()) && $token->[0] eq '(') {
+ if ($cmd->[0] !~ /\w=$/) {
push(@tokens, $self->parse_func());
return @tokens;
}
- $tokens[-1] .= join(' ', $self->parse_bash_array_assignment());
+ my @array = $self->parse_bash_array_assignment();
+ $tokens[-1]->[0] .= join(' ', map {$_->[0]} @array);
+ $tokens[-1]->[2] = $array[$#array][2] if @array;
}
while (defined(my $token = $self->next_token())) {
$self->untoken($token), last if $self->stop_at($token);
push(@tokens, $token);
- last if $token =~ /^(?:[;&\n|]|&&|\|\|)$/;
+ last if $token->[0] =~ /^(?:[;&\n|]|&&|\|\|)$/;
}
- push(@tokens, $self->next_token()) if $tokens[-1] ne "\n" && defined($token = $self->peek()) && $token eq "\n";
+ push(@tokens, $self->next_token()) if $tokens[-1]->[0] ne "\n" && defined($token = $self->peek()) && $token->[0] eq "\n";
return @tokens;
}
@@ -453,11 +468,18 @@ package TestParser;
use base 'ShellParser';
+sub new {
+ my $class = shift @_;
+ my $self = $class->SUPER::new(@_);
+ $self->{problems} = [];
+ return $self;
+}
+
sub find_non_nl {
my $tokens = shift @_;
my $n = shift @_;
$n = $#$tokens if !defined($n);
- $n-- while $n >= 0 && $$tokens[$n] eq "\n";
+ $n-- while $n >= 0 && $$tokens[$n]->[0] eq "\n";
return $n;
}
@@ -467,7 +489,7 @@ sub ends_with {
for my $needle (reverse(@$needles)) {
return undef if $n < 0;
$n = find_non_nl($tokens, $n), next if $needle eq "\n";
- return undef if $$tokens[$n] !~ $needle;
+ return undef if $$tokens[$n]->[0] !~ $needle;
$n--;
}
return 1;
@@ -486,13 +508,13 @@ sub parse_loop_body {
my $self = shift @_;
my @tokens = $self->SUPER::parse_loop_body(@_);
# did loop signal failure via "|| return" or "|| exit"?
- return @tokens if !@tokens || grep(/^(?:return|exit|\$\?)$/, @tokens);
+ return @tokens if !@tokens || grep {$_->[0] =~ /^(?:return|exit|\$\?)$/} @tokens;
# did loop upstream of a pipe signal failure via "|| echo 'impossible
# text'" as the final command in the loop body?
return @tokens if ends_with(\@tokens, [qr/^\|\|$/, "\n", qr/^echo$/, qr/^.+$/]);
# flag missing "return/exit" handling explicit failure in loop body
my $n = find_non_nl(\@tokens);
- splice(@tokens, $n + 1, 0, '?!LOOP?!');
+ push(@{$self->{problems}}, ['LOOP', $tokens[$n]]);
return @tokens;
}
@@ -505,8 +527,13 @@ my @safe_endings = (
sub accumulate {
my ($self, $tokens, $cmd) = @_;
+ my $problems = $self->{problems};
+
+ # no previous command to check for missing "&&"
goto DONE unless @$tokens;
- goto DONE if @$cmd == 1 && $$cmd[0] eq "\n";
+
+ # new command is empty line; can't yet check if previous is missing "&&"
+ goto DONE if @$cmd == 1 && $$cmd[0]->[0] eq "\n";
# did previous command end with "&&", "|", "|| return" or similar?
goto DONE if match_ending($tokens, \@safe_endings);
@@ -514,20 +541,20 @@ sub accumulate {
# if this command handles "$?" specially, then okay for previous
# command to be missing "&&"
for my $token (@$cmd) {
- goto DONE if $token =~ /\$\?/;
+ goto DONE if $token->[0] =~ /\$\?/;
}
# if this command is "false", "return 1", or "exit 1" (which signal
# failure explicitly), then okay for all preceding commands to be
# missing "&&"
- if ($$cmd[0] =~ /^(?:false|return|exit)$/) {
- @$tokens = grep(!/^\?!AMP\?!$/, @$tokens);
+ if ($$cmd[0]->[0] =~ /^(?:false|return|exit)$/) {
+ @$problems = grep {$_->[0] ne 'AMP'} @$problems;
goto DONE;
}
# flag missing "&&" at end of previous command
my $n = find_non_nl($tokens);
- splice(@$tokens, $n + 1, 0, '?!AMP?!') unless $n < 0;
+ push(@$problems, ['AMP', $tokens->[$n]]) unless $n < 0;
DONE:
$self->SUPER::accumulate($tokens, $cmd);
@@ -553,7 +580,7 @@ sub new {
# composition of multiple strings and non-string character runs; for instance,
# `"test body"` unwraps to `test body`; `word"a b"42'c d'` to `worda b42c d`
sub unwrap {
- my $token = @_ ? shift @_ : $_;
+ my $token = (@_ ? shift @_ : $_)->[0];
# simple case: 'sqstring' or "dqstring"
return $token if $token =~ s/^'([^']*)'$/$1/;
return $token if $token =~ s/^"([^"]*)"$/$1/;
@@ -584,13 +611,25 @@ sub check_test {
$self->{ntests}++;
my $parser = TestParser->new(\$body);
my @tokens = $parser->parse();
- return unless $emit_all || grep(/\?![^?]+\?!/, @tokens);
+ my $problems = $parser->{problems};
+ return unless $emit_all || @$problems;
my $c = main::fd_colors(1);
- my $checked = join(' ', @tokens);
- $checked =~ s/^\n//;
- $checked =~ s/^ //mg;
- $checked =~ s/ $//mg;
+ my $lineno = $_[1]->[3];
+ my $start = 0;
+ my $checked = '';
+ for (sort {$a->[1]->[2] <=> $b->[1]->[2]} @$problems) {
+ my ($label, $token) = @$_;
+ my $pos = $token->[2];
+ $checked .= substr($body, $start, $pos - $start) . " ?!$label?! ";
+ $start = $pos;
+ }
+ $checked .= substr($body, $start);
+ $checked =~ s/^/$lineno++ . ' '/mge;
+ $checked =~ s/^\d+ \n//;
+ $checked =~ s/(\s) \?!/$1?!/mg;
+ $checked =~ s/\?! (\s)/?!$1/mg;
$checked =~ s/(\?![^?]+\?!)/$c->{rev}$c->{red}$1$c->{reset}/mg;
+ $checked =~ s/^\d+/$c->{dim}$&$c->{reset}/mg;
$checked .= "\n" unless $checked =~ /\n$/;
push(@{$self->{output}}, "$c->{blue}# chainlint: $title$c->{reset}\n$checked");
}
@@ -598,9 +637,9 @@ sub check_test {
sub parse_cmd {
my $self = shift @_;
my @tokens = $self->SUPER::parse_cmd();
- return @tokens unless @tokens && $tokens[0] =~ /^test_expect_(?:success|failure)$/;
+ return @tokens unless @tokens && $tokens[0]->[0] =~ /^test_expect_(?:success|failure)$/;
my $n = $#tokens;
- $n-- while $n >= 0 && $tokens[$n] =~ /^(?:[;&\n|]|&&|\|\|)$/;
+ $n-- while $n >= 0 && $tokens[$n]->[0] =~ /^(?:[;&\n|]|&&|\|\|)$/;
$self->check_test($tokens[1], $tokens[2]) if $n == 2; # title body
$self->check_test($tokens[2], $tokens[3]) if $n > 2; # prereq title body
return @tokens;
@@ -622,25 +661,39 @@ if (eval {require Time::HiRes; Time::HiRes->import(); 1;}) {
# thread and ignore %ENV changes in subthreads.
$ENV{TERM} = $ENV{USER_TERM} if $ENV{USER_TERM};
-my @NOCOLORS = (bold => '', rev => '', reset => '', blue => '', green => '', red => '');
+my @NOCOLORS = (bold => '', rev => '', dim => '', reset => '', blue => '', green => '', red => '');
my %COLORS = ();
sub get_colors {
return \%COLORS if %COLORS;
- if (exists($ENV{NO_COLOR}) ||
- system("tput sgr0 >/dev/null 2>&1") != 0 ||
- system("tput bold >/dev/null 2>&1") != 0 ||
- system("tput rev >/dev/null 2>&1") != 0 ||
- system("tput setaf 1 >/dev/null 2>&1") != 0) {
+ if (exists($ENV{NO_COLOR})) {
%COLORS = @NOCOLORS;
return \%COLORS;
}
- %COLORS = (bold => `tput bold`,
- rev => `tput rev`,
- reset => `tput sgr0`,
- blue => `tput setaf 4`,
- green => `tput setaf 2`,
- red => `tput setaf 1`);
- chomp(%COLORS);
+ if ($ENV{TERM} =~ /xterm|xterm-\d+color|xterm-new|xterm-direct|nsterm|nsterm-\d+color|nsterm-direct/) {
+ %COLORS = (bold => "\e[1m",
+ rev => "\e[7m",
+ dim => "\e[2m",
+ reset => "\e[0m",
+ blue => "\e[34m",
+ green => "\e[32m",
+ red => "\e[31m");
+ return \%COLORS;
+ }
+ if (system("tput sgr0 >/dev/null 2>&1") == 0 &&
+ system("tput bold >/dev/null 2>&1") == 0 &&
+ system("tput rev >/dev/null 2>&1") == 0 &&
+ system("tput dim >/dev/null 2>&1") == 0 &&
+ system("tput setaf 1 >/dev/null 2>&1") == 0) {
+ %COLORS = (bold => `tput bold`,
+ rev => `tput rev`,
+ dim => `tput dim`,
+ reset => `tput sgr0`,
+ blue => `tput setaf 4`,
+ green => `tput setaf 2`,
+ red => `tput setaf 1`);
+ return \%COLORS;
+ }
+ %COLORS = @NOCOLORS;
return \%COLORS;
}
diff --git a/t/chainlint/block-comment.expect b/t/chainlint/block-comment.expect
index d10b2eeaf2..df2beea888 100644
--- a/t/chainlint/block-comment.expect
+++ b/t/chainlint/block-comment.expect
@@ -1,6 +1,8 @@
(
{
+ # show a
echo a &&
+ # show b
echo b
}
)
diff --git a/t/chainlint/case-comment.expect b/t/chainlint/case-comment.expect
index 1e4b054bda..641c157b98 100644
--- a/t/chainlint/case-comment.expect
+++ b/t/chainlint/case-comment.expect
@@ -1,7 +1,10 @@
(
case "$x" in
+ # found foo
x) foo ;;
+ # found other
*)
+ # treat it as bar
bar
;;
esac
diff --git a/t/chainlint/close-subshell.expect b/t/chainlint/close-subshell.expect
index 0f87db9ae6..2192a2870a 100644
--- a/t/chainlint/close-subshell.expect
+++ b/t/chainlint/close-subshell.expect
@@ -15,7 +15,8 @@
) | wuzzle &&
(
bop
-) | fazz fozz &&
+) | fazz \
+ fozz &&
(
bup
) |
diff --git a/t/chainlint/comment.expect b/t/chainlint/comment.expect
index f76fde1ffb..a68f1f9d7c 100644
--- a/t/chainlint/comment.expect
+++ b/t/chainlint/comment.expect
@@ -1,4 +1,8 @@
(
+ # comment 1
nothing &&
+ # comment 2
something
+ # comment 3
+ # comment 4
)
diff --git a/t/chainlint/double-here-doc.expect b/t/chainlint/double-here-doc.expect
index 75477bb1ad..cd584a4357 100644
--- a/t/chainlint/double-here-doc.expect
+++ b/t/chainlint/double-here-doc.expect
@@ -1,2 +1,12 @@
-run_sub_test_lib_test_err run-inv-range-start "--run invalid range start" --run="a-5" <<-EOF &&
-check_sub_test_lib_test_err run-inv-range-start <<-EOF_OUT 3 <<-EOF_ERR
+run_sub_test_lib_test_err run-inv-range-start \
+ "--run invalid range start" \
+ --run="a-5" <<-\EOF &&
+test_expect_success "passing test #1" "true"
+test_done
+EOF
+check_sub_test_lib_test_err run-inv-range-start \
+ <<-\EOF_OUT 3<<-EOF_ERR
+> FATAL: Unexpected exit with code 1
+EOF_OUT
+> error: --run: invalid non-numeric in range start: ${SQ}a-5${SQ}
+EOF_ERR
diff --git a/t/chainlint/empty-here-doc.expect b/t/chainlint/empty-here-doc.expect
index f42f2d41ba..e8733c97c6 100644
--- a/t/chainlint/empty-here-doc.expect
+++ b/t/chainlint/empty-here-doc.expect
@@ -1,3 +1,4 @@
git ls-tree $tree path > current &&
-cat > expected <<EOF &&
+cat > expected <<\EOF &&
+EOF
test_output
diff --git a/t/chainlint/for-loop.expect b/t/chainlint/for-loop.expect
index a5810c9bdd..d65c82129a 100644
--- a/t/chainlint/for-loop.expect
+++ b/t/chainlint/for-loop.expect
@@ -2,7 +2,9 @@
for i in a b c
do
echo $i ?!AMP?!
- cat <<-EOF ?!LOOP?!
+ cat <<-\EOF ?!LOOP?!
+ bar
+ EOF
done ?!AMP?!
for i in a b c; do
echo $i &&
diff --git a/t/chainlint/here-doc-close-subshell.expect b/t/chainlint/here-doc-close-subshell.expect
index 2af9ced71c..7d9c2b5607 100644
--- a/t/chainlint/here-doc-close-subshell.expect
+++ b/t/chainlint/here-doc-close-subshell.expect
@@ -1,2 +1,4 @@
(
- cat <<-INPUT)
+ cat <<-\INPUT)
+ fizz
+ INPUT
diff --git a/t/chainlint/here-doc-indent-operator.expect b/t/chainlint/here-doc-indent-operator.expect
index fb6cf7285d..f92a7ce999 100644
--- a/t/chainlint/here-doc-indent-operator.expect
+++ b/t/chainlint/here-doc-indent-operator.expect
@@ -1,5 +1,11 @@
-cat > expect <<-EOF &&
+cat >expect <<- EOF &&
+header: 43475048 1 $(test_oid oid_version) $NUM_CHUNKS 0
+num_commits: $1
+chunks: oid_fanout oid_lookup commit_metadata generation_data bloom_indexes bloom_data
+EOF
-cat > expect <<-EOF ?!AMP?!
+cat >expect << -EOF ?!AMP?!
+this is not indented
+-EOF
cleanup
diff --git a/t/chainlint/here-doc-multi-line-command-subst.expect b/t/chainlint/here-doc-multi-line-command-subst.expect
index f8b3aa73c4..b7364c82c8 100644
--- a/t/chainlint/here-doc-multi-line-command-subst.expect
+++ b/t/chainlint/here-doc-multi-line-command-subst.expect
@@ -1,5 +1,8 @@
(
- x=$(bobble <<-END &&
+ x=$(bobble <<-\END &&
+ fossil
+ vegetable
+ END
wiffle) ?!AMP?!
echo $x
)
diff --git a/t/chainlint/here-doc-multi-line-string.expect b/t/chainlint/here-doc-multi-line-string.expect
index be64b26869..6c13bdcbfb 100644
--- a/t/chainlint/here-doc-multi-line-string.expect
+++ b/t/chainlint/here-doc-multi-line-string.expect
@@ -1,5 +1,7 @@
(
- cat <<-TXT && echo "multi-line
+ cat <<-\TXT && echo "multi-line
string" ?!AMP?!
+ fizzle
+ TXT
bap
)
diff --git a/t/chainlint/here-doc.expect b/t/chainlint/here-doc.expect
index 110059ba58..1df3f78282 100644
--- a/t/chainlint/here-doc.expect
+++ b/t/chainlint/here-doc.expect
@@ -1,7 +1,25 @@
-boodle wobba gorgo snoot wafta snurb <<EOF &&
+boodle wobba \
+ gorgo snoot \
+ wafta snurb <<EOF &&
+quoth the raven,
+nevermore...
+EOF
cat <<-Arbitrary_Tag_42 >foo &&
+snoz
+boz
+woz
+Arbitrary_Tag_42
-cat <<zump >boo &&
+cat <<"zump" >boo &&
+snoz
+boz
+woz
+zump
-horticulture <<EOF
+horticulture <<\EOF
+gomez
+morticia
+wednesday
+pugsly
+EOF
diff --git a/t/chainlint/if-then-else.expect b/t/chainlint/if-then-else.expect
index 44d86c3597..cbaaf857d4 100644
--- a/t/chainlint/if-then-else.expect
+++ b/t/chainlint/if-then-else.expect
@@ -8,7 +8,9 @@
echo foo
else
echo foo &&
- cat <<-EOF
+ cat <<-\EOF
+ bar
+ EOF
fi ?!AMP?!
echo poodle
) &&
diff --git a/t/chainlint/incomplete-line.expect b/t/chainlint/incomplete-line.expect
index ffac8f9018..134d3a14f5 100644
--- a/t/chainlint/incomplete-line.expect
+++ b/t/chainlint/incomplete-line.expect
@@ -1,4 +1,10 @@
-line 1 line 2 line 3 line 4 &&
+line 1 \
+line 2 \
+line 3 \
+line 4 &&
(
- line 5 line 6 line 7 line 8
+ line 5 \
+ line 6 \
+ line 7 \
+ line 8
)
diff --git a/t/chainlint/inline-comment.expect b/t/chainlint/inline-comment.expect
index dd0dace077..6bad218530 100644
--- a/t/chainlint/inline-comment.expect
+++ b/t/chainlint/inline-comment.expect
@@ -1,6 +1,6 @@
(
- foobar &&
- barfoo ?!AMP?!
+ foobar && # comment 1
+ barfoo ?!AMP?! # wrong position for &&
flibble "not a # comment"
) &&
diff --git a/t/chainlint/loop-detect-status.expect b/t/chainlint/loop-detect-status.expect
index 0ad23bb35e..24da9e86d5 100644
--- a/t/chainlint/loop-detect-status.expect
+++ b/t/chainlint/loop-detect-status.expect
@@ -2,7 +2,7 @@
do
printf "Generating blob $i/$blobcount\r" >& 2 &&
printf "blob\nmark :$i\ndata $blobsize\n" &&
-
+ #test-tool genrandom $i $blobsize &&
printf "%-${blobsize}s" $i &&
echo "M 100644 :$i $i" >> commit &&
i=$(($i+1)) ||
diff --git a/t/chainlint/nested-here-doc.expect b/t/chainlint/nested-here-doc.expect
index e3bef63f75..29b3832a98 100644
--- a/t/chainlint/nested-here-doc.expect
+++ b/t/chainlint/nested-here-doc.expect
@@ -1,7 +1,30 @@
cat <<ARBITRARY >foop &&
+naddle
+fub <<EOF
+ nozzle
+ noodle
+EOF
+formp
+ARBITRARY
(
- cat <<-INPUT_END &&
- cat <<-EOT ?!AMP?!
+ cat <<-\INPUT_END &&
+ fish are mice
+ but geese go slow
+ data <<EOF
+ perl is lerp
+ and nothing else
+ EOF
+ toink
+ INPUT_END
+
+ cat <<-\EOT ?!AMP?!
+ text goes here
+ data <<EOF
+ data goes here
+ EOF
+ more test here
+ EOT
+
foobar
)
diff --git a/t/chainlint/nested-subshell-comment.expect b/t/chainlint/nested-subshell-comment.expect
index be4b27a305..9138cf386d 100644
--- a/t/chainlint/nested-subshell-comment.expect
+++ b/t/chainlint/nested-subshell-comment.expect
@@ -2,6 +2,8 @@
foo &&
(
bar &&
+ # bottles wobble while fiddles gobble
+ # minor numbers of cows (or do they?)
baz &&
snaff
) ?!AMP?!
diff --git a/t/chainlint/subshell-here-doc.expect b/t/chainlint/subshell-here-doc.expect
index 029d129299..52789278d1 100644
--- a/t/chainlint/subshell-here-doc.expect
+++ b/t/chainlint/subshell-here-doc.expect
@@ -1,10 +1,30 @@
(
- echo wobba gorgo snoot wafta snurb <<-EOF &&
+ echo wobba \
+ gorgo snoot \
+ wafta snurb <<-EOF &&
+ quoth the raven,
+ nevermore...
+ EOF
+
cat <<EOF >bip ?!AMP?!
- echo <<-EOF >bop
+ fish fly high
+EOF
+
+ echo <<-\EOF >bop
+ gomez
+ morticia
+ wednesday
+ pugsly
+ EOF
) &&
(
- cat <<-ARBITRARY >bup &&
- cat <<-ARBITRARY3 >bup3 &&
+ cat <<-\ARBITRARY >bup &&
+ glink
+ FIZZ
+ ARBITRARY
+ cat <<-"ARBITRARY3" >bup3 &&
+ glink
+ FIZZ
+ ARBITRARY3
meep
)
diff --git a/t/chainlint/t7900-subtree.expect b/t/chainlint/t7900-subtree.expect
index 69167da2f2..71b3b3bc20 100644
--- a/t/chainlint/t7900-subtree.expect
+++ b/t/chainlint/t7900-subtree.expect
@@ -4,12 +4,16 @@ sub2
sub3
sub4" &&
chks_sub=$(cat <<TXT | sed "s,^,sub dir/,"
+$chks
+TXT
) &&
chkms="main-sub1
main-sub2
main-sub3
main-sub4" &&
chkms_sub=$(cat <<TXT | sed "s,^,sub dir/,"
+$chkms
+TXT
) &&
subfiles=$(git ls-files) &&
check_equal "$subfiles" "$chkms
diff --git a/t/chainlint/while-loop.expect b/t/chainlint/while-loop.expect
index f272aa21fe..1f5eaea0fd 100644
--- a/t/chainlint/while-loop.expect
+++ b/t/chainlint/while-loop.expect
@@ -2,7 +2,9 @@
while true
do
echo foo ?!AMP?!
- cat <<-EOF ?!LOOP?!
+ cat <<-\EOF ?!LOOP?!
+ bar
+ EOF
done ?!AMP?!
while true; do
echo foo &&
diff --git a/t/helper/test-cache-tree.c b/t/helper/test-cache-tree.c
new file mode 100644
index 0000000000..93051b25f5
--- /dev/null
+++ b/t/helper/test-cache-tree.c
@@ -0,0 +1,64 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "tree.h"
+#include "cache-tree.h"
+#include "parse-options.h"
+
+static char const * const test_cache_tree_usage[] = {
+ N_("test-tool cache-tree <options> (control|prime|update)"),
+ NULL
+};
+
+int cmd__cache_tree(int argc, const char **argv)
+{
+ struct object_id oid;
+ struct tree *tree;
+ int empty = 0;
+ int invalidate_qty = 0;
+ int i;
+
+ struct option options[] = {
+ OPT_BOOL(0, "empty", &empty,
+ N_("clear the cache tree before each iteration")),
+ OPT_INTEGER_F(0, "invalidate", &invalidate_qty,
+ N_("number of entries in the cache tree to invalidate (default 0)"),
+ PARSE_OPT_NONEG),
+ OPT_END()
+ };
+
+ setup_git_directory();
+
+ argc = parse_options(argc, argv, NULL, options, test_cache_tree_usage, 0);
+
+ if (read_cache() < 0)
+ die(_("unable to read index file"));
+
+ oidcpy(&oid, &the_index.cache_tree->oid);
+ tree = parse_tree_indirect(&oid);
+ if (!tree)
+ die(_("not a tree object: %s"), oid_to_hex(&oid));
+
+ if (empty) {
+ /* clear the cache tree & allocate a new one */
+ cache_tree_free(&the_index.cache_tree);
+ the_index.cache_tree = cache_tree();
+ } else if (invalidate_qty) {
+ /* invalidate the specified number of unique paths */
+ float f_interval = (float)the_index.cache_nr / invalidate_qty;
+ int interval = f_interval < 1.0 ? 1 : (int)f_interval;
+ for (i = 0; i < invalidate_qty && i * interval < the_index.cache_nr; i++)
+ cache_tree_invalidate_path(&the_index, the_index.cache[i * interval]->name);
+ }
+
+ if (argc != 1)
+ usage_with_options(test_cache_tree_usage, options);
+ else if (!strcmp(argv[0], "prime"))
+ prime_cache_tree(the_repository, &the_index, tree);
+ else if (!strcmp(argv[0], "update"))
+ cache_tree_update(&the_index, WRITE_TREE_SILENT | WRITE_TREE_REPAIR);
+ /* use "control" subcommand to specify no-op */
+ else if (!!strcmp(argv[0], "control"))
+ die(_("Unhandled subcommand '%s'"), argv[0]);
+
+ return 0;
+}
diff --git a/t/helper/test-fake-ssh.c b/t/helper/test-fake-ssh.c
index 2e576bcc11..27323cb367 100644
--- a/t/helper/test-fake-ssh.c
+++ b/t/helper/test-fake-ssh.c
@@ -17,6 +17,7 @@ int cmd_main(int argc, const char **argv)
f = fopen(buf.buf, "w");
if (!f)
die("Could not write to %s", buf.buf);
+ strbuf_release(&buf);
for (i = 0; i < argc; i++)
fprintf(f, "%s%s", i > 0 ? " " : "", i > 0 ? argv[i] : "ssh:");
fprintf(f, "\n");
diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c
index 3ecb830f4a..40dd329e02 100644
--- a/t/helper/test-run-command.c
+++ b/t/helper/test-run-command.c
@@ -52,6 +52,21 @@ static int no_job(struct child_process *cp,
return 0;
}
+static void duplicate_output(struct strbuf *process_out,
+ struct strbuf *out,
+ void *pp_cb,
+ void *pp_task_cb)
+{
+ struct string_list list = STRING_LIST_INIT_DUP;
+
+ string_list_split(&list, process_out->buf, '\n', -1);
+ for (size_t i = 0; i < list.nr; i++) {
+ if (strlen(list.items[i].string) > 0)
+ fprintf(stderr, "duplicate_output: %s\n", list.items[i].string);
+ }
+ string_list_clear(&list, 0);
+}
+
static int task_finished(int result,
struct strbuf *err,
void *pp_cb,
@@ -439,6 +454,12 @@ int cmd__run_command(int argc, const char **argv)
opts.ungroup = 1;
}
+ if (!strcmp(argv[1], "--duplicate-output")) {
+ argv += 1;
+ argc -= 1;
+ opts.duplicate_output = duplicate_output;
+ }
+
jobs = atoi(argv[2]);
strvec_clear(&proc.args);
strvec_pushv(&proc.args, (const char **)argv + 3);
diff --git a/t/helper/test-sha1.c b/t/helper/test-sha1.c
index d860c387c3..71fe5c6145 100644
--- a/t/helper/test-sha1.c
+++ b/t/helper/test-sha1.c
@@ -5,3 +5,11 @@ int cmd__sha1(int ac, const char **av)
{
return cmd_hash_impl(ac, av, GIT_HASH_SHA1);
}
+
+int cmd__sha1_is_sha1dc(int argc UNUSED, const char **argv UNUSED)
+{
+#ifdef platform_SHA_IS_SHA1DC
+ return 0;
+#endif
+ return 1;
+}
diff --git a/t/helper/test-submodule.c b/t/helper/test-submodule.c
index b7d117cd55..e060cc6226 100644
--- a/t/helper/test-submodule.c
+++ b/t/helper/test-submodule.c
@@ -111,10 +111,94 @@ static int cmd__submodule_resolve_relative_url(int argc, const char **argv)
return 0;
}
+static int cmd__submodule_config_list(int argc, const char **argv)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *const usage[] = {
+ "test-tool submodule config-list <key>",
+ NULL
+ };
+ argc = parse_options(argc, argv, "test-tools", options, usage,
+ PARSE_OPT_KEEP_ARGV0);
+
+ setup_git_directory();
+
+ if (argc == 2)
+ return print_config_from_gitmodules(the_repository, argv[1]);
+ usage_with_options(usage, options);
+}
+
+static int cmd__submodule_config_set(int argc, const char **argv)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *const usage[] = {
+ "test-tool submodule config-set <key> <value>",
+ NULL
+ };
+ argc = parse_options(argc, argv, "test-tools", options, usage,
+ PARSE_OPT_KEEP_ARGV0);
+
+ setup_git_directory();
+
+ /* Equivalent to ACTION_SET in builtin/config.c */
+ if (argc == 3) {
+ if (!is_writing_gitmodules_ok())
+ die("please make sure that the .gitmodules file is in the working tree");
+
+ return config_set_in_gitmodules_file_gently(argv[1], argv[2]);
+ }
+ usage_with_options(usage, options);
+}
+
+static int cmd__submodule_config_unset(int argc, const char **argv)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *const usage[] = {
+ "test-tool submodule config-unset <key>",
+ NULL
+ };
+
+ setup_git_directory();
+
+ if (argc == 2) {
+ if (!is_writing_gitmodules_ok())
+ die("please make sure that the .gitmodules file is in the working tree");
+ return config_set_in_gitmodules_file_gently(argv[1], NULL);
+ }
+ usage_with_options(usage, options);
+}
+
+static int cmd__submodule_config_writeable(int argc, const char **argv)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *const usage[] = {
+ "test-tool submodule config-writeable",
+ NULL
+ };
+ setup_git_directory();
+
+ if (argc == 1)
+ return is_writing_gitmodules_ok() ? 0 : -1;
+
+ usage_with_options(usage, options);
+}
+
static struct test_cmd cmds[] = {
{ "check-name", cmd__submodule_check_name },
{ "is-active", cmd__submodule_is_active },
{ "resolve-relative-url", cmd__submodule_resolve_relative_url},
+ { "config-list", cmd__submodule_config_list },
+ { "config-set", cmd__submodule_config_set },
+ { "config-unset", cmd__submodule_config_unset },
+ { "config-writeable", cmd__submodule_config_writeable },
};
int cmd__submodule(int argc, const char **argv)
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
index 01cda9358d..7eb1a26a30 100644
--- a/t/helper/test-tool.c
+++ b/t/helper/test-tool.c
@@ -14,6 +14,7 @@ static struct test_cmd cmds[] = {
{ "bitmap", cmd__bitmap },
{ "bloom", cmd__bloom },
{ "bundle-uri", cmd__bundle_uri },
+ { "cache-tree", cmd__cache_tree },
{ "chmtime", cmd__chmtime },
{ "config", cmd__config },
{ "crontab", cmd__crontab },
@@ -73,6 +74,7 @@ static struct test_cmd cmds[] = {
{ "scrap-cache-tree", cmd__scrap_cache_tree },
{ "serve-v2", cmd__serve_v2 },
{ "sha1", cmd__sha1 },
+ { "sha1-is-sha1dc", cmd__sha1_is_sha1dc },
{ "sha256", cmd__sha256 },
{ "sigchain", cmd__sigchain },
{ "simple-ipc", cmd__simple_ipc },
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
index ca2948066f..da7cd6351a 100644
--- a/t/helper/test-tool.h
+++ b/t/helper/test-tool.h
@@ -8,6 +8,7 @@ int cmd__advise_if_enabled(int argc, const char **argv);
int cmd__bitmap(int argc, const char **argv);
int cmd__bloom(int argc, const char **argv);
int cmd__bundle_uri(int argc, const char **argv);
+int cmd__cache_tree(int argc, const char **argv);
int cmd__chmtime(int argc, const char **argv);
int cmd__config(int argc, const char **argv);
int cmd__crontab(int argc, const char **argv);
@@ -66,6 +67,7 @@ int cmd__run_command(int argc, const char **argv);
int cmd__scrap_cache_tree(int argc, const char **argv);
int cmd__serve_v2(int argc, const char **argv);
int cmd__sha1(int argc, const char **argv);
+int cmd__sha1_is_sha1dc(int argc, const char **argv);
int cmd__oid_array(int argc, const char **argv);
int cmd__sha256(int argc, const char **argv);
int cmd__sigchain(int argc, const char **argv);
diff --git a/t/lib-gettext.sh b/t/lib-gettext.sh
index cc6bb2cdea..dcd6e9c3f7 100644
--- a/t/lib-gettext.sh
+++ b/t/lib-gettext.sh
@@ -7,7 +7,7 @@
. ./test-lib.sh
GIT_TEXTDOMAINDIR="$GIT_BUILD_DIR/po/build/locale"
-GIT_PO_PATH="$GIT_BUILD_DIR/po"
+GIT_PO_PATH="$GIT_SOURCE_DIR/po"
export GIT_TEXTDOMAINDIR GIT_PO_PATH
if test -n "$GIT_TEST_INSTALLED"
diff --git a/t/lib-gitweb.sh b/t/lib-gitweb.sh
index 1f32ca66ea..6f68df247a 100644
--- a/t/lib-gitweb.sh
+++ b/t/lib-gitweb.sh
@@ -49,7 +49,7 @@ EOF
error "Cannot find gitweb at $GITWEB_TEST_INSTALLED."
say "# Testing $SCRIPT_NAME"
else # normal case, use source version of gitweb
- SCRIPT_NAME="$GIT_BUILD_DIR/gitweb/gitweb.perl"
+ SCRIPT_NAME="$GIT_SOURCE_DIR/gitweb/gitweb.perl"
fi
export SCRIPT_NAME
}
diff --git a/t/lib-httpd.sh b/t/lib-httpd.sh
index 1f6b9b08d1..ba9fe36772 100644
--- a/t/lib-httpd.sh
+++ b/t/lib-httpd.sh
@@ -174,6 +174,11 @@ prepare_httpd() {
fi
}
+enable_http2 () {
+ HTTPD_PARA="$HTTPD_PARA -DHTTP2"
+ test_set_prereq HTTP2
+}
+
start_httpd() {
prepare_httpd >&3 2>&4
diff --git a/t/lib-httpd/apache.conf b/t/lib-httpd/apache.conf
index 706799391b..0294739a77 100644
--- a/t/lib-httpd/apache.conf
+++ b/t/lib-httpd/apache.conf
@@ -29,6 +29,11 @@ ErrorLog error.log
LoadModule setenvif_module modules/mod_setenvif.so
</IfModule>
+<IfDefine HTTP2>
+LoadModule http2_module modules/mod_http2.so
+Protocols h2c
+</IfDefine>
+
<IfVersion < 2.4>
LockFile accept.lock
</IfVersion>
@@ -64,12 +69,20 @@ LockFile accept.lock
<IfModule !mod_access_compat.c>
LoadModule access_compat_module modules/mod_access_compat.so
</IfModule>
-<IfModule !mod_mpm_prefork.c>
- LoadModule mpm_prefork_module modules/mod_mpm_prefork.so
-</IfModule>
<IfModule !mod_unixd.c>
LoadModule unixd_module modules/mod_unixd.so
</IfModule>
+
+<IfDefine HTTP2>
+<IfModule !mod_mpm_event.c>
+ LoadModule mpm_event_module modules/mod_mpm_event.so
+</IfModule>
+</IfDefine>
+<IfDefine !HTTP2>
+<IfModule !mod_mpm_prefork.c>
+ LoadModule mpm_prefork_module modules/mod_mpm_prefork.so
+</IfModule>
+</IfDefine>
</IfVersion>
PassEnv GIT_VALGRIND
diff --git a/t/perf/p0006-read-tree-checkout.sh b/t/perf/p0006-read-tree-checkout.sh
index c481c012d2..325566e18e 100755
--- a/t/perf/p0006-read-tree-checkout.sh
+++ b/t/perf/p0006-read-tree-checkout.sh
@@ -49,6 +49,14 @@ test_perf "read-tree br_base br_ballast ($nr_files)" '
git read-tree -n -m br_base br_ballast
'
+test_perf "read-tree br_ballast_plus_1 ($nr_files)" '
+ # Run read-tree 100 times for clearer performance results & comparisons
+ for i in $(test_seq 100)
+ do
+ git read-tree -n -m br_ballast_plus_1 || return 1
+ done
+'
+
test_perf "switch between br_base br_ballast ($nr_files)" '
git checkout -q br_base &&
git checkout -q br_ballast
diff --git a/t/perf/p0090-cache-tree.sh b/t/perf/p0090-cache-tree.sh
new file mode 100755
index 0000000000..a8eabca2c4
--- /dev/null
+++ b/t/perf/p0090-cache-tree.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+test_description="Tests performance of cache tree update operations"
+
+. ./perf-lib.sh
+
+test_perf_large_repo
+test_checkout_worktree
+
+count=100
+
+test_expect_success 'setup cache tree' '
+ git write-tree
+'
+
+test_cache_tree () {
+ test_perf "$1, $3" "
+ for i in \$(test_seq $count)
+ do
+ test-tool cache-tree $4 $2
+ done
+ "
+}
+
+test_cache_tree_update_functions () {
+ test_cache_tree 'no-op' 'control' "$1" "$2"
+ test_cache_tree 'prime_cache_tree' 'prime' "$1" "$2"
+ test_cache_tree 'cache_tree_update' 'update' "$1" "$2"
+}
+
+test_cache_tree_update_functions "clean" ""
+test_cache_tree_update_functions "invalidate 2" "--invalidate 2"
+test_cache_tree_update_functions "invalidate 50" "--invalidate 50"
+test_cache_tree_update_functions "empty" "--empty"
+
+test_done
diff --git a/t/perf/p1401-ref-operations.sh b/t/perf/p1401-ref-operations.sh
new file mode 100755
index 0000000000..0b88a2f531
--- /dev/null
+++ b/t/perf/p1401-ref-operations.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+
+test_description="Tests performance of ref operations"
+
+. ./perf-lib.sh
+
+test_perf_large_repo
+
+test_perf 'git pack-refs (v1)' '
+ git commit --allow-empty -m "change one ref" &&
+ git pack-refs --all
+'
+
+test_perf 'git for-each-ref (v1)' '
+ git for-each-ref --format="%(refname)" >/dev/null
+'
+
+test_perf 'git for-each-ref prefix (v1)' '
+ git for-each-ref --format="%(refname)" refs/tags/ >/dev/null
+'
+
+test_expect_success 'configure packed-refs v2' '
+ git config core.repositoryFormatVersion 1 &&
+ git config --add extensions.refFormat files &&
+ git config --add extensions.refFormat packed &&
+ git config --add extensions.refFormat packed-v2 &&
+ git config refs.packedRefsVersion 2 &&
+ git commit --allow-empty -m "change one ref" &&
+ git pack-refs --all &&
+ test_copy_bytes 16 .git/packed-refs | xxd >actual &&
+ grep PREF actual
+'
+
+test_perf 'git pack-refs (v2)' '
+ git commit --allow-empty -m "change one ref" &&
+ git pack-refs --all
+'
+
+test_perf 'git pack-refs (v2;hashing)' '
+ git commit --allow-empty -m "change one ref" &&
+ git -c refs.hashPackedRefs=true pack-refs --all
+'
+
+test_perf 'git for-each-ref (v2)' '
+ git for-each-ref --format="%(refname)" >/dev/null
+'
+
+test_perf 'git for-each-ref prefix (v2)' '
+ git for-each-ref --format="%(refname)" refs/tags/ >/dev/null
+'
+
+test_done
diff --git a/t/perf/p7102-reset.sh b/t/perf/p7102-reset.sh
new file mode 100755
index 0000000000..9b039e8691
--- /dev/null
+++ b/t/perf/p7102-reset.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+test_description='performance of reset'
+. ./perf-lib.sh
+
+test_perf_default_repo
+test_checkout_worktree
+
+test_perf 'reset --hard with change in tree' '
+ base=$(git rev-parse HEAD) &&
+ test_commit --no-tag A &&
+ new=$(git rev-parse HEAD) &&
+
+ for i in $(test_seq 10)
+ do
+ git reset --hard $new &&
+ git reset --hard $base || return $?
+ done
+'
+
+test_done
diff --git a/t/t0013-sha1dc.sh b/t/t0013-sha1dc.sh
index 9ad76080aa..5324047689 100755
--- a/t/t0013-sha1dc.sh
+++ b/t/t0013-sha1dc.sh
@@ -6,9 +6,11 @@ TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
TEST_DATA="$TEST_DIRECTORY/t0013"
-if test -z "$DC_SHA1"
+test_lazy_prereq SHA1_IS_SHA1DC 'test-tool sha1-is-sha1dc'
+
+if ! test_have_prereq SHA1_IS_SHA1DC
then
- skip_all='skipping sha1 collision tests, DC_SHA1 not set'
+ skip_all='skipping sha1 collision tests, not using sha1collisiondetection'
test_done
fi
diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh
index abecd75e4e..46abbeed68 100755
--- a/t/t0021-conversion.sh
+++ b/t/t0021-conversion.sh
@@ -8,8 +8,8 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-terminal.sh
-TEST_ROOT="$PWD"
-PATH=$TEST_ROOT:$PATH
+PATH=$PWD:$PATH
+TEST_ROOT="$(pwd)"
write_script <<\EOF "$TEST_ROOT/rot13.sh"
tr \
diff --git a/t/t0040-parse-options.sh b/t/t0040-parse-options.sh
index 5cc62306e3..7d7ecfd571 100755
--- a/t/t0040-parse-options.sh
+++ b/t/t0040-parse-options.sh
@@ -709,4 +709,16 @@ test_expect_success 'subcommands are incompatible with KEEP_DASHDASH unless in c
grep ^BUG err
'
+test_expect_success 'negative magnitude' '
+ test_must_fail test-tool parse-options --magnitude -1 >out 2>err &&
+ grep "non-negative integer" err &&
+ test_must_be_empty out
+'
+
+test_expect_success 'magnitude with units but no numbers' '
+ test_must_fail test-tool parse-options --magnitude m >out 2>err &&
+ grep "non-negative integer" err &&
+ test_must_be_empty out
+'
+
test_done
diff --git a/t/t0061-run-command.sh b/t/t0061-run-command.sh
index 7b5423eebd..879e536638 100755
--- a/t/t0061-run-command.sh
+++ b/t/t0061-run-command.sh
@@ -130,10 +130,20 @@ World
EOF
test_expect_success 'run_command runs in parallel with more jobs available than tasks' '
- test-tool run-command run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test-tool run-command run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>actual &&
+ test_must_be_empty out &&
test_cmp expect actual
'
+test_expect_success 'run_command runs in parallel with more jobs available than tasks --duplicate-output' '
+ test-tool run-command --duplicate-output run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
+ test_must_be_empty out &&
+ test 4 = $(grep -c "duplicate_output: Hello" err) &&
+ test 4 = $(grep -c "duplicate_output: World" err) &&
+ sed "/duplicate_output/d" err > err1 &&
+ test_cmp expect err1
+'
+
test_expect_success 'run_command runs ungrouped in parallel with more jobs available than tasks' '
test-tool run-command --ungroup run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
test_line_count = 8 out &&
@@ -141,10 +151,20 @@ test_expect_success 'run_command runs ungrouped in parallel with more jobs avail
'
test_expect_success 'run_command runs in parallel with as many jobs as tasks' '
- test-tool run-command run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test-tool run-command run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>actual &&
+ test_must_be_empty out &&
test_cmp expect actual
'
+test_expect_success 'run_command runs in parallel with as many jobs as tasks --duplicate-output' '
+ test-tool run-command --duplicate-output run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
+ test_must_be_empty out &&
+ test 4 = $(grep -c "duplicate_output: Hello" err) &&
+ test 4 = $(grep -c "duplicate_output: World" err) &&
+ sed "/duplicate_output/d" err > err1 &&
+ test_cmp expect err1
+'
+
test_expect_success 'run_command runs ungrouped in parallel with as many jobs as tasks' '
test-tool run-command --ungroup run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
test_line_count = 8 out &&
@@ -152,10 +172,20 @@ test_expect_success 'run_command runs ungrouped in parallel with as many jobs as
'
test_expect_success 'run_command runs in parallel with more tasks than jobs available' '
- test-tool run-command run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test-tool run-command run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>actual &&
+ test_must_be_empty out &&
test_cmp expect actual
'
+test_expect_success 'run_command runs in parallel with more tasks than jobs available --duplicate-output' '
+ test-tool run-command --duplicate-output run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
+ test_must_be_empty out &&
+ test 4 = $(grep -c "duplicate_output: Hello" err) &&
+ test 4 = $(grep -c "duplicate_output: World" err) &&
+ sed "/duplicate_output/d" err > err1 &&
+ test_cmp expect err1
+'
+
test_expect_success 'run_command runs ungrouped in parallel with more tasks than jobs available' '
test-tool run-command --ungroup run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
test_line_count = 8 out &&
@@ -172,10 +202,17 @@ asking for a quick stop
EOF
test_expect_success 'run_command is asked to abort gracefully' '
- test-tool run-command run-command-abort 3 false 2>actual &&
+ test-tool run-command run-command-abort 3 false >out 2>actual &&
+ test_must_be_empty out &&
test_cmp expect actual
'
+test_expect_success 'run_command is asked to abort gracefully --duplicate-output' '
+ test-tool run-command --duplicate-output run-command-abort 3 false >out 2>err &&
+ test_must_be_empty out &&
+ test_cmp expect err
+'
+
test_expect_success 'run_command is asked to abort gracefully (ungroup)' '
test-tool run-command --ungroup run-command-abort 3 false >out 2>err &&
test_must_be_empty out &&
@@ -187,10 +224,17 @@ no further jobs available
EOF
test_expect_success 'run_command outputs ' '
- test-tool run-command run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test-tool run-command run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>actual &&
+ test_must_be_empty out &&
test_cmp expect actual
'
+test_expect_success 'run_command outputs --duplicate-output' '
+ test-tool run-command --duplicate-output run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
+ test_must_be_empty out &&
+ test_cmp expect err
+'
+
test_expect_success 'run_command outputs (ungroup) ' '
test-tool run-command --ungroup run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" >out 2>err &&
test_must_be_empty out &&
diff --git a/t/t0068-for-each-repo.sh b/t/t0068-for-each-repo.sh
index 4675e85251..3648d439a8 100755
--- a/t/t0068-for-each-repo.sh
+++ b/t/t0068-for-each-repo.sh
@@ -2,15 +2,18 @@
test_description='git for-each-repo builtin'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'run based on configured value' '
git init one &&
git init two &&
git init three &&
+ git init ~/four &&
git -C two commit --allow-empty -m "DID NOT RUN" &&
git config run.key "$TRASH_DIRECTORY/one" &&
git config --add run.key "$TRASH_DIRECTORY/three" &&
+ git config --add run.key "~/four" &&
git for-each-repo --config=run.key commit --allow-empty -m "ran" &&
git -C one log -1 --pretty=format:%s >message &&
grep ran message &&
@@ -18,12 +21,16 @@ test_expect_success 'run based on configured value' '
! grep ran message &&
git -C three log -1 --pretty=format:%s >message &&
grep ran message &&
+ git -C ~/four log -1 --pretty=format:%s >message &&
+ grep ran message &&
git for-each-repo --config=run.key -- commit --allow-empty -m "ran again" &&
git -C one log -1 --pretty=format:%s >message &&
grep again message &&
git -C two log -1 --pretty=format:%s >message &&
! grep again message &&
git -C three log -1 --pretty=format:%s >message &&
+ grep again message &&
+ git -C ~/four log -1 --pretty=format:%s >message &&
grep again message
'
diff --git a/t/t0070-fundamental.sh b/t/t0070-fundamental.sh
index 8d59905ef0..574de34198 100755
--- a/t/t0070-fundamental.sh
+++ b/t/t0070-fundamental.sh
@@ -6,6 +6,7 @@ test_description='check that the most basic functions work
Verify wrappers and compatibility functions.
'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'character classes (isspace, isalpha etc.)' '
diff --git a/t/t1011-read-tree-sparse-checkout.sh b/t/t1011-read-tree-sparse-checkout.sh
index 742f0fa909..595b24c0ad 100755
--- a/t/t1011-read-tree-sparse-checkout.sh
+++ b/t/t1011-read-tree-sparse-checkout.sh
@@ -12,6 +12,7 @@ test_description='sparse checkout tests
'
TEST_CREATE_REPO_NO_TEMPLATE=1
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-read-tree.sh
diff --git a/t/t1022-read-tree-partial-clone.sh b/t/t1022-read-tree-partial-clone.sh
index a9953b6a71..cca4380e43 100755
--- a/t/t1022-read-tree-partial-clone.sh
+++ b/t/t1022-read-tree-partial-clone.sh
@@ -3,7 +3,7 @@
test_description='git read-tree in partial clones'
TEST_NO_CREATE_REPO=1
-
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'read-tree in partial clone prefetches in one batch' '
@@ -19,7 +19,7 @@ test_expect_success 'read-tree in partial clone prefetches in one batch' '
git -C server config uploadpack.allowfilter 1 &&
git -C server config uploadpack.allowanysha1inwant 1 &&
git clone --bare --filter=blob:none "file://$(pwd)/server" client &&
- GIT_TRACE_PACKET="$(pwd)/trace" git -C client read-tree $TREE &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client read-tree $TREE $TREE &&
# "done" marks the end of negotiation (once per fetch). Expect that
# only one fetch occurs.
diff --git a/t/t1050-large.sh b/t/t1050-large.sh
index 4f3aa17c99..c71932b024 100755
--- a/t/t1050-large.sh
+++ b/t/t1050-large.sh
@@ -5,6 +5,12 @@ test_description='adding and checking out large blobs'
. ./test-lib.sh
+test_expect_success 'core.bigFileThreshold must be non-negative' '
+ test_must_fail git -c core.bigFileThreshold=-1 rev-parse >out 2>err &&
+ grep "bad numeric config value" err &&
+ test_must_be_empty out
+'
+
test_expect_success setup '
# clone does not allow us to pass core.bigfilethreshold to
# new repos, so set core.bigfilethreshold globally
diff --git a/t/t1300-config.sh b/t/t1300-config.sh
index c6661e61af..2575279ab8 100755
--- a/t/t1300-config.sh
+++ b/t/t1300-config.sh
@@ -2228,6 +2228,12 @@ test_expect_success '--type rejects unknown specifiers' '
test_i18ngrep "unrecognized --type argument" error
'
+test_expect_success '--type=int requires at least one digit' '
+ test_must_fail git config --type int --default m some.key >out 2>error &&
+ grep "bad numeric config value" error &&
+ test_must_be_empty out
+'
+
test_expect_success '--replace-all does not invent newlines' '
q_to_tab >.git/config <<-\EOF &&
[abc]key
diff --git a/t/t1404-update-ref-errors.sh b/t/t1404-update-ref-errors.sh
index 13c2b43bba..b5606d93b5 100755
--- a/t/t1404-update-ref-errors.sh
+++ b/t/t1404-update-ref-errors.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='Test git update-ref error handling'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Create some references, perhaps run pack-refs --all, then try to
diff --git a/t/t1409-avoid-packing-refs.sh b/t/t1409-avoid-packing-refs.sh
index be12fb6350..74dedd57e9 100755
--- a/t/t1409-avoid-packing-refs.sh
+++ b/t/t1409-avoid-packing-refs.sh
@@ -2,19 +2,36 @@
test_description='avoid rewriting packed-refs unnecessarily'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Add an identifying mark to the packed-refs file header line. This
# shouldn't upset readers, and it should be omitted if the file is
# ever rewritten.
mark_packed_refs () {
- sed -e "s/^\(#.*\)/\1 t1409 /" .git/packed-refs >.git/packed-refs.new &&
- mv .git/packed-refs.new .git/packed-refs
+ if test "$GIT_TEST_PACKED_REFS_VERSION" = "2"
+ then
+ size=$(wc -c < .git/packed-refs) &&
+ pos=$(expr $size - 4) &&
+ printf "FAKE" | dd of=".git/packed-refs" bs=1 seek="$pos" conv=notrunc
+ else
+ sed -e "s/^\(#.*\)/\1 t1409 /" .git/packed-refs >.git/packed-refs.new &&
+ mv .git/packed-refs.new .git/packed-refs
+ fi
}
# Verify that the packed-refs file is still marked.
check_packed_refs_marked () {
- grep -q '^#.* t1409 ' .git/packed-refs
+ if test "$GIT_TEST_PACKED_REFS_VERSION" = "2"
+ then
+ size=$(wc -c < .git/packed-refs) &&
+ pos=$(expr $size - 4) &&
+ tail -c 4 .git/packed-refs >actual &&
+ printf "FAKE" >expect &&
+ test_cmp expect actual
+ else
+ grep -q '^#.* t1409 ' .git/packed-refs
+ fi
}
test_expect_success 'setup' '
diff --git a/t/t1413-reflog-detach.sh b/t/t1413-reflog-detach.sh
index 934688a1ee..d2a4822d46 100755
--- a/t/t1413-reflog-detach.sh
+++ b/t/t1413-reflog-detach.sh
@@ -4,6 +4,7 @@ test_description='Test reflog interaction with detached HEAD'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
reset_state () {
diff --git a/t/t1501-work-tree.sh b/t/t1501-work-tree.sh
index b75558040f..ae6528aece 100755
--- a/t/t1501-work-tree.sh
+++ b/t/t1501-work-tree.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test separate work tree'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t1600-index.sh b/t/t1600-index.sh
index 010989f90e..24ab90ca04 100755
--- a/t/t1600-index.sh
+++ b/t/t1600-index.sh
@@ -103,4 +103,12 @@ test_expect_success 'index version config precedence' '
test_index_version 0 true 2 2
'
+test_expect_success 'index.computeHash config option' '
+ (
+ rm -f .git/index &&
+ git -c index.computeHash=false add a &&
+ git fsck
+ )
+'
+
test_done
diff --git a/t/t1800-hook.sh b/t/t1800-hook.sh
index 43fcb7c0bf..2ef3579fa7 100755
--- a/t/t1800-hook.sh
+++ b/t/t1800-hook.sh
@@ -95,7 +95,7 @@ test_expect_success 'git hook run -- out-of-repo runs excluded' '
test_expect_success 'git -c core.hooksPath=<PATH> hook run' '
mkdir my-hooks &&
write_script my-hooks/test-hook <<-\EOF &&
- echo Hook ran $1 >>actual
+ echo Hook ran $1
EOF
cat >expect <<-\EOF &&
diff --git a/t/t2012-checkout-last.sh b/t/t2012-checkout-last.sh
index 1f6c4ed042..4b6372f4c3 100755
--- a/t/t2012-checkout-last.sh
+++ b/t/t2012-checkout-last.sh
@@ -5,6 +5,7 @@ test_description='checkout can switch to last branch and merge base'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t2018-checkout-branch.sh b/t/t2018-checkout-branch.sh
index 771c3c3c50..8581ad3437 100755
--- a/t/t2018-checkout-branch.sh
+++ b/t/t2018-checkout-branch.sh
@@ -3,6 +3,7 @@
test_description='checkout'
TEST_CREATE_REPO_NO_TEMPLATE=1
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Arguments: [!] <branch> <oid> [<checkout options>]
diff --git a/t/t2025-checkout-no-overlay.sh b/t/t2025-checkout-no-overlay.sh
index 8f13341cf8..3832c3de81 100755
--- a/t/t2025-checkout-no-overlay.sh
+++ b/t/t2025-checkout-no-overlay.sh
@@ -2,6 +2,7 @@
test_description='checkout --no-overlay <tree-ish> -- <pathspec>'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t2400-worktree-add.sh b/t/t2400-worktree-add.sh
index d587e0b20d..93c340f4af 100755
--- a/t/t2400-worktree-add.sh
+++ b/t/t2400-worktree-add.sh
@@ -310,6 +310,26 @@ test_expect_success '"add" -B/--detach mutually exclusive' '
test_must_fail git worktree add -B poodle --detach bamboo main
'
+test_expect_success '"add" --orphan/-b mutually exclusive' '
+ test_must_fail git worktree add --orphan poodle -b poodle bamboo
+'
+
+test_expect_success '"add" --orphan/-B mutually exclusive' '
+ test_must_fail git worktree add --orphan poodle -B poodle bamboo
+'
+
+test_expect_success '"add" --orphan/--detach mutually exclusive' '
+ test_must_fail git worktree add --orphan poodle --detach bamboo
+'
+
+test_expect_success '"add" --orphan/--no-checkout mutually exclusive' '
+ test_must_fail git worktree add --orphan poodle --no-checkout bamboo
+'
+
+test_expect_success '"add" -B/--detach mutually exclusive' '
+ test_must_fail git worktree add -B poodle --detach bamboo main
+'
+
test_expect_success '"add -B" fails if the branch is checked out' '
git rev-parse newmain >before &&
test_must_fail git worktree add -B newmain bamboo main &&
@@ -330,6 +350,31 @@ test_expect_success 'add --quiet' '
test_must_be_empty actual
'
+test_expect_success '"add --orphan"' '
+ test_when_finished "git worktree remove -f -f orphandir" &&
+ git worktree add --orphan neworphan orphandir &&
+ echo refs/heads/neworphan >expected &&
+ git -C orphandir symbolic-ref HEAD >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '"add --orphan" fails if the branch already exists' '
+ test_when_finished "git branch -D existingbranch" &&
+ test_when_finished "git worktree remove -f -f orphandir" &&
+ git worktree add -b existingbranch orphandir main &&
+ test_must_fail git worktree add --orphan existingbranch orphandir2 &&
+ test ! -d orphandir2
+'
+
+test_expect_success '"add --orphan" with empty repository' '
+ test_when_finished "rm -rf empty_repo" &&
+ echo refs/heads/newbranch >expected &&
+ GIT_DIR="empty_repo" git init --bare &&
+ git -C empty_repo worktree add --orphan newbranch worktreedir &&
+ git -C empty_repo/worktreedir symbolic-ref HEAD >actual &&
+ test_cmp expected actual
+'
+
test_expect_success 'local clone from linked checkout' '
git clone --local here here-clone &&
( cd here-clone && git fsck )
diff --git a/t/t3009-ls-files-others-nonsubmodule.sh b/t/t3009-ls-files-others-nonsubmodule.sh
index 963f3462b7..14218b3424 100755
--- a/t/t3009-ls-files-others-nonsubmodule.sh
+++ b/t/t3009-ls-files-others-nonsubmodule.sh
@@ -18,6 +18,7 @@ This test runs git ls-files --others with the following working tree:
git repository with a commit and an untracked file
'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup: directories' '
diff --git a/t/t3010-ls-files-killed-modified.sh b/t/t3010-ls-files-killed-modified.sh
index 580e158f99..054178703d 100755
--- a/t/t3010-ls-files-killed-modified.sh
+++ b/t/t3010-ls-files-killed-modified.sh
@@ -41,6 +41,8 @@ Also for modification test, the cache and working tree have:
We should report path0, path1, path2/file2, path3/file3, path7 and path8
modified without reporting path9 and path10. submod1 is also modified.
'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'git update-index --add to add various paths.' '
diff --git a/t/t3050-subprojects-fetch.sh b/t/t3050-subprojects-fetch.sh
index f1f09abdd9..3884694165 100755
--- a/t/t3050-subprojects-fetch.sh
+++ b/t/t3050-subprojects-fetch.sh
@@ -2,6 +2,7 @@
test_description='fetching and pushing project with subproject'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
diff --git a/t/t3060-ls-files-with-tree.sh b/t/t3060-ls-files-with-tree.sh
index 52f76f7b57..c4a72ae446 100755
--- a/t/t3060-ls-files-with-tree.sh
+++ b/t/t3060-ls-files-with-tree.sh
@@ -8,6 +8,8 @@ test_description='git ls-files test (--with-tree).
This test runs git ls-files --with-tree and in particular in
a scenario known to trigger a crash with some versions of git.
'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t3200-branch.sh b/t/t3200-branch.sh
index 7f605f865b..5a169b68d6 100755
--- a/t/t3200-branch.sh
+++ b/t/t3200-branch.sh
@@ -279,6 +279,42 @@ test_expect_success 'git branch -M and -C fail on detached HEAD' '
test_cmp expect err
'
+test_expect_success 'git branch -d on orphan HEAD (merged)' '
+ test_when_finished git checkout main &&
+ git checkout --orphan orphan &&
+ test_when_finished "rm -rf .git/objects/commit-graph*" &&
+ git commit-graph write --reachable &&
+ git branch --track to-delete main &&
+ git branch -d to-delete
+'
+
+test_expect_success 'git branch -d on orphan HEAD (merged, graph)' '
+ test_when_finished git checkout main &&
+ git checkout --orphan orphan &&
+ git branch --track to-delete main &&
+ git branch -d to-delete
+'
+
+test_expect_success 'git branch -d on orphan HEAD (unmerged)' '
+ test_when_finished git checkout main &&
+ git checkout --orphan orphan &&
+ test_when_finished "git branch -D to-delete" &&
+ git branch to-delete main &&
+ test_must_fail git branch -d to-delete 2>err &&
+ grep "not fully merged" err
+'
+
+test_expect_success 'git branch -d on orphan HEAD (unmerged, graph)' '
+ test_when_finished git checkout main &&
+ git checkout --orphan orphan &&
+ test_when_finished "git branch -D to-delete" &&
+ git branch to-delete main &&
+ test_when_finished "rm -rf .git/objects/commit-graph*" &&
+ git commit-graph write --reachable &&
+ test_must_fail git branch -d to-delete 2>err &&
+ grep "not fully merged" err
+'
+
test_expect_success 'git branch -v -d t should work' '
git branch t &&
git rev-parse --verify refs/heads/t &&
diff --git a/t/t3210-pack-refs.sh b/t/t3210-pack-refs.sh
index 577f32dc71..76251dfe05 100755
--- a/t/t3210-pack-refs.sh
+++ b/t/t3210-pack-refs.sh
@@ -159,7 +159,7 @@ test_expect_success 'delete ref while another dangling packed ref' '
test_expect_success 'pack ref directly below refs/' '
git update-ref refs/top HEAD &&
git pack-refs --all --prune &&
- grep refs/top .git/packed-refs &&
+ git rev-parse refs/top &&
test_path_is_missing .git/refs/top
'
@@ -197,7 +197,7 @@ test_expect_success 'notice d/f conflict with existing ref' '
test_must_fail git branch foo/bar/baz/lots/of/extra/components
'
-test_expect_success 'reject packed-refs with unterminated line' '
+test_expect_success PACKED_REFS_V1 'reject packed-refs with unterminated line' '
cp .git/packed-refs .git/packed-refs.bak &&
test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
printf "%s" "$HEAD refs/zzzzz" >>.git/packed-refs &&
@@ -206,7 +206,7 @@ test_expect_success 'reject packed-refs with unterminated line' '
test_cmp expected_err err
'
-test_expect_success 'reject packed-refs containing junk' '
+test_expect_success PACKED_REFS_V1 'reject packed-refs containing junk' '
cp .git/packed-refs .git/packed-refs.bak &&
test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
printf "%s\n" "bogus content" >>.git/packed-refs &&
@@ -215,7 +215,7 @@ test_expect_success 'reject packed-refs containing junk' '
test_cmp expected_err err
'
-test_expect_success 'reject packed-refs with a short SHA-1' '
+test_expect_success PACKED_REFS_V1 'reject packed-refs with a short SHA-1' '
cp .git/packed-refs .git/packed-refs.bak &&
test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
printf "%.7s %s\n" $HEAD refs/zzzzz >>.git/packed-refs &&
diff --git a/t/t3212-ref-formats.sh b/t/t3212-ref-formats.sh
new file mode 100755
index 0000000000..5583f16db4
--- /dev/null
+++ b/t/t3212-ref-formats.sh
@@ -0,0 +1,100 @@
+#!/bin/sh
+
+test_description='test across ref formats'
+
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
+. ./test-lib.sh
+
+test_expect_success 'extensions.refFormat requires core.repositoryFormatVersion=1' '
+ test_when_finished rm -rf broken &&
+
+ # Force sha1 to ensure GIT_TEST_DEFAULT_HASH does
+ # not imply a value of core.repositoryFormatVersion.
+ git init --object-format=sha1 broken &&
+ git -C broken config extensions.refFormat files &&
+ test_must_fail git -C broken status 2>err &&
+ grep "repo version is 0, but v1-only extension found" err
+'
+
+test_expect_success 'invalid extensions.refFormat' '
+ test_when_finished rm -rf broken &&
+ git init broken &&
+ git -C broken config core.repositoryFormatVersion 1 &&
+ git -C broken config extensions.refFormat bogus &&
+ test_must_fail git -C broken status 2>err &&
+ grep "invalid value for '\''extensions.refFormat'\'': '\''bogus'\''" err
+'
+
+test_expect_success 'extensions.refFormat=packed only' '
+ git init only-packed &&
+ (
+ cd only-packed &&
+ git config core.repositoryFormatVersion 1 &&
+ git config extensions.refFormat packed &&
+ test_commit A &&
+ test_path_exists .git/packed-refs &&
+ test_path_is_missing .git/refs/tags/A
+ )
+'
+
+test_expect_success 'extensions.refFormat=files only' '
+ test_commit T &&
+ git pack-refs --all &&
+ git init only-loose &&
+ (
+ cd only-loose &&
+ git config core.repositoryFormatVersion 1 &&
+ git config extensions.refFormat files &&
+ test_commit A &&
+ test_commit B &&
+ test_must_fail git pack-refs 2>err &&
+ grep "refusing to create" err &&
+ test_path_is_missing .git/packed-refs &&
+
+ # Refuse to parse a packed-refs file.
+ cp ../.git/packed-refs .git/packed-refs &&
+ test_must_fail git rev-parse refs/tags/T
+ )
+'
+
+test_expect_success 'extensions.refFormat=files,packed-v2' '
+ test_commit Q &&
+ git pack-refs --all &&
+ git init no-packed-v1 &&
+ (
+ cd no-packed-v1 &&
+ git config core.repositoryFormatVersion 1 &&
+ git config extensions.refFormat files &&
+ git config --add extensions.refFormat packed-v2 &&
+ test_commit A &&
+ test_commit B &&
+
+ # Refuse to parse a v1 packed-refs file.
+ cp ../.git/packed-refs .git/packed-refs &&
+ test_must_fail git rev-parse refs/tags/Q &&
+ rm -f .git/packed-refs &&
+
+ git for-each-ref --format="%(refname) %(objectname)" >expect-all &&
+ git for-each-ref --format="%(refname) %(objectname)" \
+ refs/tags/* >expect-tags &&
+
+ # Create a v2 packed-refs file
+ git pack-refs --all &&
+ test_path_exists .git/packed-refs &&
+ for t in A B
+ do
+ test_path_is_missing .git/refs/tags/$t &&
+ git rev-parse refs/tags/$t || return 1
+ done &&
+
+ git for-each-ref --format="%(refname) %(objectname)" >actual-all &&
+ test_cmp expect-all actual-all &&
+ git for-each-ref --format="%(refname) %(objectname)" \
+ refs/tags/* >actual-tags &&
+ test_cmp expect-tags actual-tags
+ )
+'
+
+test_done
diff --git a/t/t3301-notes.sh b/t/t3301-notes.sh
index 3288aaec7d..dedad93a2f 100755
--- a/t/t3301-notes.sh
+++ b/t/t3301-notes.sh
@@ -521,12 +521,25 @@ test_expect_success 'listing non-existing notes fails' '
test_must_be_empty actual
'
+test_expect_success 'append to existing note without a beginning blank line' '
+ test_when_finished git notes remove HEAD &&
+ cat >expect <<-\EOF &&
+ Initial set of notes
+ Appended notes
+ EOF
+ git notes add -m "Initial set of notes" &&
+ git notes append --no-blank-line -m "Appended notes" &&
+ git notes show >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'append to existing note with "git notes append"' '
cat >expect <<-EOF &&
Initial set of notes
More notes appended with git notes append
EOF
+
git notes add -m "Initial set of notes" &&
git notes append -m "More notes appended with git notes append" &&
git notes show >actual &&
@@ -552,6 +565,7 @@ test_expect_success 'appending empty string does not change existing note' '
'
test_expect_success 'git notes append == add when there is no existing note' '
+ test_when_finished git notes remove HEAD &&
git notes remove HEAD &&
test_must_fail git notes list HEAD &&
git notes append -m "Initial set of notes${LF}${LF}More notes appended with git notes append" &&
@@ -560,9 +574,9 @@ test_expect_success 'git notes append == add when there is no existing note' '
'
test_expect_success 'appending empty string to non-existing note does not create note' '
- git notes remove HEAD &&
test_must_fail git notes list HEAD &&
- git notes append -m "" &&
+ git notes append -m "" >output 2>&1 &&
+ grep "Both original and appended notes are empty" output &&
test_must_fail git notes list HEAD
'
diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh
index 4f5abb5ad2..462cefd25d 100755
--- a/t/t3404-rebase-interactive.sh
+++ b/t/t3404-rebase-interactive.sh
@@ -1964,6 +1964,113 @@ test_expect_success 'respect user edits to update-ref steps' '
test_cmp_rev HEAD refs/heads/no-conflict-branch
'
+test_expect_success '--update-refs: all update-ref lines removed' '
+ git checkout -b test-refs-not-removed no-conflict-branch &&
+ git branch -f base HEAD~4 &&
+ git branch -f first HEAD~3 &&
+ git branch -f second HEAD~3 &&
+ git branch -f third HEAD~1 &&
+ git branch -f tip &&
+
+ test_commit test-refs-not-removed &&
+ git commit --amend --fixup first &&
+
+ git rev-parse first second third tip no-conflict-branch >expect-oids &&
+
+ (
+ set_cat_todo_editor &&
+ test_must_fail git rebase -i --update-refs base >todo.raw &&
+ sed -e "/^update-ref/d" <todo.raw >todo
+ ) &&
+ (
+ set_replace_editor todo &&
+ git rebase -i --update-refs base
+ ) &&
+
+ # Ensure refs are not deleted and their OIDs have not changed
+ git rev-parse first second third tip no-conflict-branch >actual-oids &&
+ test_cmp expect-oids actual-oids
+'
+
+test_expect_success '--update-refs: all update-ref lines removed, then some re-added' '
+ git checkout -b test-refs-not-removed2 no-conflict-branch &&
+ git branch -f base HEAD~4 &&
+ git branch -f first HEAD~3 &&
+ git branch -f second HEAD~3 &&
+ git branch -f third HEAD~1 &&
+ git branch -f tip &&
+
+ test_commit test-refs-not-removed2 &&
+ git commit --amend --fixup first &&
+
+ git rev-parse first second third >expect-oids &&
+
+ (
+ set_cat_todo_editor &&
+ test_must_fail git rebase -i \
+ --autosquash --update-refs \
+ base >todo.raw &&
+ sed -e "/^update-ref/d" <todo.raw >todo
+ ) &&
+
+ # Add a break to the end of the todo so we can edit later
+ echo "break" >>todo &&
+
+ (
+ set_replace_editor todo &&
+ git rebase -i --autosquash --update-refs base &&
+ echo "update-ref refs/heads/tip" >todo &&
+ git rebase --edit-todo &&
+ git rebase --continue
+ ) &&
+
+ # Ensure first/second/third are unchanged, but tip is updated
+ git rev-parse first second third >actual-oids &&
+ test_cmp expect-oids actual-oids &&
+ test_cmp_rev HEAD tip
+'
+
+test_expect_success '--update-refs: --edit-todo with no update-ref lines' '
+ git checkout -b test-refs-not-removed3 no-conflict-branch &&
+ git branch -f base HEAD~4 &&
+ git branch -f first HEAD~3 &&
+ git branch -f second HEAD~3 &&
+ git branch -f third HEAD~1 &&
+ git branch -f tip &&
+
+ test_commit test-refs-not-removed3 &&
+ git commit --amend --fixup first &&
+
+ git rev-parse first second third tip no-conflict-branch >expect-oids &&
+
+ (
+ set_cat_todo_editor &&
+ test_must_fail git rebase -i \
+ --autosquash --update-refs \
+ base >todo.raw &&
+ sed -e "/^update-ref/d" <todo.raw >todo
+ ) &&
+
+ # Add a break to the beginning of the todo so we can resume with no
+ # update-ref lines
+ echo "break" >todo.new &&
+ cat todo >>todo.new &&
+
+ (
+ set_replace_editor todo.new &&
+ git rebase -i --autosquash --update-refs base &&
+
+ # Make no changes when editing so update-refs is still empty
+ cat todo >todo.new &&
+ git rebase --edit-todo &&
+ git rebase --continue
+ ) &&
+
+ # Ensure refs are not deleted and their OIDs have not changed
+ git rev-parse first second third tip no-conflict-branch >actual-oids &&
+ test_cmp expect-oids actual-oids
+'
+
test_expect_success '--update-refs: check failed ref update' '
git checkout -B update-refs-error no-conflict-branch &&
git branch -f base HEAD~4 &&
diff --git a/t/t3409-rebase-environ.sh b/t/t3409-rebase-environ.sh
index 83ffb39d9f..acaf5558db 100755
--- a/t/t3409-rebase-environ.sh
+++ b/t/t3409-rebase-environ.sh
@@ -2,6 +2,7 @@
test_description='git rebase interactive environment'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t3413-rebase-hook.sh b/t/t3413-rebase-hook.sh
index 9fab0d779b..e8456831e8 100755
--- a/t/t3413-rebase-hook.sh
+++ b/t/t3413-rebase-hook.sh
@@ -5,6 +5,7 @@ test_description='git rebase with its hook(s)'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
diff --git a/t/t3428-rebase-signoff.sh b/t/t3428-rebase-signoff.sh
index f6993b7e14..e1b1e94764 100755
--- a/t/t3428-rebase-signoff.sh
+++ b/t/t3428-rebase-signoff.sh
@@ -5,6 +5,7 @@ test_description='git rebase --signoff
This test runs git rebase --signoff and make sure that it works.
'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# A simple file to commit
diff --git a/t/t3429-rebase-edit-todo.sh b/t/t3429-rebase-edit-todo.sh
index abd66f3602..8e0d03969a 100755
--- a/t/t3429-rebase-edit-todo.sh
+++ b/t/t3429-rebase-edit-todo.sh
@@ -2,6 +2,7 @@
test_description='rebase should reread the todo file if an exec modifies it'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-rebase.sh
diff --git a/t/t3430-rebase-merges.sh b/t/t3430-rebase-merges.sh
index f351701fec..fa2a06c19f 100755
--- a/t/t3430-rebase-merges.sh
+++ b/t/t3430-rebase-merges.sh
@@ -138,6 +138,23 @@ test_expect_success '`reset` refuses to overwrite untracked files' '
git rebase --abort
'
+test_expect_success '`reset` rejects trees' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ test_must_fail env GIT_SEQUENCE_EDITOR="echo reset A^{tree} >" \
+ git rebase -i B C >out 2>err &&
+ grep "object .* is a tree" err &&
+ test_must_be_empty out
+'
+
+test_expect_success '`reset` only looks for labels under refs/rewritten/' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ git branch refs/rewritten/my-label A &&
+ test_must_fail env GIT_SEQUENCE_EDITOR="echo reset my-label >" \
+ git rebase -i B C >out 2>err &&
+ grep "could not resolve ${SQ}my-label${SQ}" err &&
+ test_must_be_empty out
+'
+
test_expect_success 'failed `merge -C` writes patch (may be rescheduled, too)' '
test_when_finished "test_might_fail git rebase --abort" &&
git checkout -b conflicting-merge A &&
diff --git a/t/t3433-rebase-across-mode-change.sh b/t/t3433-rebase-across-mode-change.sh
index 05df964670..c8172b0852 100755
--- a/t/t3433-rebase-across-mode-change.sh
+++ b/t/t3433-rebase-across-mode-change.sh
@@ -2,6 +2,7 @@
test_description='git rebase across mode change'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t4015-diff-whitespace.sh b/t/t4015-diff-whitespace.sh
index f3e20dd5bb..b298f220e0 100755
--- a/t/t4015-diff-whitespace.sh
+++ b/t/t4015-diff-whitespace.sh
@@ -1638,7 +1638,7 @@ test_expect_success 'no effect on diff from --color-moved with --word-diff' '
test_cmp expect actual
'
-test_expect_success !SANITIZE_LEAK 'no effect on show from --color-moved with --word-diff' '
+test_expect_success 'no effect on show from --color-moved with --word-diff' '
git show --color-moved --word-diff >actual &&
git show --word-diff >expect &&
test_cmp expect actual
@@ -2024,7 +2024,7 @@ test_expect_success '--color-moved rewinds for MIN_ALNUM_COUNT' '
test_cmp expected actual
'
-test_expect_success !SANITIZE_LEAK 'move detection with submodules' '
+test_expect_success 'move detection with submodules' '
test_create_repo bananas &&
echo ripe >bananas/recipe &&
git -C bananas add recipe &&
diff --git a/t/t4027-diff-submodule.sh b/t/t4027-diff-submodule.sh
index 40164ae07d..e08ee315a7 100755
--- a/t/t4027-diff-submodule.sh
+++ b/t/t4027-diff-submodule.sh
@@ -34,6 +34,25 @@ test_expect_success setup '
subtip=$3 subprev=$2
'
+test_expect_success 'diff in superproject with submodules respects parallel settings' '
+ test_when_finished "rm -f trace.out" &&
+ (
+ GIT_TRACE=$(pwd)/trace.out git diff &&
+ grep "1 tasks" trace.out &&
+ >trace.out &&
+
+ git config submodule.diffJobs 8 &&
+ GIT_TRACE=$(pwd)/trace.out git diff &&
+ grep "8 tasks" trace.out &&
+ >trace.out &&
+
+ GIT_TRACE=$(pwd)/trace.out git -c submodule.diffJobs=0 diff &&
+ grep "preparing to run up to [0-9]* tasks" trace.out &&
+ ! grep "up to 0 tasks" trace.out &&
+ >trace.out
+ )
+'
+
test_expect_success 'git diff --raw HEAD' '
hexsz=$(test_oid hexsz) &&
git diff --raw --abbrev=$hexsz HEAD >actual &&
diff --git a/t/t4045-diff-relative.sh b/t/t4045-diff-relative.sh
index fab351b48a..198dfc9190 100755
--- a/t/t4045-diff-relative.sh
+++ b/t/t4045-diff-relative.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='diff --relative tests'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t4052-stat-output.sh b/t/t4052-stat-output.sh
index b5c281edaa..3ee27e277d 100755
--- a/t/t4052-stat-output.sh
+++ b/t/t4052-stat-output.sh
@@ -8,6 +8,7 @@ test_description='test --stat output of various commands'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-terminal.sh
diff --git a/t/t4053-diff-no-index.sh b/t/t4053-diff-no-index.sh
index 3feadf0e35..4e9fa0403d 100755
--- a/t/t4053-diff-no-index.sh
+++ b/t/t4053-diff-no-index.sh
@@ -2,6 +2,7 @@
test_description='diff --no-index'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t4067-diff-partial-clone.sh b/t/t4067-diff-partial-clone.sh
index 28f42a4046..f60f5cbd65 100755
--- a/t/t4067-diff-partial-clone.sh
+++ b/t/t4067-diff-partial-clone.sh
@@ -2,6 +2,7 @@
test_description='behavior of diff when reading objects in a partial clone'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'git show batches blobs' '
diff --git a/t/t4111-apply-subdir.sh b/t/t4111-apply-subdir.sh
index 1618a6dbc7..e9a87d761d 100755
--- a/t/t4111-apply-subdir.sh
+++ b/t/t4111-apply-subdir.sh
@@ -2,6 +2,7 @@
test_description='patching from inconvenient places'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t4135-apply-weird-filenames.sh b/t/t4135-apply-weird-filenames.sh
index 6bc3fb97a7..d3502c6fdd 100755
--- a/t/t4135-apply-weird-filenames.sh
+++ b/t/t4135-apply-weird-filenames.sh
@@ -2,6 +2,7 @@
test_description='git apply with weird postimage filenames'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t4213-log-tabexpand.sh b/t/t4213-log-tabexpand.sh
index 53a4af3244..590fce95e9 100755
--- a/t/t4213-log-tabexpand.sh
+++ b/t/t4213-log-tabexpand.sh
@@ -2,6 +2,7 @@
test_description='log/show --expand-tabs'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
HT=" "
diff --git a/t/t4301-merge-tree-write-tree.sh b/t/t4301-merge-tree-write-tree.sh
index cac85591b5..a8983a0edc 100755
--- a/t/t4301-merge-tree-write-tree.sh
+++ b/t/t4301-merge-tree-write-tree.sh
@@ -860,4 +860,66 @@ test_expect_success '--stdin with both a successful and a conflicted merge' '
test_cmp expect actual
'
+
+test_expect_success '--merge-base is incompatible with --stdin' '
+ test_must_fail git merge-tree --merge-base=side1 --stdin 2>expect &&
+
+ grep "^fatal: --merge-base is incompatible with --stdin" expect
+'
+
+# specify merge-base as parent of branch2
+# git merge-tree --write-tree --merge-base=c2 c1 c3
+# Commit c1: add file1
+# Commit c2: add file2 after c1
+# Commit c3: add file3 after c2
+# Expected: add file3, and file2 does NOT appear
+
+test_expect_success 'specify merge-base as parent of branch2' '
+ # Setup
+ test_when_finished "rm -rf base-b2-p" &&
+ git init base-b2-p &&
+ test_commit -C base-b2-p c1 file1 &&
+ test_commit -C base-b2-p c2 file2 &&
+ test_commit -C base-b2-p c3 file3 &&
+
+ # Testing
+ TREE_OID=$(git -C base-b2-p merge-tree --write-tree --merge-base=c2 c1 c3) &&
+
+ q_to_tab <<-EOF >expect &&
+ 100644 blob $(git -C base-b2-p rev-parse c1:file1)Qfile1
+ 100644 blob $(git -C base-b2-p rev-parse c3:file3)Qfile3
+ EOF
+
+ git -C base-b2-p ls-tree $TREE_OID >actual &&
+ test_cmp expect actual
+'
+
+# Since the earlier tests have verified that individual merge-tree calls
+# are doing the right thing, this test case is only used to verify that
+# we can also trigger merges via --stdin, and that when we do we get
+# the same answer as running a bunch of separate merges.
+
+test_expect_success 'check the input format when --stdin is passed' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ test_commit -C repo c1 &&
+ test_commit -C repo c2 &&
+ test_commit -C repo c3 &&
+ printf "c1 c3\nc2 -- c1 c3\nc2 c3" | git -C repo merge-tree --stdin >actual &&
+
+ printf "1\0" >expect &&
+ git -C repo merge-tree --write-tree -z c1 c3 >>expect &&
+ printf "\0" >>expect &&
+
+ printf "1\0" >>expect &&
+ git -C repo merge-tree --write-tree -z --merge-base=c2 c1 c3 >>expect &&
+ printf "\0" >>expect &&
+
+ printf "1\0" >>expect &&
+ git -C repo merge-tree --write-tree -z c2 c3 >>expect &&
+ printf "\0" >>expect &&
+
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh
index 6d693eef82..7d8dee41b0 100755
--- a/t/t5310-pack-bitmaps.sh
+++ b/t/t5310-pack-bitmaps.sh
@@ -428,8 +428,9 @@ test_bitmap_cases () {
test_line_count = 2 packs &&
test_line_count = 2 bitmaps &&
- git rev-list --use-bitmap-index HEAD 2>err &&
- grep "ignoring extra bitmap file" err
+ GIT_TRACE2_EVENT=$(pwd)/trace2.txt git rev-list --use-bitmap-index HEAD &&
+ grep "opened bitmap" trace2.txt &&
+ grep "ignoring extra bitmap" trace2.txt
)
'
}
diff --git a/t/t5317-pack-objects-filter-objects.sh b/t/t5317-pack-objects-filter-objects.sh
index bb633c9b09..bd8983bb56 100755
--- a/t/t5317-pack-objects-filter-objects.sh
+++ b/t/t5317-pack-objects-filter-objects.sh
@@ -178,6 +178,25 @@ test_expect_success 'verify blob:limit=1001' '
test_cmp expected observed
'
+test_expect_success 'verify blob:limit=1001+object:type=blob' '
+ git -C r2 ls-files -s large.1000 |
+ test_parse_ls_files_stage_oids |
+ sort >expected &&
+
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1001 \
+ --filter=object:type=blob >filter.pack <<-EOF &&
+ HEAD
+ EOF
+ git -C r2 index-pack ../filter.pack &&
+
+ git -C r2 verify-pack -v ../filter.pack >verify_result &&
+ grep blob verify_result |
+ parse_verify_pack_blob_oid |
+ sort >observed &&
+
+ test_cmp expected observed
+'
+
test_expect_success 'verify blob:limit=10001' '
git -C r2 ls-files -s large.1000 large.10000 |
test_parse_ls_files_stage_oids |
diff --git a/t/t5502-quickfetch.sh b/t/t5502-quickfetch.sh
index b160f8b7fb..0c4aadebae 100755
--- a/t/t5502-quickfetch.sh
+++ b/t/t5502-quickfetch.sh
@@ -122,7 +122,7 @@ test_expect_success 'quickfetch should not copy from alternate' '
'
-test_expect_success 'quickfetch should handle ~1000 refs (on Windows)' '
+test_expect_success PACKED_REFS_V1 'quickfetch should handle ~1000 refs (on Windows)' '
git gc &&
head=$(git rev-parse HEAD) &&
diff --git a/t/t5526-fetch-submodules.sh b/t/t5526-fetch-submodules.sh
index 75da8acf8f..b9546ef8e5 100755
--- a/t/t5526-fetch-submodules.sh
+++ b/t/t5526-fetch-submodules.sh
@@ -178,6 +178,7 @@ test_expect_success "submodule.recurse option triggers recursive fetch" '
'
test_expect_success "fetch --recurse-submodules -j2 has the same output behaviour" '
+ test_when_finished "rm -f trace.out" &&
add_submodule_commits &&
(
cd downstream &&
@@ -705,15 +706,22 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .git
test_expect_success 'fetching submodules respects parallel settings' '
git config fetch.recurseSubmodules true &&
+ test_when_finished "rm -f downstream/trace.out" &&
(
cd downstream &&
GIT_TRACE=$(pwd)/trace.out git fetch &&
grep "1 tasks" trace.out &&
+ >trace.out &&
+
GIT_TRACE=$(pwd)/trace.out git fetch --jobs 7 &&
grep "7 tasks" trace.out &&
+ >trace.out &&
+
git config submodule.fetchJobs 8 &&
GIT_TRACE=$(pwd)/trace.out git fetch &&
grep "8 tasks" trace.out &&
+ >trace.out &&
+
GIT_TRACE=$(pwd)/trace.out git fetch --jobs 9 &&
grep "9 tasks" trace.out &&
>trace.out &&
diff --git a/t/t5531-deep-submodule-push.sh b/t/t5531-deep-submodule-push.sh
index 3f58b515ce..302e4cbdba 100755
--- a/t/t5531-deep-submodule-push.sh
+++ b/t/t5531-deep-submodule-push.sh
@@ -512,6 +512,56 @@ test_expect_success 'push only unpushed submodules recursively' '
test_cmp expected_pub actual_pub
'
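+# Set up an upstream superproject with a nested submodule chain
+# (upstream -> sub -> deepsub), clone it recursively into "downstream",
+# and check out a "downstream-branch" at every level.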
+setup_subsub () {
+ git init upstream &&
+ git init upstream/sub &&
+ git init upstream/sub/deepsub &&
+ test_commit -C upstream/sub/deepsub innermost &&
+ git -C upstream/sub submodule add ./deepsub deepsub &&
+ git -C upstream/sub commit -m middle &&
+ git -C upstream submodule add ./sub sub &&
+ git -C upstream commit -m outermost &&
+
+ git -c protocol.file.allow=always clone --recurse-submodules upstream downstream &&
+ git -C downstream/sub/deepsub checkout -b downstream-branch &&
+ git -C downstream/sub checkout -b downstream-branch &&
+ git -C downstream checkout -b downstream-branch
+}
+
+new_downstream_commits () {
+ test_commit -C downstream/sub/deepsub new-innermost &&
+ git -C downstream/sub add deepsub &&
+ git -C downstream/sub commit -m new-middle &&
+ git -C downstream add sub &&
+ git -C downstream commit -m new-outermost
+}
+
+test_expect_success 'push with push.recurseSubmodules=only on superproject' '
+ test_when_finished rm -rf upstream downstream &&
+ setup_subsub &&
+ new_downstream_commits &&
+ git -C downstream config push.recurseSubmodules only &&
+ git -C downstream push origin downstream-branch &&
+
+ test_must_fail git -C upstream rev-parse refs/heads/downstream-branch &&
+ git -C upstream/sub rev-parse refs/heads/downstream-branch &&
+ test_must_fail git -C upstream/sub/deepsub rev-parse refs/heads/downstream-branch
+'
+
+test_expect_success 'push with push.recurseSubmodules=only on superproject and top-level submodule' '
+ test_when_finished rm -rf upstream downstream &&
+ setup_subsub &&
+ new_downstream_commits &&
+ git -C downstream config push.recurseSubmodules only &&
+ git -C downstream/sub config push.recurseSubmodules only &&
+ git -C downstream push origin downstream-branch 2> err &&
+
+ test_must_fail git -C upstream rev-parse refs/heads/downstream-branch &&
+ git -C upstream/sub rev-parse refs/heads/downstream-branch &&
+ git -C upstream/sub/deepsub rev-parse refs/heads/downstream-branch &&
+ grep "recursing into submodule with push.recurseSubmodules=only; using on-demand instead" err
+'
+
test_expect_success 'push propagating the remotes name to a submodule' '
git -C work remote add origin ../pub.git &&
git -C work remote add pub ../pub.git &&
diff --git a/t/t5539-fetch-http-shallow.sh b/t/t5539-fetch-http-shallow.sh
index 3ea75d34ca..5e3b430436 100755
--- a/t/t5539-fetch-http-shallow.sh
+++ b/t/t5539-fetch-http-shallow.sh
@@ -5,6 +5,13 @@ test_description='fetch/clone from a shallow clone over http'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+# If GIT_TEST_PACKED_REFS_VERSION=2, then the packed-refs file will
+# be written in v2 format without extensions.refFormat=packed-v2. This
+# causes issues for the HTTP server which does not carry over the
+# environment variable to the server process.
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
diff --git a/t/t5541-http-push-smart.sh b/t/t5541-http-push-smart.sh
index fbad2d5ff5..495437dd3c 100755
--- a/t/t5541-http-push-smart.sh
+++ b/t/t5541-http-push-smart.sh
@@ -7,6 +7,13 @@ test_description='test smart pushing over http via http-backend'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+# If GIT_TEST_PACKED_REFS_VERSION=2, then the packed-refs file will
+# be written in v2 format without extensions.refFormat=packed-v2. This
+# causes issues for the HTTP server which does not carry over the
+# environment variable to the server process.
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
. ./test-lib.sh
ROOT_PATH="$PWD"
diff --git a/t/t5542-push-http-shallow.sh b/t/t5542-push-http-shallow.sh
index c2cc83182f..c47b18b9fa 100755
--- a/t/t5542-push-http-shallow.sh
+++ b/t/t5542-push-http-shallow.sh
@@ -5,6 +5,13 @@ test_description='push from/to a shallow clone over http'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+# If GIT_TEST_PACKED_REFS_VERSION=2, then the packed-refs file will
+# be written in v2 format without extensions.refFormat=packed-v2. This
+# causes issues for the HTTP server which does not carry over the
+# environment variable to the server process.
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
diff --git a/t/t5544-pack-objects-hook.sh b/t/t5544-pack-objects-hook.sh
index 54f54f8d2e..1a9e14bbcc 100755
--- a/t/t5544-pack-objects-hook.sh
+++ b/t/t5544-pack-objects-hook.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test custom script in place of pack-objects'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'create some history to fetch' '
diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh
index 64c6c9f59e..faf6bd44d8 100755
--- a/t/t5551-http-fetch-smart.sh
+++ b/t/t5551-http-fetch-smart.sh
@@ -1,13 +1,26 @@
#!/bin/sh
-test_description='test smart fetching over http via http-backend'
+: ${HTTP_PROTO:=HTTP}
+test_description="test smart fetching over http via http-backend ($HTTP_PROTO)"
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+# If GIT_TEST_PACKED_REFS_VERSION=2, then the packed-refs file will
+# be written in v2 format without extensions.refFormat=packed-v2. This
+# causes issues for the HTTP server which does not carry over the
+# environment variable to the server process.
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-httpd.sh
+test "$HTTP_PROTO" = "HTTP/2" && enable_http2
start_httpd
+test_expect_success HTTP2 'enable client-side http/2' '
+ git config --global http.version HTTP/2
+'
+
test_expect_success 'setup repository' '
git config push.default matching &&
echo content >file &&
@@ -347,7 +360,10 @@ test_expect_success CMDLINE_LIMIT \
test_expect_success 'large fetch-pack requests can be sent using chunked encoding' '
GIT_TRACE_CURL=true git -c http.postbuffer=65536 \
clone --bare "$HTTPD_URL/smart/repo.git" split.git 2>err &&
- grep "^=> Send header: Transfer-Encoding: chunked" err
+ {
+ test_have_prereq HTTP2 ||
+ grep "^=> Send header: Transfer-Encoding: chunked" err
+ }
'
test_expect_success 'test allowreachablesha1inwant' '
diff --git a/t/t5554-noop-fetch-negotiator.sh b/t/t5554-noop-fetch-negotiator.sh
index 2ac7b5859e..06991e8e8a 100755
--- a/t/t5554-noop-fetch-negotiator.sh
+++ b/t/t5554-noop-fetch-negotiator.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test noop fetch negotiator'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'noop negotiator does not emit any "have"' '
diff --git a/t/t5558-clone-bundle-uri.sh b/t/t5558-clone-bundle-uri.sh
index 9155f31fa2..3e35322155 100755
--- a/t/t5558-clone-bundle-uri.sh
+++ b/t/t5558-clone-bundle-uri.sh
@@ -2,6 +2,13 @@
test_description='test fetching bundles with --bundle-uri'
+# If GIT_TEST_PACKED_REFS_VERSION=2, then the packed-refs file will
+# be written in v2 format without extensions.refFormat=packed-v2. This
+# causes issues for the HTTP server which does not carry over the
+# environment variable to the server process.
+GIT_TEST_PACKED_REFS_VERSION=0
+export GIT_TEST_PACKED_REFS_VERSION
+
. ./test-lib.sh
test_expect_success 'fail to clone from non-existent file' '
diff --git a/t/t5559-http-fetch-smart-http2.sh b/t/t5559-http-fetch-smart-http2.sh
new file mode 100755
index 0000000000..9eece71c2c
--- /dev/null
+++ b/t/t5559-http-fetch-smart-http2.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+HTTP_PROTO=HTTP/2
+. ./t5551-http-fetch-smart.sh
diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh
index b2524a24c2..23c8999bc5 100755
--- a/t/t5601-clone.sh
+++ b/t/t5601-clone.sh
@@ -310,6 +310,28 @@ test_expect_success 'clone checking out a tag' '
test_cmp fetch.expected fetch.actual
'
+test_expect_success '--detach detaches and does not create branch' '
+ test_when_finished "rm -fr dst" &&
+ git clone --detach src dst &&
+ (
+ cd dst &&
+ test_must_fail git rev-parse main &&
+ test_must_fail git symbolic-ref HEAD &&
+ test_cmp_rev HEAD refs/remotes/origin/HEAD
+ )
+'
+
+test_expect_success '--detach with --bare detaches but creates branch' '
+ test_when_finished "rm -fr dst" &&
+ git clone --bare --detach src dst &&
+ (
+ cd dst &&
+ git rev-parse main &&
+ test_must_fail git symbolic-ref HEAD &&
+ test_cmp_rev HEAD refs/heads/main
+ )
+'
+
test_expect_success 'set up ssh wrapper' '
cp "$GIT_BUILD_DIR/t/helper/test-fake-ssh$X" \
"$TRASH_DIRECTORY/ssh$X" &&
diff --git a/t/t5610-clone-detached.sh b/t/t5610-clone-detached.sh
index a7ec21eda5..022ed3d87c 100755
--- a/t/t5610-clone-detached.sh
+++ b/t/t5610-clone-detached.sh
@@ -4,6 +4,7 @@ test_description='test cloning a repository with detached HEAD'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
head_is_detached() {
diff --git a/t/t5611-clone-config.sh b/t/t5611-clone-config.sh
index 4b3877216e..727caff443 100755
--- a/t/t5611-clone-config.sh
+++ b/t/t5611-clone-config.sh
@@ -4,6 +4,7 @@ test_description='tests for git clone -c key=value'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'clone -c sets config in cloned repo' '
diff --git a/t/t5614-clone-submodules-shallow.sh b/t/t5614-clone-submodules-shallow.sh
index 0c85ef834a..c2a2bb453e 100755
--- a/t/t5614-clone-submodules-shallow.sh
+++ b/t/t5614-clone-submodules-shallow.sh
@@ -2,6 +2,7 @@
test_description='Test shallow cloning of repos with submodules'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
pwd=$(pwd)
diff --git a/t/t5617-clone-submodules-remote.sh b/t/t5617-clone-submodules.sh
index 6884338249..5767c4d318 100755
--- a/t/t5617-clone-submodules-remote.sh
+++ b/t/t5617-clone-submodules.sh
@@ -1,10 +1,11 @@
#!/bin/sh
-test_description='Test cloning repos with submodules using remote-tracking branches'
+test_description='Test cloning repos with submodules'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
pwd=$(pwd)
@@ -13,10 +14,17 @@ test_expect_success 'setup' '
git config --global protocol.file.allow always &&
git checkout -b main &&
test_commit commit1 &&
+ mkdir subsub &&
+ (
+ cd subsub &&
+ git init &&
+ test_commit subsubcommit1
+ ) &&
mkdir sub &&
(
cd sub &&
git init &&
+ git submodule add "file://$pwd/subsub" subsub &&
test_commit subcommit1 &&
git tag sub_when_added_to_super &&
git branch other
@@ -107,4 +115,35 @@ test_expect_success '--no-also-filter-submodules overrides clone.filterSubmodule
test_cmp_config -C super_clone3/sub false --default false remote.origin.promisor
'
+test_expect_success 'submodule.propagateBranches checks out branches at correct commits' '
+ test_when_finished "git checkout main" &&
+
+ git checkout -b checked-out &&
+ git -C sub checkout -b not-in-clone &&
+ git -C subsub checkout -b not-in-clone &&
+ git clone --recurse-submodules \
+ --branch checked-out \
+ -c submodule.propagateBranches=true \
+ "file://$pwd/." super_clone4 &&
+
+ # Assert that each repo is pointing to "checked-out"
+ for REPO in "super_clone4" "super_clone4/sub" "super_clone4/sub/subsub"
+ do
+ HEAD_BRANCH=$(git -C $REPO symbolic-ref HEAD) &&
+ test $HEAD_BRANCH = "refs/heads/checked-out" || return 1
+ done &&
+
+ # Assert that the submodule branches are pointing to the right revs
+ EXPECT_SUB_OID="$(git -C super_clone4 rev-parse :sub)" &&
+ ACTUAL_SUB_OID="$(git -C super_clone4/sub rev-parse refs/heads/checked-out)" &&
+ test $EXPECT_SUB_OID = $ACTUAL_SUB_OID &&
+ EXPECT_SUBSUB_OID="$(git -C super_clone4/sub rev-parse :subsub)" &&
+ ACTUAL_SUBSUB_OID="$(git -C super_clone4/sub/subsub rev-parse refs/heads/checked-out)" &&
+ test $EXPECT_SUBSUB_OID = $ACTUAL_SUBSUB_OID &&
+
+ # Assert that the submodules do not have branches from their upstream
+ test_must_fail git -C super_clone4/sub rev-parse not-in-clone &&
+ test_must_fail git -C super_clone4/sub/subsub rev-parse not-in-clone
+'
+
test_done
diff --git a/t/t5618-alternate-refs.sh b/t/t5618-alternate-refs.sh
index 3353216f09..f905db0a3f 100755
--- a/t/t5618-alternate-refs.sh
+++ b/t/t5618-alternate-refs.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='test handling of --alternate-refs traversal'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Avoid test_commit because we want a specific and known set of refs:
diff --git a/t/t6018-rev-list-glob.sh b/t/t6018-rev-list-glob.sh
index e1abc5c2b3..aabf590dda 100755
--- a/t/t6018-rev-list-glob.sh
+++ b/t/t6018-rev-list-glob.sh
@@ -187,6 +187,46 @@ test_expect_success 'rev-parse --exclude=ref with --remotes=glob' '
compare rev-parse "--exclude=upstream/x --remotes=upstream/*" "upstream/one upstream/two"
'
+for section in receive uploadpack
+do
+ test_expect_success "rev-parse --exclude-hidden=$section with --all" '
+ compare "-c transfer.hideRefs=refs/remotes/ rev-parse" "--branches --tags" "--exclude-hidden=$section --all"
+ '
+
+ test_expect_success "rev-parse --exclude-hidden=$section with --all" '
+ compare "-c transfer.hideRefs=refs/heads/subspace/ rev-parse" "--exclude=refs/heads/subspace/* --all" "--exclude-hidden=$section --all"
+ '
+
+ test_expect_success "rev-parse --exclude-hidden=$section with --glob" '
+ compare "-c transfer.hideRefs=refs/heads/subspace/ rev-parse" "--exclude=refs/heads/subspace/* --glob=refs/heads/*" "--exclude-hidden=$section --glob=refs/heads/*"
+ '
+
+ test_expect_success "rev-parse --exclude-hidden=$section can be passed once per pseudo-ref" '
+ compare "-c transfer.hideRefs=refs/remotes/ rev-parse" "--branches --tags --branches --tags" "--exclude-hidden=$section --all --exclude-hidden=$section --all"
+ '
+
+ test_expect_success "rev-parse --exclude-hidden=$section can only be passed once per pseudo-ref" '
+ echo "fatal: --exclude-hidden= passed more than once" >expected &&
+ test_must_fail git rev-parse --exclude-hidden=$section --exclude-hidden=$section 2>err &&
+ test_cmp expected err
+ '
+
+ for pseudoopt in branches tags remotes
+ do
+ test_expect_success "rev-parse --exclude-hidden=$section fails with --$pseudoopt" '
+ echo "error: --exclude-hidden cannot be used together with --$pseudoopt" >expected &&
+ test_must_fail git rev-parse --exclude-hidden=$section --$pseudoopt 2>err &&
+ test_cmp expected err
+ '
+
+ test_expect_success "rev-parse --exclude-hidden=$section fails with --$pseudoopt=pattern" '
+ echo "error: --exclude-hidden cannot be used together with --$pseudoopt" >expected &&
+ test_must_fail git rev-parse --exclude-hidden=$section --$pseudoopt=pattern 2>err &&
+ test_cmp expected err
+ '
+ done
+done
+
test_expect_success 'rev-list --exclude=glob with --branches=glob' '
compare rev-list "--exclude=subspace-* --branches=sub*" "subspace/one subspace/two"
'
diff --git a/t/t6021-rev-list-exclude-hidden.sh b/t/t6021-rev-list-exclude-hidden.sh
new file mode 100755
index 0000000000..32b2b09413
--- /dev/null
+++ b/t/t6021-rev-list-exclude-hidden.sh
@@ -0,0 +1,163 @@
+#!/bin/sh
+
+test_description='git rev-list --exclude-hidden test'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit_bulk --id=commit --ref=refs/heads/branch 1 &&
+ COMMIT=$(git rev-parse refs/heads/branch) &&
+ test_commit_bulk --id=tag --ref=refs/tags/lightweight 1 &&
+ TAG=$(git rev-parse refs/tags/lightweight) &&
+ test_commit_bulk --id=hidden --ref=refs/hidden/commit 1 &&
+ HIDDEN=$(git rev-parse refs/hidden/commit) &&
+ test_commit_bulk --id=namespace --ref=refs/namespaces/namespace/refs/namespaced/commit 1 &&
+ NAMESPACE=$(git rev-parse refs/namespaces/namespace/refs/namespaced/commit)
+'
+
+test_expect_success 'invalid section' '
+ echo "fatal: unsupported section for hidden refs: unsupported" >expected &&
+ test_must_fail git rev-list --exclude-hidden=unsupported 2>err &&
+ test_cmp expected err
+'
+
+for section in receive uploadpack
+do
+ test_expect_success "$section: passed multiple times" '
+ echo "fatal: --exclude-hidden= passed more than once" >expected &&
+ test_must_fail git rev-list --exclude-hidden=$section --exclude-hidden=$section 2>err &&
+ test_cmp expected err
+ '
+
+ test_expect_success "$section: without hiddenRefs" '
+ git rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $HIDDEN
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: hidden via transfer.hideRefs" '
+ git -c transfer.hideRefs=refs/hidden/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: hidden via $section.hideRefs" '
+ git -c $section.hideRefs=refs/hidden/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: respects both transfer.hideRefs and $section.hideRefs" '
+ git -c transfer.hideRefs=refs/tags/ -c $section.hideRefs=refs/hidden/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: negation without hidden refs marks everything as uninteresting" '
+ git rev-list --all --exclude-hidden=$section --not --all >out &&
+ test_must_be_empty out
+ '
+
+ test_expect_success "$section: negation with hidden refs marks them as interesting" '
+ git -c transfer.hideRefs=refs/hidden/ rev-list --all --exclude-hidden=$section --not --all >out &&
+ cat >expected <<-EOF &&
+ $HIDDEN
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: hidden refs and excludes work together" '
+ git -c transfer.hideRefs=refs/hidden/ rev-list --exclude=refs/tags/* --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: excluded hidden refs get reset" '
+ git -c transfer.hideRefs=refs/ rev-list --exclude-hidden=$section --all --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $HIDDEN
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: excluded hidden refs can be used with multiple pseudo-refs" '
+ git -c transfer.hideRefs=refs/ rev-list --exclude-hidden=$section --all --exclude-hidden=$section --all >out &&
+ test_must_be_empty out
+ '
+
+ test_expect_success "$section: works with --glob" '
+ git -c transfer.hideRefs=refs/hidden/ rev-list --exclude-hidden=$section --glob=refs/h* >out &&
+ cat >expected <<-EOF &&
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: operates on stripped refs by default" '
+ GIT_NAMESPACE=namespace git -c transfer.hideRefs=refs/namespaced/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $HIDDEN
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: does not hide namespace by default" '
+ GIT_NAMESPACE=namespace git -c transfer.hideRefs=refs/namespaces/namespace/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $NAMESPACE
+ $HIDDEN
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ test_expect_success "$section: can operate on unstripped refs" '
+ GIT_NAMESPACE=namespace git -c transfer.hideRefs=^refs/namespaces/namespace/ rev-list --exclude-hidden=$section --all >out &&
+ cat >expected <<-EOF &&
+ $HIDDEN
+ $TAG
+ $COMMIT
+ EOF
+ test_cmp expected out
+ '
+
+ for pseudoopt in remotes branches tags
+ do
+ test_expect_success "$section: fails with --$pseudoopt" '
+ test_must_fail git rev-list --exclude-hidden=$section --$pseudoopt 2>err &&
+ test_i18ngrep "error: --exclude-hidden cannot be used together with --$pseudoopt" err
+ '
+
+ test_expect_success "$section: fails with --$pseudoopt=pattern" '
+ test_must_fail git rev-list --exclude-hidden=$section --$pseudoopt=pattern 2>err &&
+ test_i18ngrep "error: --exclude-hidden cannot be used together with --$pseudoopt" err
+ '
+ done
+done
+
+test_done
diff --git a/t/t6030-bisect-porcelain.sh b/t/t6030-bisect-porcelain.sh
index 83931d482f..98a72ff78a 100755
--- a/t/t6030-bisect-porcelain.sh
+++ b/t/t6030-bisect-porcelain.sh
@@ -34,6 +34,36 @@ HASH2=
HASH3=
HASH4=
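+# test_bisect_usage <exit-code> <command>...: run the command, expecting the
+# given exit code, an empty stdout, and the error message fed on stdin to
+# appear on stderr.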
+test_bisect_usage () {
+ local code="$1" &&
+ shift &&
+ cat >expect &&
+ test_expect_code $code "$@" >out 2>actual &&
+ test_must_be_empty out &&
+ test_cmp expect actual
+}
+
+test_expect_success 'bisect usage' "
+ test_bisect_usage 1 git bisect reset extra1 extra2 <<-\EOF &&
+ error: 'git bisect reset' requires either no argument or a commit
+ EOF
+ test_bisect_usage 1 git bisect terms extra1 extra2 <<-\EOF &&
+ error: 'git bisect terms' requires 0 or 1 argument
+ EOF
+ test_bisect_usage 1 git bisect next extra1 <<-\EOF &&
+ error: 'git bisect next' requires 0 arguments
+ EOF
+ test_bisect_usage 1 git bisect log extra1 <<-\EOF &&
+ error: We are not bisecting.
+ EOF
+ test_bisect_usage 1 git bisect replay <<-\EOF &&
+ error: no logfile given
+ EOF
+ test_bisect_usage 1 git bisect run <<-\EOF
+ error: 'git bisect run' failed: no command provided.
+ EOF
+"
+
test_expect_success 'set up basic repo with 1 file (hello) and 4 commits' '
add_line_into_file "1: Hello World" hello &&
HASH1=$(git rev-parse --verify HEAD) &&
@@ -252,6 +282,124 @@ test_expect_success 'bisect skip: with commit both bad and skipped' '
grep $HASH4 my_bisect_log.txt
'
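+# test_bisect_run_args [<arg>...]: run "git bisect run ./run.sh <arg>..." and
+# check that the script received exactly those arguments. Expected arguments
+# are read from stdin, expected stdout from fd 6, and expected stderr from fd 7.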
+test_bisect_run_args () {
+ test_when_finished "rm -f run.sh actual" &&
+ >actual &&
+ cat >expect.args &&
+ cat <&6 >expect.out &&
+ cat <&7 >expect.err &&
+ write_script run.sh <<-\EOF &&
+ while test $# != 0
+ do
+ echo "<$1>" &&
+ shift
+ done >actual.args
+ EOF
+
+ test_when_finished "git bisect reset" &&
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ git bisect run ./run.sh $@ >actual.out.raw 2>actual.err &&
+ # Prune just the log output
+ sed -n \
+ -e '/^Author:/d' \
+ -e '/^Date:/d' \
+ -e '/^$/d' \
+ -e '/^commit /d' \
+ -e '/^ /d' \
+ -e 'p' \
+ <actual.out.raw >actual.out &&
+ test_cmp expect.out actual.out &&
+ test_cmp expect.err actual.err &&
+ test_cmp expect.args actual.args
+}
+
+test_expect_success 'git bisect run: args, stdout and stderr with no arguments' "
+ test_bisect_run_args <<-'EOF_ARGS' 6<<-EOF_OUT 7<<-'EOF_ERR'
+ EOF_ARGS
+ running './run.sh'
+ $HASH4 is the first bad commit
+ bisect found first bad commit
+ EOF_OUT
+ EOF_ERR
+"
+
+test_expect_success 'git bisect run: args, stdout and stderr: "--" argument' "
+ test_bisect_run_args -- <<-'EOF_ARGS' 6<<-EOF_OUT 7<<-'EOF_ERR'
+ <-->
+ EOF_ARGS
+ running './run.sh' '--'
+ $HASH4 is the first bad commit
+ bisect found first bad commit
+ EOF_OUT
+ EOF_ERR
+"
+
+test_expect_success 'git bisect run: args, stdout and stderr: "--log foo --no-log bar" arguments' "
+ test_bisect_run_args --log foo --no-log bar <<-'EOF_ARGS' 6<<-EOF_OUT 7<<-'EOF_ERR'
+ <--log>
+ <foo>
+ <--no-log>
+ <bar>
+ EOF_ARGS
+ running './run.sh' '--log' 'foo' '--no-log' 'bar'
+ $HASH4 is the first bad commit
+ bisect found first bad commit
+ EOF_OUT
+ EOF_ERR
+"
+
+test_expect_success 'git bisect run: args, stdout and stderr: "--bisect-start" argument' "
+ test_bisect_run_args --bisect-start <<-'EOF_ARGS' 6<<-EOF_OUT 7<<-'EOF_ERR'
+ <--bisect-start>
+ EOF_ARGS
+ running './run.sh' '--bisect-start'
+ $HASH4 is the first bad commit
+ bisect found first bad commit
+ EOF_OUT
+ EOF_ERR
+"
+
+test_expect_success 'git bisect run: negative exit code' "
+ write_script fail.sh <<-'EOF' &&
+ exit 255
+ EOF
+ cat <<-'EOF' >expect &&
+ bisect run failed: exit code -1 from './fail.sh' is < 0 or >= 128
+ EOF
+ test_when_finished 'git bisect reset' &&
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ ! git bisect run ./fail.sh 2>err &&
+ sed -En 's/.*(bisect.*code) (-?[0-9]+) (from.*)/\1 -1 \3/p' err >actual &&
+ test_cmp expect actual
+"
+
+test_expect_success 'git bisect run: unable to verify on good' "
+ write_script fail.sh <<-'EOF' &&
+ head=\$(git rev-parse --verify HEAD)
+ good=\$(git rev-parse --verify $HASH1)
+ if test "\$head" = "\$good"
+ then
+ exit 255
+ else
+ exit 127
+ fi
+ EOF
+ cat <<-'EOF' >expect &&
+ unable to verify './fail.sh' on good revision
+ EOF
+ test_when_finished 'git bisect reset' &&
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ ! git bisect run ./fail.sh 2>err &&
+ sed -n 's/.*\(unable to verify.*\)/\1/p' err >actual &&
+ test_cmp expect actual
+"
+
# We want to automatically find the commit that
# added "Another" into hello.
test_expect_success '"git bisect run" simple case' '
@@ -266,6 +414,16 @@ test_expect_success '"git bisect run" simple case' '
git bisect reset
'
+# We want to make sure no arguments have been eaten
+test_expect_success '"git bisect run" simple case' '
+ git bisect start &&
+ git bisect good $HASH1 &&
+ git bisect bad $HASH4 &&
+ git bisect run printf "%s %s\n" reset --bisect-skip >my_bisect_log.txt &&
+ grep -e "reset --bisect-skip" my_bisect_log.txt &&
+ git bisect reset
+'
+
# We want to automatically find the commit that
# added "Ciao" into hello.
test_expect_success '"git bisect run" with more complex "git bisect start"' '
diff --git a/t/t6060-merge-index.sh b/t/t6060-merge-index.sh
index ed449abe55..1a8b64cce1 100755
--- a/t/t6060-merge-index.sh
+++ b/t/t6060-merge-index.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='basic git merge-index / git-merge-one-file tests'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup diverging branches' '
diff --git a/t/t6102-rev-list-unexpected-objects.sh b/t/t6102-rev-list-unexpected-objects.sh
index 4a9a4436e2..9350b5fd2c 100755
--- a/t/t6102-rev-list-unexpected-objects.sh
+++ b/t/t6102-rev-list-unexpected-objects.sh
@@ -121,8 +121,8 @@ test_expect_success 'setup unexpected non-blob tag' '
tag=$(git hash-object -w --literally -t tag broken-tag)
'
-test_expect_success 'TODO (should fail!): traverse unexpected non-blob tag (lone)' '
- git rev-list --objects $tag
+test_expect_success 'traverse unexpected non-blob tag (lone)' '
+ test_must_fail git rev-list --objects $tag
'
test_expect_success 'traverse unexpected non-blob tag (seen)' '
diff --git a/t/t6301-for-each-ref-errors.sh b/t/t6301-for-each-ref-errors.sh
index 40edf9dab5..bfda1f46ad 100755
--- a/t/t6301-for-each-ref-errors.sh
+++ b/t/t6301-for-each-ref-errors.sh
@@ -2,6 +2,7 @@
test_description='for-each-ref errors for broken refs'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
ZEROS=$ZERO_OID
diff --git a/t/t6401-merge-criss-cross.sh b/t/t6401-merge-criss-cross.sh
index 9d5e992878..1962310408 100755
--- a/t/t6401-merge-criss-cross.sh
+++ b/t/t6401-merge-criss-cross.sh
@@ -8,6 +8,8 @@
test_description='Test criss-cross merge'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'prepare repository' '
diff --git a/t/t6406-merge-attr.sh b/t/t6406-merge-attr.sh
index 8650a88c40..5e4e4dd6d9 100755
--- a/t/t6406-merge-attr.sh
+++ b/t/t6406-merge-attr.sh
@@ -8,6 +8,7 @@ test_description='per path merge controlled by merge attribute'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
diff --git a/t/t6407-merge-binary.sh b/t/t6407-merge-binary.sh
index e8a28717ce..0753fc95f4 100755
--- a/t/t6407-merge-binary.sh
+++ b/t/t6407-merge-binary.sh
@@ -5,6 +5,7 @@ test_description='ask merge-recursive to merge binary files'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
diff --git a/t/t6415-merge-dir-to-symlink.sh b/t/t6415-merge-dir-to-symlink.sh
index 2655e295f5..ae00492c76 100755
--- a/t/t6415-merge-dir-to-symlink.sh
+++ b/t/t6415-merge-dir-to-symlink.sh
@@ -4,6 +4,7 @@ test_description='merging when a directory was replaced with a symlink'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'create a commit where dir a/b changed to symlink' '
diff --git a/t/t6435-merge-sparse.sh b/t/t6435-merge-sparse.sh
index fde4aa3cd1..78628fb248 100755
--- a/t/t6435-merge-sparse.sh
+++ b/t/t6435-merge-sparse.sh
@@ -3,6 +3,7 @@
test_description='merge with sparse files'
TEST_CREATE_REPO_NO_TEMPLATE=1
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# test_file $filename $content
diff --git a/t/t7001-mv.sh b/t/t7001-mv.sh
index 8c37bceb33..d72cef8826 100755
--- a/t/t7001-mv.sh
+++ b/t/t7001-mv.sh
@@ -60,8 +60,8 @@ test_expect_success 'checking the commit' '
test_expect_success 'mv --dry-run does not move file' '
git mv -n path0/COPYING MOVED &&
- test -f path0/COPYING &&
- test ! -f MOVED
+ test_path_is_file path0/COPYING &&
+ test_path_is_missing MOVED
'
test_expect_success 'checking -k on non-existing file' '
@@ -71,25 +71,25 @@ test_expect_success 'checking -k on non-existing file' '
test_expect_success 'checking -k on untracked file' '
>untracked1 &&
git mv -k untracked1 path0 &&
- test -f untracked1 &&
- test ! -f path0/untracked1
+ test_path_is_file untracked1 &&
+ test_path_is_missing path0/untracked1
'
test_expect_success 'checking -k on multiple untracked files' '
>untracked2 &&
git mv -k untracked1 untracked2 path0 &&
- test -f untracked1 &&
- test -f untracked2 &&
- test ! -f path0/untracked1 &&
- test ! -f path0/untracked2
+ test_path_is_file untracked1 &&
+ test_path_is_file untracked2 &&
+ test_path_is_missing path0/untracked1 &&
+ test_path_is_missing path0/untracked2
'
test_expect_success 'checking -f on untracked file with existing target' '
>path0/untracked1 &&
test_must_fail git mv -f untracked1 path0 &&
- test ! -f .git/index.lock &&
- test -f untracked1 &&
- test -f path0/untracked1
+ test_path_is_missing .git/index.lock &&
+ test_path_is_file untracked1 &&
+ test_path_is_file path0/untracked1
'
# clean up the mess in case bad things happen
@@ -215,8 +215,8 @@ test_expect_success 'absolute pathname' '
git add sub/file &&
git mv sub "$(pwd)/in" &&
- ! test -d sub &&
- test -d in &&
+ test_path_is_missing sub &&
+ test_path_is_dir in &&
git ls-files --error-unmatch in/file
)
'
@@ -234,8 +234,8 @@ test_expect_success 'absolute pathname outside should fail' '
git add sub/file &&
test_must_fail git mv sub "$out/out" &&
- test -d sub &&
- ! test -d ../in &&
+ test_path_is_dir sub &&
+ test_path_is_missing ../in &&
git ls-files --error-unmatch sub/file
)
'
@@ -295,8 +295,8 @@ test_expect_success 'git mv should overwrite symlink to a file' '
git add moved &&
test_must_fail git mv moved symlink &&
git mv -f moved symlink &&
- ! test -e moved &&
- test -f symlink &&
+ test_path_is_missing moved &&
+ test_path_is_file symlink &&
test "$(cat symlink)" = 1 &&
git update-index --refresh &&
git diff-files --quiet
@@ -312,13 +312,13 @@ test_expect_success 'git mv should overwrite file with a symlink' '
git add moved &&
test_must_fail git mv symlink moved &&
git mv -f symlink moved &&
- ! test -e symlink &&
+ test_path_is_missing symlink &&
git update-index --refresh &&
git diff-files --quiet
'
test_expect_success SYMLINKS 'check moved symlink' '
- test -h moved
+ test_path_is_symlink moved
'
rm -f moved symlink
@@ -352,7 +352,7 @@ test_expect_success 'git mv moves a submodule with a .git directory and no .gitm
) &&
mkdir mod &&
git mv sub mod/sub &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
git update-index --refresh &&
@@ -372,7 +372,7 @@ test_expect_success 'git mv moves a submodule with a .git directory and .gitmodu
) &&
mkdir mod &&
git mv sub mod/sub &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
echo mod/sub >expected &&
@@ -389,7 +389,7 @@ test_expect_success 'git mv moves a submodule with gitfile' '
entry="$(git ls-files --stage sub | cut -f 1)" &&
mkdir mod &&
git -C mod mv ../sub/ . &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
echo mod/sub >expected &&
@@ -408,7 +408,7 @@ test_expect_success 'mv does not complain when no .gitmodules file is found' '
mkdir mod &&
git mv sub mod/sub 2>actual.err &&
test_must_be_empty actual.err &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
git update-index --refresh &&
@@ -423,13 +423,13 @@ test_expect_success 'mv will error out on a modified .gitmodules file unless sta
entry="$(git ls-files --stage sub | cut -f 1)" &&
mkdir mod &&
test_must_fail git mv sub mod/sub 2>actual.err &&
- test -s actual.err &&
- test -e sub &&
+ test_file_not_empty actual.err &&
+ test_path_exists sub &&
git diff-files --quiet -- sub &&
git add .gitmodules &&
git mv sub mod/sub 2>actual.err &&
test_must_be_empty actual.err &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
git update-index --refresh &&
@@ -447,7 +447,7 @@ test_expect_success 'mv issues a warning when section is not found in .gitmodule
mkdir mod &&
git mv sub mod/sub 2>actual.err &&
test_cmp expect.err actual.err &&
- ! test -e sub &&
+ test_path_is_missing sub &&
test "$entry" = "$(git ls-files --stage mod/sub | cut -f 1)" &&
git -C mod/sub status &&
git update-index --refresh &&
@@ -460,7 +460,7 @@ test_expect_success 'mv --dry-run does not touch the submodule or .gitmodules' '
git submodule update &&
mkdir mod &&
git mv -n sub mod/sub 2>actual.err &&
- test -f sub/.git &&
+ test_path_is_file sub/.git &&
git diff-index --exit-code HEAD &&
git update-index --refresh &&
git diff-files --quiet -- sub .gitmodules
@@ -474,10 +474,10 @@ test_expect_success 'checking out a commit before submodule moved needs manual u
git status -s sub2 >actual &&
echo "?? sub2/" >expected &&
test_cmp expected actual &&
- ! test -f sub/.git &&
- test -f sub2/.git &&
+ test_path_is_missing sub/.git &&
+ test_path_is_file sub2/.git &&
git submodule update &&
- test -f sub/.git &&
+ test_path_is_file sub/.git &&
rm -rf sub2 &&
git diff-index --exit-code HEAD &&
git update-index --refresh &&
diff --git a/t/t7065-wtstatus-slow.sh b/t/t7065-wtstatus-slow.sh
new file mode 100755
index 0000000000..8d08a962f8
--- /dev/null
+++ b/t/t7065-wtstatus-slow.sh
@@ -0,0 +1,70 @@
+#!/bin/sh
+
+test_description='test status when slow untracked files'
+
+. ./test-lib.sh
+
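+# Pretend that enumerating untracked files was slow enough to trigger the
+# "It took X seconds to enumerate untracked files" advice in "git status".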
+GIT_TEST_UF_DELAY_WARNING=1
+export GIT_TEST_UF_DELAY_WARNING
+
+test_expect_success setup '
+ git checkout -b test &&
+ cat >.gitignore <<-\EOF &&
+ /actual
+ /expected
+ /out
+ EOF
+ git add .gitignore &&
+ git commit -m "Add .gitignore"
+'
+
+test_expect_success 'when core.untrackedCache and fsmonitor are unset' '
+ test_might_fail git config --unset-all core.untrackedCache &&
+ test_might_fail git config --unset-all core.fsmonitor &&
+ git status >out &&
+ sed "s/[0-9]\.[0-9][0-9]/X/g" out >actual &&
+ cat >expected <<-\EOF &&
+ On branch test
+
+ It took X seconds to enumerate untracked files.
+ See '"'"'git help status'"'"' for information on how to improve this.
+
+ nothing to commit, working tree clean
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'when core.untrackedCache true, but not fsmonitor' '
+ test_config core.untrackedCache true &&
+ test_might_fail git config --unset-all core.fsmonitor &&
+ git status >out &&
+ sed "s/[0-9]\.[0-9][0-9]/X/g" out >actual &&
+ cat >expected <<-\EOF &&
+ On branch test
+
+ It took X seconds to enumerate untracked files.
+ See '"'"'git help status'"'"' for information on how to improve this.
+
+ nothing to commit, working tree clean
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'when core.untrackedCache true, and fsmonitor' '
+ test_config core.untrackedCache true &&
+ test_config core.fsmonitor true &&
+ git status >out &&
+ sed "s/[0-9]\.[0-9][0-9]/X/g" out >actual &&
+ cat >expected <<-\EOF &&
+ On branch test
+
+ It took X seconds to enumerate untracked files,
+ but the results were cached, and subsequent runs may be faster.
+ See '"'"'git help status'"'"' for information on how to improve this.
+
+ nothing to commit, working tree clean
+ EOF
+ test_cmp expected actual
+'
+
+test_done
diff --git a/t/t7103-reset-bare.sh b/t/t7103-reset-bare.sh
index a60153f9f3..18bbd9975e 100755
--- a/t/t7103-reset-bare.sh
+++ b/t/t7103-reset-bare.sh
@@ -63,7 +63,7 @@ test_expect_success '"mixed" reset is not allowed in bare' '
test_must_fail git reset --mixed HEAD^
'
-test_expect_success !SANITIZE_LEAK '"soft" reset is allowed in bare' '
+test_expect_success '"soft" reset is allowed in bare' '
git reset --soft HEAD^ &&
git show --pretty=format:%s >out &&
echo one >expect &&
diff --git a/t/t7400-submodule-basic.sh b/t/t7400-submodule-basic.sh
index a989aafaf5..eae6a46ef3 100755
--- a/t/t7400-submodule-basic.sh
+++ b/t/t7400-submodule-basic.sh
@@ -579,6 +579,16 @@ test_expect_success 'status should be "modified" after submodule commit' '
grep "^+$rev2" list
'
+test_expect_success '"submodule --cached" command forms should be identical' '
+ git submodule status --cached >expect &&
+
+ git submodule --cached >actual &&
+ test_cmp expect actual &&
+
+ git submodule --cached status >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'the --cached sha1 should be rev1' '
git submodule --cached status >list &&
grep "^+$rev1" list
diff --git a/t/t7406-submodule-update.sh b/t/t7406-submodule-update.sh
index f094e3d7f3..b749d35f78 100755
--- a/t/t7406-submodule-update.sh
+++ b/t/t7406-submodule-update.sh
@@ -1179,4 +1179,160 @@ test_expect_success 'submodule update --recursive skip submodules with strategy=
test_cmp expect.err actual.err
'
+test_expect_success 'setup superproject with submodule.propagateBranches' '
+ git init sub1 &&
+ test_commit -C sub1 "sub1" &&
+ git init branch-super &&
+ git -C branch-super submodule add ../sub1 sub1 &&
+ git -C branch-super commit -m "super" &&
+
+ # Clone into a clean repo that we can cp around
+ git clone --recurse-submodules \
+ -c submodule.propagateBranches=true \
+ branch-super branch-super-clean &&
+ git -C branch-super-clean config submodule.propagateBranches true &&
+
+ # sub2 will not be in the clone. We will fetch the containing
+ # superproject commit and clone sub2 with "git submodule update".
+ git init sub2 &&
+ test_commit -C sub2 "sub2" &&
+ git -C branch-super submodule add ../sub2 sub2 &&
+ git -C branch-super commit -m "add sub2"
+'
+
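+# test_clean_submodule [!] <super_dir> <sub_dir>: check that the submodule's
+# checked-out HEAD matches (or, with "!", differs from) the gitlink recorded
+# in the superproject's index.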
+test_clean_submodule ()
+{
+ local negate super_dir sub_dir expect_oid actual_oid &&
+ if test "$1" = "!"
+ then
+ negate=t
+ shift
+ fi
+ super_dir="$1" &&
+ sub_dir="$2" &&
+ expect_oid="$(git -C "$super_dir" rev-parse ":$sub_dir")" &&
+ actual_oid="$(git -C "$super_dir/$sub_dir" rev-parse HEAD)" &&
+ if test -n "$negate"
+ then
+ ! test "$expect_oid" = "$actual_oid"
+ else
+ test "$expect_oid" = "$actual_oid"
+ fi
+}
+
+# Test the behavior of a newly cloned submodule
+test_expect_success 'branches - newly-cloned submodule, detached HEAD' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned fetch origin main &&
+ git -C branch-super-cloned checkout FETCH_HEAD &&
+ git -C branch-super-cloned/sub1 checkout --detach &&
+ git -C branch-super-cloned submodule update &&
+
+ # sub1 and sub2 should be in detached HEAD
+ git -C branch-super-cloned/sub1 rev-parse --verify HEAD &&
+ test_must_fail git -C branch-super-cloned/sub1 symbolic-ref HEAD &&
+ test_clean_submodule branch-super-cloned sub1 &&
+ git -C branch-super-cloned/sub2 rev-parse --verify HEAD &&
+ test_must_fail git -C branch-super-cloned/sub2 symbolic-ref HEAD &&
+ test_clean_submodule branch-super-cloned sub2
+'
+
+test_expect_success 'branches - newly-cloned submodule, branch checked out' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned fetch origin main &&
+ git -C branch-super-cloned checkout FETCH_HEAD &&
+ git -C branch-super-cloned branch new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ git -C branch-super-cloned/sub1 branch new-branch &&
+ git -C branch-super-cloned submodule update &&
+
+ # Ignore sub1; we will test it later.
+ # sub2 should check out the branch
+ HEAD_BRANCH2=$(git -C branch-super-cloned/sub2 symbolic-ref HEAD) &&
+ test $HEAD_BRANCH2 = "refs/heads/new-branch" &&
+ test_clean_submodule branch-super-cloned sub2
+'
+
+# Test the behavior of an already-cloned submodule.
+# NEEDSWORK: When updating with branches, we always use the branch instead of the
+# gitlink's OID. This results in some imperfect behavior:
+#
+# - If the gitlink's OID disagrees with the branch OID, updating with branches
+# may result in a dirty worktree.
+# - If the branch does not exist, the update fails.
+#
+# We will reevaluate when "git checkout --recurse-submodules" supports branches.
+# For now, just test for this imperfect behavior.
+test_expect_success 'branches - correct branch checked out, OIDs agree' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned branch --recurse-submodules new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ git -C branch-super-cloned/sub1 checkout new-branch &&
+ git -C branch-super-cloned submodule update &&
+
+ HEAD_BRANCH1=$(git -C branch-super-cloned/sub1 symbolic-ref HEAD) &&
+ test $HEAD_BRANCH1 = "refs/heads/new-branch" &&
+ test_clean_submodule branch-super-cloned sub1
+'
+
+test_expect_success 'branches - correct branch checked out, OIDs disagree' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned branch --recurse-submodules new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ git -C branch-super-cloned/sub1 checkout new-branch &&
+ test_commit -C branch-super-cloned/sub1 new-commit &&
+ git -C branch-super-cloned submodule update &&
+
+ HEAD_BRANCH1=$(git -C branch-super-cloned/sub1 symbolic-ref HEAD) &&
+ test $HEAD_BRANCH1 = "refs/heads/new-branch" &&
+ test_clean_submodule ! branch-super-cloned sub1
+'
+
+test_expect_success 'branches - other branch checked out, correct branch exists, OIDs agree' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned branch --recurse-submodules new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ git -C branch-super-cloned/sub1 checkout main &&
+ git -C branch-super-cloned submodule update &&
+
+ HEAD_BRANCH1=$(git -C branch-super-cloned/sub1 symbolic-ref HEAD) &&
+ test $HEAD_BRANCH1 = "refs/heads/new-branch" &&
+ test_clean_submodule branch-super-cloned sub1
+'
+
+test_expect_success 'branches - other branch checked out, correct branch exists, OIDs disagree' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned branch --recurse-submodules new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ git -C branch-super-cloned/sub1 checkout new-branch &&
+ test_commit -C branch-super-cloned/sub1 new-commit &&
+ git -C branch-super-cloned/sub1 checkout main &&
+ git -C branch-super-cloned submodule update &&
+
+ HEAD_BRANCH1=$(git -C branch-super-cloned/sub1 symbolic-ref HEAD) &&
+ test $HEAD_BRANCH1 = "refs/heads/new-branch" &&
+ test_clean_submodule ! branch-super-cloned sub1
+'
+
+test_expect_success 'branches - other branch checked out, correct branch does not exist' '
+ test_when_finished "rm -fr branch-super-cloned" &&
+ cp -r branch-super-clean branch-super-cloned &&
+
+ git -C branch-super-cloned branch new-branch &&
+ git -C branch-super-cloned checkout new-branch &&
+ test_must_fail git -C branch-super-cloned submodule update
+'
+
test_done
diff --git a/t/t7407-submodule-foreach.sh b/t/t7407-submodule-foreach.sh
index 59bd150166..8d7b234beb 100755
--- a/t/t7407-submodule-foreach.sh
+++ b/t/t7407-submodule-foreach.sh
@@ -154,6 +154,11 @@ test_expect_success 'use "submodule foreach" to checkout 2nd level submodule' '
)
'
+test_expect_success 'usage: foreach -- --not-an-option' '
+ test_expect_code 1 git submodule foreach -- --not-an-option &&
+ test_expect_code 1 git -C clone2 submodule foreach -- --not-an-option
+'
+
test_expect_success 'use "foreach --recursive" to checkout all submodules' '
(
cd clone2 &&
diff --git a/t/t7411-submodule-config.sh b/t/t7411-submodule-config.sh
index c583c4e373..c0167944ab 100755
--- a/t/t7411-submodule-config.sh
+++ b/t/t7411-submodule-config.sh
@@ -137,44 +137,44 @@ test_expect_success 'error in history in fetchrecursesubmodule lets continue' '
)
'
-test_expect_success 'reading submodules config from the working tree with "submodule--helper config"' '
+test_expect_success 'reading submodules config from the working tree' '
(cd super &&
echo "../submodule" >expect &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
)
'
-test_expect_success 'unsetting submodules config from the working tree with "submodule--helper config --unset"' '
+test_expect_success 'unsetting submodules config from the working tree' '
(cd super &&
- git submodule--helper config --unset submodule.submodule.url &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-unset submodule.submodule.url &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_must_be_empty actual
)
'
-test_expect_success 'writing submodules config with "submodule--helper config"' '
+test_expect_success 'writing submodules config' '
(cd super &&
echo "new_url" >expect &&
- git submodule--helper config submodule.submodule.url "new_url" &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-set submodule.submodule.url "new_url" &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
)
'
-test_expect_success 'overwriting unstaged submodules config with "submodule--helper config"' '
+test_expect_success 'overwriting unstaged submodules config' '
test_when_finished "git -C super checkout .gitmodules" &&
(cd super &&
echo "newer_url" >expect &&
- git submodule--helper config submodule.submodule.url "newer_url" &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-set submodule.submodule.url "newer_url" &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
)
'
test_expect_success 'writeable .gitmodules when it is in the working tree' '
- git -C super submodule--helper config --check-writeable
+ test-tool -C super submodule config-writeable
'
test_expect_success 'writeable .gitmodules when it is nowhere in the repository' '
@@ -183,7 +183,7 @@ test_expect_success 'writeable .gitmodules when it is nowhere in the repository'
(cd super &&
git rm .gitmodules &&
git commit -m "remove .gitmodules from the current branch" &&
- git submodule--helper config --check-writeable
+ test-tool submodule config-writeable
)
'
@@ -191,7 +191,7 @@ test_expect_success 'non-writeable .gitmodules when it is in the index but not i
test_when_finished "git -C super checkout .gitmodules" &&
(cd super &&
rm -f .gitmodules &&
- test_must_fail git submodule--helper config --check-writeable
+ test_must_fail test-tool submodule config-writeable
)
'
@@ -200,7 +200,7 @@ test_expect_success 'non-writeable .gitmodules when it is in the current branch
test_when_finished "git -C super reset --hard $ORIG" &&
(cd super &&
git rm .gitmodules &&
- test_must_fail git submodule--helper config --check-writeable
+ test_must_fail test-tool submodule config-writeable
)
'
@@ -208,11 +208,11 @@ test_expect_success 'reading submodules config from the index when .gitmodules i
ORIG=$(git -C super rev-parse HEAD) &&
test_when_finished "git -C super reset --hard $ORIG" &&
(cd super &&
- git submodule--helper config submodule.submodule.url "staged_url" &&
+ test-tool submodule config-set submodule.submodule.url "staged_url" &&
git add .gitmodules &&
rm -f .gitmodules &&
echo "staged_url" >expect &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
)
'
@@ -223,7 +223,7 @@ test_expect_success 'reading submodules config from the current branch when .git
(cd super &&
git rm .gitmodules &&
echo "../submodule" >expect &&
- git submodule--helper config submodule.submodule.url >actual &&
+ test-tool submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
)
'
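Not part of the patch; for reference, the user-facing commands that roughly correspond to the test-tool config-list/config-set calls above are plain "git config" invocations against the .gitmodules file:
    git config -f .gitmodules submodule.submodule.url           # read
    git config -f .gitmodules submodule.submodule.url new_url   # write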
diff --git a/t/t7412-submodule-absorbgitdirs.sh b/t/t7412-submodule-absorbgitdirs.sh
index 2859695c6d..a5cd6db7ac 100755
--- a/t/t7412-submodule-absorbgitdirs.sh
+++ b/t/t7412-submodule-absorbgitdirs.sh
@@ -18,13 +18,19 @@ test_expect_success 'setup a real submodule' '
'
test_expect_success 'absorb the git dir' '
+ >expect &&
+ >actual &&
>expect.1 &&
>expect.2 &&
>actual.1 &&
>actual.2 &&
git status >expect.1 &&
git -C sub1 rev-parse HEAD >expect.2 &&
- git submodule absorbgitdirs &&
+ cat >expect <<-\EOF &&
+ Migrating git directory of '\''sub1'\'' from '\''sub1/.git'\'' to '\''.git/modules/sub1'\''
+ EOF
+ git submodule absorbgitdirs 2>actual &&
+ test_cmp expect actual &&
git fsck &&
test -f sub1/.git &&
test -d .git/modules/sub1 &&
@@ -37,7 +43,8 @@ test_expect_success 'absorb the git dir' '
test_expect_success 'absorbing does not fail for deinitialized submodules' '
test_when_finished "git submodule update --init" &&
git submodule deinit --all &&
- git submodule absorbgitdirs &&
+ git submodule absorbgitdirs 2>err &&
+ test_must_be_empty err &&
test -d .git/modules/sub1 &&
test -d sub1 &&
! test -e sub1/.git
@@ -56,7 +63,11 @@ test_expect_success 'setup nested submodule' '
test_expect_success 'absorb the git dir in a nested submodule' '
git status >expect.1 &&
git -C sub1/nested rev-parse HEAD >expect.2 &&
- git submodule absorbgitdirs &&
+ cat >expect <<-\EOF &&
+ Migrating git directory of '\''sub1/nested'\'' from '\''sub1/nested/.git'\'' to '\''.git/modules/sub1/modules/nested'\''
+ EOF
+ git submodule absorbgitdirs 2>actual &&
+ test_cmp expect actual &&
test -f sub1/nested/.git &&
test -d .git/modules/sub1/modules/nested &&
git status >actual.1 &&
@@ -87,7 +98,11 @@ test_expect_success 're-setup nested submodule' '
test_expect_success 'absorb the git dir in a nested submodule' '
git status >expect.1 &&
git -C sub1/nested rev-parse HEAD >expect.2 &&
- git submodule absorbgitdirs &&
+ cat >expect <<-\EOF &&
+ Migrating git directory of '\''sub1'\'' from '\''sub1/.git'\'' to '\''.git/modules/sub1'\''
+ EOF
+ git submodule absorbgitdirs 2>actual &&
+ test_cmp expect actual &&
test -f sub1/.git &&
test -f sub1/nested/.git &&
test -d .git/modules/sub1/modules/nested &&
@@ -107,7 +122,11 @@ test_expect_success 'setup a gitlink with missing .gitmodules entry' '
test_expect_success 'absorbing the git dir fails for incomplete submodules' '
git status >expect.1 &&
git -C sub2 rev-parse HEAD >expect.2 &&
- test_must_fail git submodule absorbgitdirs &&
+ cat >expect <<-\EOF &&
+ fatal: could not lookup name for submodule '\''sub2'\''
+ EOF
+ test_must_fail git submodule absorbgitdirs 2>actual &&
+ test_cmp expect actual &&
git -C sub2 fsck &&
test -d sub2/.git &&
git status >actual &&
@@ -127,8 +146,11 @@ test_expect_success 'setup a submodule with multiple worktrees' '
'
test_expect_success 'absorbing fails for a submodule with multiple worktrees' '
- test_must_fail git submodule absorbgitdirs sub3 2>error &&
- test_i18ngrep "not supported" error
+ cat >expect <<-\EOF &&
+ fatal: could not lookup name for submodule '\''sub2'\''
+ EOF
+ test_must_fail git submodule absorbgitdirs 2>actual &&
+ test_cmp expect actual
'
test_done
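Not part of the patch; the "Migrating git directory ..." message that the updated tests capture goes to stderr, so a quick standalone check looks like:
    git submodule absorbgitdirs 2>msgs
    grep "^Migrating git directory of" msgs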
diff --git a/t/t7418-submodule-sparse-gitmodules.sh b/t/t7418-submodule-sparse-gitmodules.sh
index d5874200fd..dde11ecce8 100755
--- a/t/t7418-submodule-sparse-gitmodules.sh
+++ b/t/t7418-submodule-sparse-gitmodules.sh
@@ -50,12 +50,12 @@ test_expect_success 'sparse checkout setup which hides .gitmodules' '
test_expect_success 'reading gitmodules config file when it is not checked out' '
echo "../submodule" >expect &&
- git -C super submodule--helper config submodule.submodule.url >actual &&
+ test-tool -C super submodule config-list submodule.submodule.url >actual &&
test_cmp expect actual
'
test_expect_success 'not writing gitmodules config file when it is not checked out' '
- test_must_fail git -C super submodule--helper config submodule.submodule.url newurl &&
+ test_must_fail test-tool -C super submodule config-set submodule.submodule.url newurl &&
test_path_is_missing super/.gitmodules
'
diff --git a/t/t7422-submodule-output.sh b/t/t7422-submodule-output.sh
new file mode 100755
index 0000000000..ab946ec940
--- /dev/null
+++ b/t/t7422-submodule-output.sh
@@ -0,0 +1,170 @@
+#!/bin/sh
+
+test_description='submodule --cached, --quiet etc. output'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-t3100.sh
+
+setup_sub () {
+ local d="$1" &&
+ shift &&
+ git $@ clone . "$d" &&
+ git $@ submodule add ./"$d"
+}
+
+normalize_status () {
+ sed -e 's/-g[0-9a-f]*/-gHASH/'
+}
+
+test_expect_success 'setup' '
+ test_commit A &&
+ test_commit B &&
+ setup_sub S &&
+ setup_sub S.D &&
+ setup_sub S.C &&
+ setup_sub S.C.D &&
+ setup_sub X &&
+ git add S* &&
+ test_commit C &&
+
+ # recursive in X/
+ git -C X pull &&
+ GIT_ALLOW_PROTOCOL=file git -C X submodule update --init &&
+
+ # dirty
+ for d in S.D X/S.D
+ do
+ echo dirty >"$d"/A.t || return 1
+ done &&
+
+ # commit (for --cached)
+ for d in S.C* X/S.C*
+ do
+ git -C "$d" reset --hard A || return 1
+ done &&
+
+ # dirty
+ for d in S*.D X/S*.D
+ do
+ echo dirty >"$d/C2.t" || return 1
+ done &&
+
+ for ref in A B C
+ do
+ # Not different with SHA-1 and SHA-256, just (ab)using
+ # test_oid_cache as a variable bag to avoid using
+ # $(git rev-parse ...).
+ oid=$(git rev-parse $ref) &&
+ test_oid_cache <<-EOF || return 1
+ $ref sha1:$oid
+ $ref sha256:$oid
+ EOF
+ done
+'
+
+for opts in "" "status"
+do
+ test_expect_success "git submodule $opts" '
+ sed -e "s/^>//" >expect <<-EOF &&
+ > $(test_oid B) S (B)
+ >+$(test_oid A) S.C (A)
+ >+$(test_oid A) S.C.D (A)
+ > $(test_oid B) S.D (B)
+ >+$(test_oid C) X (C)
+ EOF
+ git submodule $opts >actual.raw &&
+ normalize_status <actual.raw >actual &&
+ test_cmp expect actual
+ '
+done
+
+for opts in \
+ "status --recursive"
+do
+ test_expect_success "git submodule $opts" '
+ sed -e "s/^>//" >expect <<-EOF &&
+ > $(test_oid B) S (B)
+ >+$(test_oid A) S.C (A)
+ >+$(test_oid A) S.C.D (A)
+ > $(test_oid B) S.D (B)
+ >+$(test_oid C) X (C)
+ > $(test_oid B) X/S (B)
+ >+$(test_oid A) X/S.C (A)
+ >+$(test_oid A) X/S.C.D (A)
+ > $(test_oid B) X/S.D (B)
+ > $(test_oid B) X/X (B)
+ EOF
+ git submodule $opts >actual.raw &&
+ normalize_status <actual.raw >actual &&
+ test_cmp expect actual
+ '
+done
+
+for opts in \
+ "--quiet" \
+ "--quiet status" \
+ "status --quiet"
+do
+ test_expect_success "git submodule $opts" '
+ git submodule $opts >out &&
+ test_must_be_empty out
+ '
+done
+
+for opts in \
+ "--cached" \
+ "--cached status" \
+ "status --cached"
+do
+ test_expect_success "git submodule $opts" '
+ sed -e "s/^>//" >expect <<-EOF &&
+ > $(test_oid B) S (B)
+ >+$(test_oid B) S.C (B)
+ >+$(test_oid B) S.C.D (B)
+ > $(test_oid B) S.D (B)
+ >+$(test_oid B) X (B)
+ EOF
+ git submodule $opts >actual.raw &&
+ normalize_status <actual.raw >actual &&
+ test_cmp expect actual
+ '
+done
+
+for opts in \
+ "--cached --quiet" \
+ "--cached --quiet status" \
+ "--cached status --quiet" \
+ "--quiet status --cached" \
+ "status --cached --quiet"
+do
+ test_expect_success "git submodule $opts" '
+ git submodule $opts >out &&
+ test_must_be_empty out
+ '
+done
+
+for opts in \
+ "status --cached --recursive" \
+ "--cached status --recursive"
+do
+ test_expect_success "git submodule $opts" '
+ sed -e "s/^>//" >expect <<-EOF &&
+ > $(test_oid B) S (B)
+ >+$(test_oid B) S.C (B)
+ >+$(test_oid B) S.C.D (B)
+ > $(test_oid B) S.D (B)
+ >+$(test_oid B) X (B)
+ > $(test_oid B) X/S (B)
+ >+$(test_oid B) X/S.C (B)
+ >+$(test_oid B) X/S.C.D (B)
+ > $(test_oid B) X/S.D (B)
+ > $(test_oid B) X/X (B)
+ EOF
+ git submodule $opts >actual.raw &&
+ normalize_status <actual.raw >actual &&
+ test_cmp expect actual
+ '
+done
+
+test_done
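Not part of the patch; normalize_status above only rewrites the abbreviated hash in a describe suffix so the output stays stable across runs. A worked example (the input line is made up for illustration):
    echo "+0123456789abcdef0123456789abcdef01234567 S.C (v1.0-1-g0123456)" |
    sed -e "s/-g[0-9a-f]*/-gHASH/"
    # prints: +0123456789abcdef0123456789abcdef01234567 S.C (v1.0-1-gHASH)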
diff --git a/t/t7504-commit-msg-hook.sh b/t/t7504-commit-msg-hook.sh
index a39de8c112..07ca46fb0d 100755
--- a/t/t7504-commit-msg-hook.sh
+++ b/t/t7504-commit-msg-hook.sh
@@ -5,6 +5,7 @@ test_description='commit-msg hook'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'with no hook' '
diff --git a/t/t7506-status-submodule.sh b/t/t7506-status-submodule.sh
index d050091345..52a82b703f 100755
--- a/t/t7506-status-submodule.sh
+++ b/t/t7506-status-submodule.sh
@@ -412,4 +412,23 @@ test_expect_success 'status with added file in nested submodule (short)' '
EOF
'
+test_expect_success 'status in superproject with submodules respects parallel settings' '
+ test_when_finished "rm -f trace.out" &&
+ (
+ GIT_TRACE=$(pwd)/trace.out git status &&
+ grep "1 tasks" trace.out &&
+ >trace.out &&
+
+ git config submodule.diffJobs 8 &&
+ GIT_TRACE=$(pwd)/trace.out git status &&
+ grep "8 tasks" trace.out &&
+ >trace.out &&
+
+ GIT_TRACE=$(pwd)/trace.out git -c submodule.diffJobs=0 status &&
+ grep "preparing to run up to [0-9]* tasks" trace.out &&
+ ! grep "up to 0 tasks" trace.out &&
+ >trace.out
+ )
+'
+
test_done
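Not part of the patch; the new submodule.diffJobs knob that this test exercises can be tried directly, with GIT_TRACE showing how many parallel tasks the submodule diff machinery schedules:
    git config submodule.diffJobs 8
    GIT_TRACE=$(pwd)/trace.out git status
    grep "8 tasks" trace.out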
diff --git a/t/t7517-per-repo-email.sh b/t/t7517-per-repo-email.sh
index 163ae80468..efc6496e2b 100755
--- a/t/t7517-per-repo-email.sh
+++ b/t/t7517-per-repo-email.sh
@@ -9,6 +9,7 @@ test_description='per-repo forced setting of email address'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup a likely user.useConfigOnly use case' '
diff --git a/t/t7520-ignored-hook-warning.sh b/t/t7520-ignored-hook-warning.sh
index dc57526e6f..184b258989 100755
--- a/t/t7520-ignored-hook-warning.sh
+++ b/t/t7520-ignored-hook-warning.sh
@@ -2,6 +2,7 @@
test_description='ignored hook warning'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
diff --git a/t/t7605-merge-resolve.sh b/t/t7605-merge-resolve.sh
index 5d56c38546..62d935d31c 100755
--- a/t/t7605-merge-resolve.sh
+++ b/t/t7605-merge-resolve.sh
@@ -4,6 +4,7 @@ test_description='git merge
Testing the resolve strategy.'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t7609-mergetool--lib.sh b/t/t7609-mergetool--lib.sh
index 8b1c3bd39f..2090d12a48 100755
--- a/t/t7609-mergetool--lib.sh
+++ b/t/t7609-mergetool--lib.sh
@@ -8,7 +8,7 @@ TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'mergetool --tool=vimdiff creates the expected layout' '
- . "$GIT_BUILD_DIR"/mergetools/vimdiff &&
+ . "$GIT_SOURCE_DIR"/mergetools/vimdiff &&
run_unit_tests
'
diff --git a/t/t7610-mergetool.sh b/t/t7610-mergetool.sh
index 8cc64729ad..7b957022f1 100755
--- a/t/t7610-mergetool.sh
+++ b/t/t7610-mergetool.sh
@@ -33,7 +33,7 @@ test_expect_success 'setup' '
git add foo &&
git commit -m "Add foo"
) &&
- git submodule add git://example.com/submod submod &&
+ git submodule add file:///dev/null submod &&
git add file1 "spaced name" file1[1-4] subdir/file3 .gitmodules submod &&
git commit -m "add initial versions" &&
@@ -614,7 +614,7 @@ test_expect_success 'submodule in subdirectory' '
)
) &&
test_when_finished "rm -rf subdir/subdir_module" &&
- git submodule add git://example.com/subsubmodule subdir/subdir_module &&
+ git submodule add file:///dev/null subdir/subdir_module &&
git add subdir/subdir_module &&
git commit -m "add submodule in subdirectory" &&
diff --git a/t/t7614-merge-signoff.sh b/t/t7614-merge-signoff.sh
index fee258d4f0..cf96a35e8e 100755
--- a/t/t7614-merge-signoff.sh
+++ b/t/t7614-merge-signoff.sh
@@ -8,6 +8,7 @@ This test runs git merge --signoff and makes sure that it works.
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Setup test files
diff --git a/t/t7700-repack.sh b/t/t7700-repack.sh
index 5be483bf88..c630e0d52d 100755
--- a/t/t7700-repack.sh
+++ b/t/t7700-repack.sh
@@ -543,4 +543,125 @@ test_expect_success '-n overrides repack.updateServerInfo=true' '
test_server_info_missing
'
+test_expect_success '--expire-to stores pruned objects (now)' '
+ git init expire-to-now &&
+ (
+ cd expire-to-now &&
+
+ git branch -M main &&
+
+ test_commit base &&
+
+ git checkout -b cruft &&
+ test_commit --no-tag cruft &&
+
+ git rev-list --objects --no-object-names main..cruft >moved.raw &&
+ sort moved.raw >moved.want &&
+
+ git rev-list --all --objects --no-object-names >expect.raw &&
+ sort expect.raw >expect &&
+
+ git checkout main &&
+ git branch -D cruft &&
+ git reflog expire --all --expire=all &&
+
+ git init --bare expired.git &&
+ git repack -d \
+ --cruft --cruft-expiration="now" \
+ --expire-to="expired.git/objects/pack/pack" &&
+
+ expired="$(ls expired.git/objects/pack/pack-*.idx)" &&
+ test_path_is_file "${expired%.idx}.mtimes" &&
+
+ # Since the `--cruft-expiration` is "now", the effective
+ # behavior is to move _all_ unreachable objects out to
+ # the location in `--expire-to`.
+ git show-index <$expired >expired.raw &&
+ cut -d" " -f2 expired.raw | sort >expired.objects &&
+ git rev-list --all --objects --no-object-names \
+ >remaining.objects &&
+
+ # ...in other words, the combined contents of this
+ # repository and expired.git should be the same as the
+ # set of objects we started with.
+ cat expired.objects remaining.objects | sort >actual &&
+ test_cmp expect actual &&
+
+ # The "moved" objects (i.e., those in expired.git)
+ # should be the same as the cruft objects which were
+ # expired in the previous step.
+ test_cmp moved.want expired.objects
+ )
+'
+
+test_expect_success '--expire-to stores pruned objects (5.minutes.ago)' '
+ git init expire-to-5.minutes.ago &&
+ (
+ cd expire-to-5.minutes.ago &&
+
+ git branch -M main &&
+
+ test_commit base &&
+
+ # Create two classes of unreachable objects, one which
+ # is older than 5 minutes (stale), and another which is
+ # newer (recent).
+ for kind in stale recent
+ do
+ git checkout -b $kind main &&
+ test_commit --no-tag $kind || return 1
+ done &&
+
+ git rev-list --objects --no-object-names main..stale >in &&
+ stale="$(git pack-objects $objdir/pack/pack <in)" &&
+ mtime="$(test-tool chmtime --get =-600 $objdir/pack/pack-$stale.pack)" &&
+
+ # expect holds the set of objects we expect to find in
+ # this repository after repacking
+ git rev-list --objects --no-object-names recent >expect.raw &&
+ sort expect.raw >expect &&
+
+ # moved.want holds the set of objects we expect to find
+ # in expired.git
+ git rev-list --objects --no-object-names main..stale >out &&
+ sort out >moved.want &&
+
+ git checkout main &&
+ git branch -D stale recent &&
+ git reflog expire --all --expire=all &&
+ git prune-packed &&
+
+ git init --bare expired.git &&
+ git repack -d \
+ --cruft --cruft-expiration=5.minutes.ago \
+ --expire-to="expired.git/objects/pack/pack" &&
+
+ # Some of the remaining objects in this repository are
+ # unreachable, so use `cat-file --batch-all-objects`
+ # instead of `rev-list` to get their names
+ git cat-file --batch-all-objects --batch-check="%(objectname)" \
+ >remaining.objects &&
+ sort remaining.objects >actual &&
+ test_cmp expect actual &&
+
+ (
+ cd expired.git &&
+
+ expired="$(ls objects/pack/pack-*.mtimes)" &&
+ test-tool pack-mtimes $(basename $expired) >out &&
+ cut -d" " -f1 out | sort >../moved.got &&
+
+ # Ensure that there are as many objects with the
+ # expected mtime as were moved to expired.git.
+ #
+ # In other words, ensure that the recorded
+ # mtimes of any moved objects were written
+ # correctly.
+ grep " $mtime$" out >matching &&
+ test_line_count = $(wc -l <../moved.want) matching
+ ) &&
+ test_cmp moved.want moved.got
+ )
+'
+
test_done
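Not part of the patch; the shape of the new invocation these tests cover: instead of deleting objects that a cruft repack would prune, write them into a separate bare repository:
    git init --bare expired.git
    git repack -d --cruft --cruft-expiration=now \
        --expire-to="expired.git/objects/pack/pack"
    ls expired.git/objects/pack/pack-*.mtimes   # pruned objects (and their mtimes) land here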
diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh
index 96bdd42045..823331e44a 100755
--- a/t/t7900-maintenance.sh
+++ b/t/t7900-maintenance.sh
@@ -500,9 +500,28 @@ test_expect_success 'register and unregister' '
git config --global --get-all maintenance.repo >actual &&
test_cmp before actual &&
+ git config --file ./other --add maintenance.repo /existing1 &&
+ git config --file ./other --add maintenance.repo /existing2 &&
+ git config --file ./other --get-all maintenance.repo >before &&
+
+ git maintenance register --config-file ./other &&
+ test_cmp_config false maintenance.auto &&
+ git config --file ./other --get-all maintenance.repo >between &&
+ cp before expect &&
+ pwd >>expect &&
+ test_cmp expect between &&
+
+ git maintenance unregister --config-file ./other &&
+ git config --file ./other --get-all maintenance.repo >actual &&
+ test_cmp before actual &&
+
test_must_fail git maintenance unregister 2>err &&
grep "is not registered" err &&
- git maintenance unregister --force
+ git maintenance unregister --force &&
+
+ test_must_fail git maintenance unregister --config-file ./other 2>err &&
+ grep "is not registered" err &&
+ git maintenance unregister --config-file ./other --force
'
test_expect_success !MINGW 'register and unregister with regex metacharacters' '
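Not part of the patch; registering and unregistering against an alternate config file, as the extended test above does, looks like this outside the suite:
    git maintenance register --config-file ./other
    git config --file ./other --get-all maintenance.repo
    git maintenance unregister --config-file ./other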
diff --git a/t/t9003-help-autocorrect.sh b/t/t9003-help-autocorrect.sh
index f00deaf381..4b9cb4c942 100755
--- a/t/t9003-help-autocorrect.sh
+++ b/t/t9003-help-autocorrect.sh
@@ -1,6 +1,8 @@
#!/bin/sh
test_description='help.autocorrect finding a match'
+
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success 'setup' '
diff --git a/t/t9115-git-svn-dcommit-funky-renames.sh b/t/t9115-git-svn-dcommit-funky-renames.sh
index 419f055721..743fbe1fe4 100755
--- a/t/t9115-git-svn-dcommit-funky-renames.sh
+++ b/t/t9115-git-svn-dcommit-funky-renames.sh
@@ -5,7 +5,6 @@
test_description='git svn dcommit can commit renames of files with ugly names'
-TEST_FAILS_SANITIZE_LEAK=true
. ./lib-git-svn.sh
test_expect_success 'load repository with strange names' '
diff --git a/t/t9146-git-svn-empty-dirs.sh b/t/t9146-git-svn-empty-dirs.sh
index 79c26ed69c..09606f1b3c 100755
--- a/t/t9146-git-svn-empty-dirs.sh
+++ b/t/t9146-git-svn-empty-dirs.sh
@@ -4,7 +4,6 @@
test_description='git svn creates empty directories'
-TEST_FAILS_SANITIZE_LEAK=true
. ./lib-git-svn.sh
test_expect_success 'initialize repo' '
diff --git a/t/t9148-git-svn-propset.sh b/t/t9148-git-svn-propset.sh
index 6cc76a07b3..aebb28995e 100755
--- a/t/t9148-git-svn-propset.sh
+++ b/t/t9148-git-svn-propset.sh
@@ -5,7 +5,6 @@
test_description='git svn propset tests'
-TEST_FAILS_SANITIZE_LEAK=true
. ./lib-git-svn.sh
test_expect_success 'setup propset via import' '
diff --git a/t/t9160-git-svn-preserve-empty-dirs.sh b/t/t9160-git-svn-preserve-empty-dirs.sh
index 9cf7a1427a..36c6b1a12f 100755
--- a/t/t9160-git-svn-preserve-empty-dirs.sh
+++ b/t/t9160-git-svn-preserve-empty-dirs.sh
@@ -9,7 +9,6 @@ This test uses git to clone a Subversion repository that contains empty
directories, and checks that corresponding directories are created in the
local Git repository with placeholder files.'
-TEST_FAILS_SANITIZE_LEAK=true
. ./lib-git-svn.sh
GIT_REPO=git-svn-repo
diff --git a/t/t9210-scalar.sh b/t/t9210-scalar.sh
index be51a8bb7a..25f500cf68 100755
--- a/t/t9210-scalar.sh
+++ b/t/t9210-scalar.sh
@@ -166,6 +166,20 @@ test_expect_success 'scalar reconfigure' '
test true = "$(git -C one/src config core.preloadIndex)"
'
+test_expect_success '`reconfigure -a` removes stale config entries' '
+ git init stale/src &&
+ scalar register stale &&
+ scalar list >scalar.repos &&
+ grep stale scalar.repos &&
+
+ grep -v stale scalar.repos >expect &&
+
+ rm -rf stale &&
+ scalar reconfigure -a &&
+ scalar list >scalar.repos &&
+ test_cmp expect scalar.repos
+'
+
test_expect_success 'scalar delete without enlistment shows a usage' '
test_expect_code 129 scalar delete
'
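Not part of the patch; the cleanup behavior the new test checks: once an enlistment's directory is gone, reconfiguring all enlistments drops the stale entry from the list:
    rm -rf stale
    scalar reconfigure -a
    scalar list   # no longer mentions "stale"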
diff --git a/t/t9902-completion.sh b/t/t9902-completion.sh
index 43de868b80..27e3aa46c1 100755
--- a/t/t9902-completion.sh
+++ b/t/t9902-completion.sh
@@ -33,7 +33,7 @@ complete ()
GIT_TESTING_ALL_COMMAND_LIST='add checkout check-attr rebase ls-files'
GIT_TESTING_PORCELAIN_COMMAND_LIST='add checkout rebase'
-. "$GIT_BUILD_DIR/contrib/completion/git-completion.bash"
+. "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash"
# We don't need this function to actually join words or do anything special.
# Also, it's cleaner to avoid touching bash's internal completion variables.
@@ -2255,6 +2255,38 @@ test_expect_success 'checkout completes ref names' '
EOF
'
+test_expect_success 'checkout does not match ref names of a different case' '
+ test_completion "git checkout M" ""
+'
+
+test_expect_success 'checkout matches case insensitively with GIT_COMPLETION_IGNORE_CASE' '
+ (
+ . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ GIT_COMPLETION_IGNORE_CASE=1 && export GIT_COMPLETION_IGNORE_CASE &&
+ test_completion "git checkout M" <<-\EOF
+ main Z
+ mybranch Z
+ mytag Z
+ EOF
+ )
+'
+
+test_expect_success 'checkout completes pseudo refs' '
+ test_completion "git checkout H" <<-\EOF
+ HEAD Z
+ EOF
+'
+
+test_expect_success 'checkout completes pseudo refs case insensitively with GIT_COMPLETION_IGNORE_CASE' '
+ (
+ . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ GIT_COMPLETION_IGNORE_CASE=1 && export GIT_COMPLETION_IGNORE_CASE &&
+ test_completion "git checkout h" <<-\EOF
+ HEAD Z
+ EOF
+ )
+'
+
test_expect_success 'git -C <path> checkout uses the right repo' '
test_completion "git -C subdir -C subsubdir -C .. -C ../otherrepo checkout b" <<-\EOF
branch-in-other Z
@@ -2567,7 +2599,7 @@ test_expect_success 'sourcing the completion script clears cached commands' '
(
__git_compute_all_commands &&
verbose test -n "$__git_all_commands" &&
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
verbose test -z "$__git_all_commands"
)
'
@@ -2576,7 +2608,7 @@ test_expect_success 'sourcing the completion script clears cached merge strategi
(
__git_compute_merge_strategies &&
verbose test -n "$__git_merge_strategies" &&
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
verbose test -z "$__git_merge_strategies"
)
'
@@ -2587,7 +2619,7 @@ test_expect_success 'sourcing the completion script clears cached --options' '
verbose test -n "$__gitcomp_builtin_checkout" &&
__gitcomp_builtin notes_edit &&
verbose test -n "$__gitcomp_builtin_notes_edit" &&
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
verbose test -z "$__gitcomp_builtin_checkout" &&
verbose test -z "$__gitcomp_builtin_notes_edit"
)
@@ -2599,7 +2631,7 @@ test_expect_success 'option aliases are not shown by default' '
test_expect_success 'option aliases are shown with GIT_COMPLETION_SHOW_ALL' '
(
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
GIT_COMPLETION_SHOW_ALL=1 && export GIT_COMPLETION_SHOW_ALL &&
test_completion "git clone --recurs" <<-\EOF
--recurse-submodules Z
@@ -2610,7 +2642,7 @@ test_expect_success 'option aliases are shown with GIT_COMPLETION_SHOW_ALL' '
test_expect_success 'plumbing commands are excluded without GIT_COMPLETION_SHOW_ALL_COMMANDS' '
(
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
sane_unset GIT_TESTING_PORCELAIN_COMMAND_LIST &&
# Just mainporcelain, not plumbing commands
@@ -2622,7 +2654,7 @@ test_expect_success 'plumbing commands are excluded without GIT_COMPLETION_SHOW_
test_expect_success 'all commands are shown with GIT_COMPLETION_SHOW_ALL_COMMANDS (also main non-builtin)' '
(
- . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+ . "$GIT_SOURCE_DIR/contrib/completion/git-completion.bash" &&
GIT_COMPLETION_SHOW_ALL_COMMANDS=1 &&
export GIT_COMPLETION_SHOW_ALL_COMMANDS &&
sane_unset GIT_TESTING_PORCELAIN_COMMAND_LIST &&
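Not part of the patch; the new case-insensitive completion knob covered by the added t9902 tests is opt-in. In an interactive shell that has sourced contrib/completion/git-completion.bash:
    GIT_COMPLETION_IGNORE_CASE=1
    export GIT_COMPLETION_IGNORE_CASE
    # "git checkout M<Tab>" then also offers main, mybranch, mytag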
diff --git a/t/t9903-bash-prompt.sh b/t/t9903-bash-prompt.sh
index d459fae655..06f0abfc29 100755
--- a/t/t9903-bash-prompt.sh
+++ b/t/t9903-bash-prompt.sh
@@ -10,7 +10,7 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./lib-bash.sh
-. "$GIT_BUILD_DIR/contrib/completion/git-prompt.sh"
+. "$GIT_SOURCE_DIR/contrib/completion/git-prompt.sh"
actual="$TRASH_DIRECTORY/actual"
c_red='\\[\\e[31m\\]'
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index 796093a7b3..2acfd733e7 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -1450,72 +1450,6 @@ test_skip_or_die () {
error "$2"
}
-# The following mingw_* functions obey POSIX shell syntax, but are actually
-# bash scripts, and are meant to be used only with bash on Windows.
-
-# A test_cmp function that treats LF and CRLF equal and avoids to fork
-# diff when possible.
-mingw_test_cmp () {
- # Read text into shell variables and compare them. If the results
- # are different, use regular diff to report the difference.
- local test_cmp_a= test_cmp_b=
-
- # When text came from stdin (one argument is '-') we must feed it
- # to diff.
- local stdin_for_diff=
-
- # Since it is difficult to detect the difference between an
- # empty input file and a failure to read the files, we go straight
- # to diff if one of the inputs is empty.
- if test -s "$1" && test -s "$2"
- then
- # regular case: both files non-empty
- mingw_read_file_strip_cr_ test_cmp_a <"$1"
- mingw_read_file_strip_cr_ test_cmp_b <"$2"
- elif test -s "$1" && test "$2" = -
- then
- # read 2nd file from stdin
- mingw_read_file_strip_cr_ test_cmp_a <"$1"
- mingw_read_file_strip_cr_ test_cmp_b
- stdin_for_diff='<<<"$test_cmp_b"'
- elif test "$1" = - && test -s "$2"
- then
- # read 1st file from stdin
- mingw_read_file_strip_cr_ test_cmp_a
- mingw_read_file_strip_cr_ test_cmp_b <"$2"
- stdin_for_diff='<<<"$test_cmp_a"'
- fi
- test -n "$test_cmp_a" &&
- test -n "$test_cmp_b" &&
- test "$test_cmp_a" = "$test_cmp_b" ||
- eval "diff -u \"\$@\" $stdin_for_diff"
-}
-
-# $1 is the name of the shell variable to fill in
-mingw_read_file_strip_cr_ () {
- # Read line-wise using LF as the line separator
- # and use IFS to strip CR.
- local line
- while :
- do
- if IFS=$'\r' read -r -d $'\n' line
- then
- # good
- line=$line$'\n'
- else
- # we get here at EOF, but also if the last line
- # was not terminated by LF; in the latter case,
- # some text was read
- if test -z "$line"
- then
- # EOF, really
- break
- fi
- fi
- eval "$1=\$$1\$line"
- done
-}
-
# Like "env FOO=BAR some-program", but run inside a subshell, which means
# it also works for shell functions (though those functions cannot impact
# the environment outside of the test_env invocation).
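Not part of the patch; the removed bash-only helper is superseded on Windows by letting git itself do the CRLF-insensitive comparison (see the test-lib.sh hunk below), i.e. test_cmp effectively runs:
    GIT_DIR=/dev/null git diff --no-index --ignore-cr-at-eol -- expect actual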
diff --git a/t/test-lib.sh b/t/test-lib.sh
index 6db377f68b..cd085c652b 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -47,9 +47,21 @@ then
echo "PANIC: Running in a $TEST_DIRECTORY that doesn't end in '/t'?" >&2
exit 1
fi
-if test -f "$GIT_BUILD_DIR/GIT-BUILD-DIR"
+
+# For CMake the top-level source directory is different from our build
+# directory. With the top-level Makefile they're the same.
+GIT_SOURCE_DIR="$GIT_BUILD_DIR"
+
+GIT_AUTO_CONTRIB_BUILDSYSTEMS_OUT=
+if test -n "$GIT_TEST_BUILD_DIR"
then
- GIT_BUILD_DIR="$(cat "$GIT_BUILD_DIR/GIT-BUILD-DIR")" || exit 1
+ GIT_BUILD_DIR="$GIT_TEST_BUILD_DIR"
+elif ! test -x "$GIT_BUILD_DIR/git" &&
+ test -x "$GIT_BUILD_DIR/contrib/buildsystems/out/git"
+then
+ GIT_BUILD_DIR="$GIT_SOURCE_DIR/contrib/buildsystems/out"
+ GIT_AUTO_CONTRIB_BUILDSYSTEMS_OUT=t
+
# On Windows, we must convert Windows paths lest they contain a colon
case "$(uname -s)" in
*MINGW*)
@@ -1447,7 +1459,7 @@ then
make_valgrind_symlink $file
done
# special-case the mergetools loadables
- make_symlink "$GIT_BUILD_DIR"/mergetools "$GIT_VALGRIND/bin/mergetools"
+ make_symlink "$GIT_SOURCE_DIR"/mergetools "$GIT_VALGRIND/bin/mergetools"
OLDIFS=$IFS
IFS=:
for path in $PATH
@@ -1500,6 +1512,8 @@ GIT_CONFIG_NOSYSTEM=1
GIT_ATTR_NOSYSTEM=1
GIT_CEILING_DIRECTORIES="$TRASH_DIRECTORY/.."
export PATH GIT_EXEC_PATH GIT_TEMPLATE_DIR GIT_CONFIG_NOSYSTEM GIT_ATTR_NOSYSTEM GIT_CEILING_DIRECTORIES
+MERGE_TOOLS_DIR="$GIT_SOURCE_DIR/mergetools"
+export MERGE_TOOLS_DIR
if test -z "$GIT_TEST_CMP"
then
@@ -1628,6 +1642,13 @@ remove_trash_directory "$TRASH_DIRECTORY" || {
BAIL_OUT 'cannot prepare test area'
}
+# Emitting this only now because earlier we didn't have "say"; it is
+# intentionally not emitted by anything using lib-subtest.sh
+if test -n "$GIT_AUTO_CONTRIB_BUILDSYSTEMS_OUT" && test -t 1
+then
+ say "setup: had no ../git, but found & used cmake built git in ../contrib/buildsystems/out/git"
+fi
+
remove_trash=t
if test -z "$TEST_NO_CREATE_REPO"
then
@@ -1721,7 +1742,7 @@ case $uname_s in
test_set_prereq SED_STRIPS_CR
test_set_prereq GREP_STRIPS_CR
test_set_prereq WINDOWS
- GIT_TEST_CMP=mingw_test_cmp
+ GIT_TEST_CMP="GIT_DIR=/dev/null git diff --no-index --ignore-cr-at-eol --"
;;
*CYGWIN*)
test_set_prereq POSIXPERM
@@ -1954,3 +1975,7 @@ test_lazy_prereq FSMONITOR_DAEMON '
git version --build-options >output &&
grep "feature: fsmonitor--daemon" output
'
+
+test_lazy_prereq PACKED_REFS_V1 '
+ test "$GIT_TEST_PACKED_REFS_VERSION" -ne "2"
+'
\ No newline at end of file
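Not part of the patch; a hedged sketch of pointing a test script at an out-of-tree build, assuming a CMake build under contrib/buildsystems/out as the hunk above probes for (the path and test script are placeholders):
    GIT_TEST_BUILD_DIR=/path/to/git/contrib/buildsystems/out \
        ./t0001-init.sh -i -v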
diff --git a/unpack-trees.c b/unpack-trees.c
index bae812156c..56b71d37ce 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -2043,7 +2043,8 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
if (!ret) {
if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
cache_tree_verify(the_repository, &o->result);
- if (!cache_tree_fully_valid(o->result.cache_tree))
+ if (!o->skip_cache_tree_update &&
+ !cache_tree_fully_valid(o->result.cache_tree))
cache_tree_update(&o->result,
WRITE_TREE_SILENT |
WRITE_TREE_REPAIR);
@@ -2288,7 +2289,8 @@ static int verify_clean_subdirectory(const struct cache_entry *ce,
if (S_ISGITLINK(ce->ce_mode)) {
struct object_id oid;
- int sub_head = resolve_gitlink_ref(ce->name, "HEAD", &oid);
+ int sub_head =
+ resolve_gitlink_ref(ce->name, "HEAD", &oid, NULL);
/*
* If we are not going to update the submodule, then
* we don't care.
diff --git a/unpack-trees.h b/unpack-trees.h
index efb9edfbb2..6ab0d74c84 100644
--- a/unpack-trees.h
+++ b/unpack-trees.h
@@ -71,7 +71,8 @@ struct unpack_trees_options {
quiet,
exiting_early,
show_all_errors,
- dry_run;
+ dry_run,
+ skip_cache_tree_update;
enum unpack_trees_reset_type reset;
const char *prefix;
int cache_bottom;
diff --git a/upload-pack.c b/upload-pack.c
index 0b8311bd68..551f22ffa5 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -62,6 +62,7 @@ struct upload_pack_data {
struct object_array have_obj;
struct oid_array haves; /* v2 only */
struct string_list wanted_refs; /* v2 only */
+ struct string_list hidden_refs;
struct object_array shallows;
struct string_list deepen_not;
@@ -118,6 +119,7 @@ static void upload_pack_data_init(struct upload_pack_data *data)
{
struct string_list symref = STRING_LIST_INIT_DUP;
struct string_list wanted_refs = STRING_LIST_INIT_DUP;
+ struct string_list hidden_refs = STRING_LIST_INIT_DUP;
struct object_array want_obj = OBJECT_ARRAY_INIT;
struct object_array have_obj = OBJECT_ARRAY_INIT;
struct oid_array haves = OID_ARRAY_INIT;
@@ -130,6 +132,7 @@ static void upload_pack_data_init(struct upload_pack_data *data)
memset(data, 0, sizeof(*data));
data->symref = symref;
data->wanted_refs = wanted_refs;
+ data->hidden_refs = hidden_refs;
data->want_obj = want_obj;
data->have_obj = have_obj;
data->haves = haves;
@@ -151,6 +154,7 @@ static void upload_pack_data_clear(struct upload_pack_data *data)
{
string_list_clear(&data->symref, 1);
string_list_clear(&data->wanted_refs, 1);
+ string_list_clear(&data->hidden_refs, 0);
object_array_clear(&data->want_obj);
object_array_clear(&data->have_obj);
oid_array_clear(&data->haves);
@@ -842,8 +846,8 @@ static void deepen(struct upload_pack_data *data, int depth)
* Checking for reachable shallows requires that our refs be
* marked with OUR_REF.
*/
- head_ref_namespaced(check_ref, NULL);
- for_each_namespaced_ref(check_ref, NULL);
+ head_ref_namespaced(check_ref, data);
+ for_each_namespaced_ref(check_ref, data);
get_reachable_list(data, &reachable_shallows);
result = get_shallow_commits(&reachable_shallows,
@@ -1158,11 +1162,11 @@ static void receive_needs(struct upload_pack_data *data,
/* return non-zero if the ref is hidden, otherwise 0 */
static int mark_our_ref(const char *refname, const char *refname_full,
- const struct object_id *oid)
+ const struct object_id *oid, const struct string_list *hidden_refs)
{
struct object *o = lookup_unknown_object(the_repository, oid);
- if (ref_is_hidden(refname, refname_full)) {
+ if (ref_is_hidden(refname, refname_full, hidden_refs)) {
o->flags |= HIDDEN_REF;
return 1;
}
@@ -1171,11 +1175,12 @@ static int mark_our_ref(const char *refname, const char *refname_full,
}
static int check_ref(const char *refname_full, const struct object_id *oid,
- int flag UNUSED, void *cb_data UNUSED)
+ int flag UNUSED, void *cb_data)
{
const char *refname = strip_namespace(refname_full);
+ struct upload_pack_data *data = cb_data;
- mark_our_ref(refname, refname_full, oid);
+ mark_our_ref(refname, refname_full, oid, &data->hidden_refs);
return 0;
}
@@ -1204,7 +1209,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
struct object_id peeled;
struct upload_pack_data *data = cb_data;
- if (mark_our_ref(refname_nons, refname, oid))
+ if (mark_our_ref(refname_nons, refname, oid, &data->hidden_refs))
return 0;
if (capabilities) {
@@ -1327,7 +1332,7 @@ static int upload_pack_config(const char *var, const char *value, void *cb_data)
if (parse_object_filter_config(var, value, data) < 0)
return -1;
- return parse_hide_refs_config(var, value, "uploadpack");
+ return parse_hide_refs_config(var, value, "uploadpack", &data->hidden_refs);
}
static int upload_pack_protected_config(const char *var, const char *value, void *cb_data)
@@ -1375,8 +1380,8 @@ void upload_pack(const int advertise_refs, const int stateless_rpc,
advertise_shallow_grafts(1);
packet_flush(1);
} else {
- head_ref_namespaced(check_ref, NULL);
- for_each_namespaced_ref(check_ref, NULL);
+ head_ref_namespaced(check_ref, &data);
+ for_each_namespaced_ref(check_ref, &data);
}
if (!advertise_refs) {
@@ -1441,6 +1446,7 @@ static int parse_want(struct packet_writer *writer, const char *line,
static int parse_want_ref(struct packet_writer *writer, const char *line,
struct string_list *wanted_refs,
+ struct string_list *hidden_refs,
struct object_array *want_obj)
{
const char *refname_nons;
@@ -1451,7 +1457,7 @@ static int parse_want_ref(struct packet_writer *writer, const char *line,
struct strbuf refname = STRBUF_INIT;
strbuf_addf(&refname, "%s%s", get_git_namespace(), refname_nons);
- if (ref_is_hidden(refname_nons, refname.buf) ||
+ if (ref_is_hidden(refname_nons, refname.buf, hidden_refs) ||
read_ref(refname.buf, &oid)) {
packet_writer_error(writer, "unknown ref %s", refname_nons);
die("unknown ref %s", refname_nons);
@@ -1508,7 +1514,7 @@ static void process_args(struct packet_reader *request,
continue;
if (data->allow_ref_in_want &&
parse_want_ref(&data->writer, arg, &data->wanted_refs,
- &data->want_obj))
+ &data->hidden_refs, &data->want_obj))
continue;
/* process have line */
if (parse_have(arg, &data->haves))
diff --git a/wt-status.c b/wt-status.c
index 5813174896..1f6d64e759 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -18,8 +18,10 @@
#include "worktree.h"
#include "lockfile.h"
#include "sequencer.h"
+#include "fsmonitor-settings.h"
#define AB_DELAY_WARNING_IN_MS (2 * 1000)
+#define UF_DELAY_WARNING_IN_MS (2 * 1000)
static const char cut_line[] =
"------------------------ >8 ------------------------\n";
@@ -1205,6 +1207,13 @@ static void wt_longstatus_print_tracking(struct wt_status *s)
strbuf_release(&sb);
}
+static int uf_was_slow(uint32_t untracked_in_ms)
+{
+ if (getenv("GIT_TEST_UF_DELAY_WARNING"))
+ untracked_in_ms += UF_DELAY_WARNING_IN_MS + 1;
+ return UF_DELAY_WARNING_IN_MS < untracked_in_ms;
+}
+
static void show_merge_in_progress(struct wt_status *s,
const char *color)
{
@@ -1814,6 +1823,7 @@ static void wt_longstatus_print(struct wt_status *s)
{
const char *branch_color = color(WT_STATUS_ONBRANCH, s);
const char *branch_status_color = color(WT_STATUS_HEADER, s);
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(s->repo);
if (s->branch) {
const char *on_what = _("On branch ");
@@ -1870,13 +1880,21 @@ static void wt_longstatus_print(struct wt_status *s)
wt_longstatus_print_other(s, &s->untracked, _("Untracked files"), "add");
if (s->show_ignored_mode)
wt_longstatus_print_other(s, &s->ignored, _("Ignored files"), "add -f");
- if (advice_enabled(ADVICE_STATUS_U_OPTION) && 2000 < s->untracked_in_ms) {
+ if (advice_enabled(ADVICE_STATUS_U_OPTION) && uf_was_slow(s->untracked_in_ms)) {
status_printf_ln(s, GIT_COLOR_NORMAL, "%s", "");
+ if (fsm_mode > FSMONITOR_MODE_DISABLED) {
+ status_printf_ln(s, GIT_COLOR_NORMAL,
+ _("It took %.2f seconds to enumerate untracked files,\n"
+ "but the results were cached, and subsequent runs may be faster."),
+ s->untracked_in_ms / 1000.0);
+ } else {
+ status_printf_ln(s, GIT_COLOR_NORMAL,
+ _("It took %.2f seconds to enumerate untracked files."),
+ s->untracked_in_ms / 1000.0);
+ }
status_printf_ln(s, GIT_COLOR_NORMAL,
- _("It took %.2f seconds to enumerate untracked files. 'status -uno'\n"
- "may speed it up, but you have to be careful not to forget to add\n"
- "new files yourself (see 'git help status')."),
- s->untracked_in_ms / 1000.0);
+ _("See 'git help status' for information on how to improve this."));
+ status_printf_ln(s, GIT_COLOR_NORMAL, "%s", "");
}
} else if (s->committable)
status_printf_ln(s, GIT_COLOR_NORMAL, _("Untracked files not listed%s"),