-rw-r--r--  .github/workflows/main.yml | 5
-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/CodingGuidelines | 7
-rw-r--r--  Documentation/Makefile | 1
-rw-r--r--  Documentation/RelNotes/2.17.6.txt | 16
-rw-r--r--  Documentation/RelNotes/2.18.5.txt | 6
-rw-r--r--  Documentation/RelNotes/2.19.6.txt | 6
-rw-r--r--  Documentation/RelNotes/2.20.5.txt | 6
-rw-r--r--  Documentation/RelNotes/2.21.4.txt | 6
-rw-r--r--  Documentation/RelNotes/2.22.5.txt | 7
-rw-r--r--  Documentation/RelNotes/2.23.4.txt | 7
-rw-r--r--  Documentation/RelNotes/2.24.4.txt | 7
-rw-r--r--  Documentation/RelNotes/2.25.5.txt | 7
-rw-r--r--  Documentation/RelNotes/2.26.3.txt | 7
-rw-r--r--  Documentation/RelNotes/2.27.1.txt | 7
-rw-r--r--  Documentation/RelNotes/2.28.1.txt | 7
-rw-r--r--  Documentation/RelNotes/2.29.3.txt | 8
-rw-r--r--  Documentation/RelNotes/2.30.2.txt | 8
-rw-r--r--  Documentation/RelNotes/2.31.0.txt | 2
-rw-r--r--  Documentation/RelNotes/2.31.1.txt | 27
-rw-r--r--  Documentation/RelNotes/2.32.0.txt | 250
-rw-r--r--  Documentation/SubmittingPatches | 11
-rw-r--r--  Documentation/config.txt | 2
-rw-r--r--  Documentation/config/clone.txt | 4
-rw-r--r--  Documentation/config/commitgraph.txt | 6
-rw-r--r--  Documentation/config/mergetool.txt | 2
-rw-r--r--  Documentation/config/pack.txt | 15
-rw-r--r--  Documentation/config/rebase.txt | 7
-rw-r--r--  Documentation/config/stash.txt | 5
-rw-r--r--  Documentation/fetch-options.txt | 5
-rw-r--r--  Documentation/git-apply.txt | 11
-rw-r--r--  Documentation/git-cat-file.txt | 67
-rw-r--r--  Documentation/git-clone.txt | 7
-rw-r--r--  Documentation/git-commit.txt | 59
-rw-r--r--  Documentation/git-format-patch.txt | 26
-rw-r--r--  Documentation/git-maintenance.txt | 6
-rw-r--r--  Documentation/git-mergetool.txt | 4
-rw-r--r--  Documentation/git-multi-pack-index.txt | 14
-rw-r--r--  Documentation/git-pack-objects.txt | 10
-rw-r--r--  Documentation/git-push.txt | 2
-rw-r--r--  Documentation/git-rebase.txt | 35
-rw-r--r--  Documentation/git-repack.txt | 23
-rw-r--r--  Documentation/git-stash.txt | 22
-rw-r--r--  Documentation/gitattributes.txt | 5
-rw-r--r--  Documentation/gitdiffcore.txt | 2
-rw-r--r--  Documentation/githooks.txt | 33
-rw-r--r--  Documentation/gitignore.txt | 2
-rw-r--r--  Documentation/gitweb.conf.txt | 11
-rw-r--r--  Documentation/howto/coordinate-embargoed-releases.txt | 131
-rw-r--r--  Documentation/pretty-formats.txt | 13
-rw-r--r--  Documentation/technical/api-error-handling.txt | 10
-rw-r--r--  Documentation/technical/api-simple-ipc.txt | 105
-rw-r--r--  Documentation/technical/api-trace2.txt | 2
-rw-r--r--  Documentation/technical/multi-pack-index.txt | 5
-rw-r--r--  Documentation/technical/pack-format.txt | 83
-rw-r--r--  Documentation/technical/reftable.txt | 9
-rw-r--r--  Documentation/user-manual.txt | 3
-rwxr-xr-x  GIT-VERSION-GEN | 2
-rw-r--r--  INSTALL | 4
-rw-r--r--  Makefile | 62
l---------  RelNotes | 2
-rw-r--r--  SECURITY.md | 51
-rw-r--r--  add-interactive.c | 6
-rw-r--r--  apply.c | 29
-rw-r--r--  archive-tar.c | 2
-rw-r--r--  archive.c | 16
-rw-r--r--  archive.h | 2
-rw-r--r--  attr.c | 72
-rw-r--r--  bisect.c | 2
-rw-r--r--  blame.c | 17
-rw-r--r--  block-sha1/sha1.c | 2
-rw-r--r--  bloom.c | 2
-rw-r--r--  branch.c | 1
-rw-r--r--  builtin/bisect--helper.c | 9
-rw-r--r--  builtin/checkout-index.c | 1
-rw-r--r--  builtin/checkout.c | 1
-rw-r--r--  builtin/clean.c | 2
-rw-r--r--  builtin/clone.c | 35
-rw-r--r--  builtin/column.c | 8
-rw-r--r--  builtin/commit.c | 144
-rw-r--r--  builtin/credential-cache--daemon.c | 3
-rw-r--r--  builtin/credential-cache.c | 2
-rw-r--r--  builtin/difftool.c | 1
-rw-r--r--  builtin/fast-import.c | 8
-rw-r--r--  builtin/fetch.c | 59
-rw-r--r--  builtin/fsck.c | 16
-rw-r--r--  builtin/gc.c | 39
-rw-r--r--  builtin/grep.c | 3
-rw-r--r--  builtin/index-pack.c | 40
-rw-r--r--  builtin/init-db.c | 36
-rw-r--r--  builtin/log.c | 25
-rw-r--r--  builtin/ls-remote.c | 6
-rw-r--r--  builtin/merge.c | 2
-rw-r--r--  builtin/mktag.c | 14
-rw-r--r--  builtin/multi-pack-index.c | 182
-rw-r--r--  builtin/mv.c | 2
-rw-r--r--  builtin/notes.c | 2
-rw-r--r--  builtin/pack-objects.c | 381
-rw-r--r--  builtin/pack-redundant.c | 2
-rw-r--r--  builtin/range-diff.c | 2
-rw-r--r--  builtin/rebase.c | 12
-rw-r--r--  builtin/receive-pack.c | 12
-rw-r--r--  builtin/remote.c | 12
-rw-r--r--  builtin/repack.c | 209
-rw-r--r--  builtin/reset.c | 2
-rw-r--r--  builtin/revert.c | 4
-rw-r--r--  builtin/sparse-checkout.c | 8
-rw-r--r--  builtin/stash.c | 64
-rw-r--r--  builtin/symbolic-ref.c | 4
-rw-r--r--  builtin/unpack-objects.c | 7
-rw-r--r--  builtin/worktree.c | 10
-rw-r--r--  bulk-checkin.c | 2
-rw-r--r--  cache-tree.c | 2
-rw-r--r--  cache.h | 27
-rwxr-xr-x  ci/run-build-and-tests.sh | 1
-rw-r--r--  combine-diff.c | 20
-rw-r--r--  commit-graph.c | 35
-rw-r--r--  commit-graph.h | 1
-rw-r--r--  commit-reach.c | 6
-rw-r--r--  commit.c | 18
-rw-r--r--  commit.h | 3
-rw-r--r--  compat/mingw.c | 4
-rw-r--r--  compat/precompose_utf8.c | 9
-rw-r--r--  compat/precompose_utf8.h | 1
-rw-r--r--  compat/simple-ipc/ipc-shared.c | 28
-rw-r--r--  compat/simple-ipc/ipc-unix-socket.c | 999
-rw-r--r--  compat/simple-ipc/ipc-win32.c | 751
-rw-r--r--  config.c | 18
-rw-r--r--  config.mak.uname | 2
-rw-r--r--  contrib/buildsystems/CMakeLists.txt | 34
-rw-r--r--  contrib/coccinelle/array.cocci | 8
-rw-r--r--  contrib/coccinelle/xcalloc.cocci | 10
-rw-r--r--  contrib/completion/git-completion.bash | 10
-rw-r--r--  convert.c | 157
-rw-r--r--  convert.h | 96
-rw-r--r--  csum-file.c | 33
-rw-r--r--  daemon.c | 12
-rw-r--r--  decorate.c | 2
-rw-r--r--  diff-lib.c | 23
-rw-r--r--  diff.c | 10
-rw-r--r--  diffcore-rename.c | 681
-rw-r--r--  diffcore.h | 21
-rw-r--r--  dir.c | 27
-rw-r--r--  dir.h | 3
-rw-r--r--  entry.c | 87
-rw-r--r--  entry.h | 59
-rw-r--r--  ewah/bitmap.c | 2
-rw-r--r--  fetch-pack.c | 51
-rw-r--r--  fetch-pack.h | 1
-rw-r--r--  fmt-merge-msg.c | 2
-rw-r--r--  fsck.c | 207
-rw-r--r--  fsck.h | 127
-rw-r--r--  fsmonitor.c | 6
-rw-r--r--  fsmonitor.h | 11
-rw-r--r--  git-compat-util.h | 19
-rwxr-xr-x  git-filter-branch.sh | 16
-rwxr-xr-x  git-mergetool.sh | 9
-rwxr-xr-x  git-send-email.perl | 47
-rw-r--r--  git.c | 2
-rwxr-xr-x  gitweb/gitweb.perl | 34
-rw-r--r--  grep.c | 107
-rw-r--r--  grep.h | 9
-rw-r--r--  hashmap.c | 2
-rw-r--r--  http-backend.c | 2
-rw-r--r--  http-push.c | 6
-rw-r--r--  http.c | 32
-rw-r--r--  imap-send.c | 4
-rw-r--r--  line-log.c | 2
-rw-r--r--  line-range.c | 2
-rw-r--r--  list-objects-filter.c | 6
-rw-r--r--  ll-merge.c | 2
-rw-r--r--  log-tree.c | 10
-rw-r--r--  mailmap.c | 24
-rw-r--r--  mem-pool.c | 2
-rw-r--r--  merge-ort.c | 707
-rw-r--r--  merge-recursive.c | 46
-rw-r--r--  midx.c | 227
-rw-r--r--  midx.h | 11
-rw-r--r--  name-hash.c | 8
-rw-r--r--  negotiator/default.c | 2
-rw-r--r--  negotiator/skipping.c | 4
-rw-r--r--  notes-merge.c | 2
-rw-r--r--  notes-utils.c | 2
-rw-r--r--  notes.c | 2
-rw-r--r--  object-file.c | 2
-rw-r--r--  object-store.h | 5
-rw-r--r--  object.c | 11
-rw-r--r--  object.h | 2
-rw-r--r--  pack-bitmap.c | 29
-rw-r--r--  pack-bitmap.h | 4
-rw-r--r--  pack-objects.c | 2
-rw-r--r--  pack-revindex.c | 129
-rw-r--r--  pack-revindex.h | 53
-rw-r--r--  pack-write.c | 36
-rw-r--r--  pack.h | 1
-rw-r--r--  packfile.c | 73
-rw-r--r--  packfile.h | 5
-rw-r--r--  parse-options.c | 19
-rw-r--r--  parse-options.h | 35
-rw-r--r--  patch-ids.c | 2
-rw-r--r--  path.c | 1
-rw-r--r--  path.h | 2
-rw-r--r--  pathspec.c | 4
-rw-r--r--  perl/Git.pm | 13
-rw-r--r--  pkt-line.c | 86
-rw-r--r--  pkt-line.h | 17
-rw-r--r--  po/TEAMS | 4
-rw-r--r--  po/bg.po | 6500
-rw-r--r--  po/ca.po | 25
-rw-r--r--  po/de.po | 6773
-rw-r--r--  po/es.po | 10746
-rw-r--r--  po/fr.po | 6485
-rw-r--r--  po/git.pot | 6135
-rw-r--r--  po/id.po | 24358
-rw-r--r--  po/pl.po | 6298
-rw-r--r--  po/pt_PT.po | 6128
-rw-r--r--  po/ru.po | 13534
-rw-r--r--  po/sv.po | 6539
-rw-r--r--  po/tr.po | 6391
-rw-r--r--  po/vi.po | 6559
-rw-r--r--  po/zh_CN.po | 6322
-rw-r--r--  po/zh_TW.po | 6607
-rw-r--r--  pretty.c | 66
-rw-r--r--  pretty.h | 5
-rw-r--r--  progress.c | 2
-rw-r--r--  promisor-remote.c | 2
-rw-r--r--  range-diff.c | 2
-rw-r--r--  read-cache.c | 9
-rw-r--r--  rebase-interactive.c | 7
-rw-r--r--  ref-filter.c | 11
-rw-r--r--  reflog-walk.c | 4
-rw-r--r--  refs.c | 6
-rw-r--r--  refs/files-backend.c | 10
-rw-r--r--  refs/iterator.c | 2
-rw-r--r--  refs/packed-backend.c | 4
-rw-r--r--  refs/ref-cache.c | 2
-rw-r--r--  remote.c | 8
-rw-r--r--  repository.c | 4
-rw-r--r--  resolve-undo.c | 4
-rw-r--r--  revision.c | 23
-rw-r--r--  revision.h | 6
-rw-r--r--  run-command.c | 13
-rw-r--r--  send-pack.c | 4
-rw-r--r--  sequencer.c | 339
-rw-r--r--  sequencer.h | 6
-rw-r--r--  server-info.c | 2
-rw-r--r--  setup.c | 28
-rw-r--r--  simple-ipc.h | 239
-rw-r--r--  split-index.c | 4
-rw-r--r--  symlinks.c | 78
-rw-r--r--  t/README | 3
-rw-r--r--  t/annotate-tests.sh | 34
-rw-r--r--  t/helper/test-bitmap.c | 24
-rw-r--r--  t/helper/test-bloom.c | 4
-rw-r--r--  t/helper/test-chmtime.c | 4
-rw-r--r--  t/helper/test-example-decorate.c | 6
-rw-r--r--  t/helper/test-read-midx.c | 24
-rw-r--r--  t/helper/test-simple-ipc.c | 787
-rw-r--r--  t/helper/test-tool.c | 3
-rw-r--r--  t/helper/test-tool.h | 3
-rw-r--r--  t/helper/test-userdiff.c | 46
-rw-r--r--  t/lib-rebase.sh | 11
-rwxr-xr-x  t/perf/p5303-many-packs.sh | 36
-rwxr-xr-x  t/perf/p5310-pack-bitmaps.sh | 14
-rwxr-xr-x  t/perf/p5600-partial-clone.sh | 12
-rwxr-xr-x  t/perf/p7519-fsmonitor.sh | 4
-rw-r--r--  t/perf/perf-lib.sh | 31
-rwxr-xr-x  t/t0003-attributes.sh | 36
-rwxr-xr-x  t/t0008-ignores.sh | 34
-rwxr-xr-x  t/t0021-conversion.sh | 105
-rw-r--r--  t/t0021/rot13-filter.pl | 21
-rwxr-xr-x  t/t0052-simple-ipc.sh | 122
-rwxr-xr-x  t/t2006-checkout-index-basic.sh | 45
-rwxr-xr-x  t/t2021-checkout-overwrite.sh | 12
-rwxr-xr-x  t/t3206-range-diff.sh | 24
-rwxr-xr-x  t/t3400-rebase.sh | 16
-rwxr-xr-x  t/t3415-rebase-autosquash.sh | 30
-rwxr-xr-x  t/t3437-rebase-fixup-options.sh | 211
-rw-r--r--  t/t3437/expected-combined-message | 21
-rw-r--r--  t/t3437/expected-squash-message | 51
-rwxr-xr-x  t/t3510-cherry-pick-sequence.sh | 32
-rwxr-xr-x  t/t3512-cherry-pick-submodule.sh | 7
-rwxr-xr-x  t/t3513-revert-submodule.sh | 5
-rwxr-xr-x  t/t3800-mktag.sh | 2
-rwxr-xr-x  t/t3900-i18n-commit.sh | 4
-rwxr-xr-x  t/t3905-stash-include-untracked.sh | 108
-rwxr-xr-x  t/t4014-format-patch.sh | 34
-rwxr-xr-x  t/t4018-diff-funcname.sh | 53
-rw-r--r--  t/t4018/README | 3
-rw-r--r--  t/t4018/scheme-class | 7
-rw-r--r--  t/t4018/scheme-def | 4
-rw-r--r--  t/t4018/scheme-def-variant | 4
-rw-r--r--  t/t4018/scheme-define-slash-public | 7
-rw-r--r--  t/t4018/scheme-define-syntax | 8
-rw-r--r--  t/t4018/scheme-define-variant | 4
-rw-r--r--  t/t4018/scheme-library | 11
-rw-r--r--  t/t4018/scheme-local-define | 4
-rw-r--r--  t/t4018/scheme-module | 6
-rw-r--r--  t/t4018/scheme-top-level-define | 4
-rw-r--r--  t/t4018/scheme-user-defined-define | 6
-rwxr-xr-x  t/t4034-diff-words.sh | 1
-rw-r--r--  t/t4034/scheme/expect | 11
-rw-r--r--  t/t4034/scheme/post | 6
-rw-r--r--  t/t4034/scheme/pre | 6
-rwxr-xr-x  t/t4053-diff-no-index.sh | 60
-rwxr-xr-x  t/t4108-apply-threeway.sh | 70
-rwxr-xr-x  t/t4203-mailmap.sh | 31
-rwxr-xr-x  t/t4205-log-pretty-formats.sh | 35
-rwxr-xr-x  t/t5001-archive-attr.sh | 14
-rwxr-xr-x  t/t5300-pack-object.sh | 141
-rwxr-xr-x  t/t5310-pack-bitmaps.sh | 61
-rwxr-xr-x  t/t5318-commit-graph.sh | 2
-rwxr-xr-x  t/t5319-multi-pack-index.sh | 42
-rwxr-xr-x  t/t5324-split-commit-graph.sh | 4
-rwxr-xr-x  t/t5505-remote.sh | 3
-rwxr-xr-x  t/t5523-push-upstream.sh | 7
-rwxr-xr-x  t/t5572-pull-submodule.sh | 7
-rwxr-xr-x  t/t5582-fetch-negative-refspec.sh | 43
-rwxr-xr-x  t/t5601-clone.sh | 9
-rwxr-xr-x  t/t5606-clone-options.sh | 37
-rwxr-xr-x  t/t5611-clone-config.sh | 25
-rwxr-xr-x  t/t5612-clone-refspec.sh | 1
-rwxr-xr-x  t/t5702-protocol-v2.sh | 21
-rwxr-xr-x  t/t6030-bisect-porcelain.sh | 12
-rwxr-xr-x  t/t6114-keep-packs.sh | 69
-rwxr-xr-x  t/t6300-for-each-ref.sh | 16
-rwxr-xr-x  t/t6423-merge-rename-directories.sh | 73
-rwxr-xr-x  t/t6428-merge-conflicts-sparse.sh | 158
-rwxr-xr-x  t/t6437-submodule-merge.sh | 5
-rwxr-xr-x  t/t6438-submodule-directory-file-conflicts.sh | 7
-rwxr-xr-x  t/t6600-test-reach.sh | 2
-rwxr-xr-x  t/t7003-filter-branch.sh | 31
-rwxr-xr-x  t/t7500-commit-template-squash-signoff.sh | 159
-rwxr-xr-x  t/t7502-commit-porcelain.sh | 312
-rwxr-xr-x  t/t7703-repack-geometric.sh | 183
-rwxr-xr-x  t/t7810-grep.sh | 3
-rwxr-xr-x  t/t7900-maintenance.sh | 22
-rwxr-xr-x  t/t9001-send-email.sh | 57
-rwxr-xr-x  t/t9801-git-p4-branch.sh | 38
-rw-r--r--  t/test-lib-functions.sh | 7
-rw-r--r--  t/test-lib.sh | 2
-rw-r--r--  trailer.c | 8
-rw-r--r--  transport-helper.c | 4
-rw-r--r--  transport.c | 18
-rw-r--r--  transport.h | 4
-rw-r--r--  unix-socket.c | 53
-rw-r--r--  unix-socket.h | 12
-rw-r--r--  unix-stream-server.c | 125
-rw-r--r--  unix-stream-server.h | 33
-rw-r--r--  unpack-trees.c | 32
-rw-r--r--  unpack-trees.h | 2
-rw-r--r--  upload-pack.c | 2
-rw-r--r--  usage.c | 17
-rw-r--r--  userdiff.c | 178
-rw-r--r--  userdiff.h | 13
-rw-r--r--  vcs-svn/fast_export.h | 34
-rw-r--r--  vcs-svn/line_buffer.h | 30
-rw-r--r--  vcs-svn/sliding_window.h | 18
-rw-r--r--  vcs-svn/svndiff.h | 10
-rw-r--r--  vcs-svn/svndump.h | 10
-rw-r--r--  walker.c | 2
-rw-r--r--  worktree.c | 4
-rw-r--r--  wrapper.c | 16
-rw-r--r--  wt-status.c | 6
364 files changed, 87462 insertions, 45625 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 5f2f884..73856ba 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -186,6 +186,11 @@ jobs:
## Unzip and remove the artifact
unzip artifacts.zip
rm artifacts.zip
+ - name: initialize vcpkg
+ uses: actions/checkout@v2
+ with:
+ repository: 'microsoft/vcpkg'
+ path: 'compat/vcbuild/vcpkg'
- name: download vcpkg artifacts
shell: powershell
run: |
diff --git a/.mailmap b/.mailmap
index bde7aba..9c6a446 100644
--- a/.mailmap
+++ b/.mailmap
@@ -220,6 +220,7 @@ Philipp A. Hartmann <pah@qo.cx> <ph@sorgh.de>
Philippe Bruhat <book@cpan.org>
Ralf Thielow <ralf.thielow@gmail.com> <ralf.thielow@googlemail.com>
Ramsay Jones <ramsay@ramsayjones.plus.com> <ramsay@ramsay1.demon.co.uk>
+Ramkumar Ramachandra <r@artagnon.com> <artagnon@gmail.com>
Randall S. Becker <randall.becker@nexbridge.ca> <rsbecker@nexbridge.com>
René Scharfe <l.s.r@web.de> <rene.scharfe@lsrfire.ath.cx>
René Scharfe <l.s.r@web.de> Rene Scharfe
diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines
index 45465bc..1ff6d8e 100644
--- a/Documentation/CodingGuidelines
+++ b/Documentation/CodingGuidelines
@@ -498,7 +498,12 @@ Error Messages
- Do not end error messages with a full stop.
- - Do not capitalize ("unable to open %s", not "Unable to open %s")
+ - Do not capitalize the first word, only because it is the first word
+ in the message ("unable to open %s", not "Unable to open %s"). But
+ "SHA-3 not supported" is fine, because the reason the first word is
+ capitalized is not because it is at the beginning of the sentence,
+ but because the word would be spelled in capital letters even when
+ it appeared in the middle of the sentence.
- Say what the error is first ("cannot open %s", not "%s: cannot open")
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 81d1bf7..874a01d 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -76,6 +76,7 @@ SP_ARTICLES += howto/rebuild-from-update-hook
SP_ARTICLES += howto/rebase-from-internal-branch
SP_ARTICLES += howto/keep-canonical-history-correct
SP_ARTICLES += howto/maintain-git
+SP_ARTICLES += howto/coordinate-embargoed-releases
API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technical/api-index.txt, $(wildcard technical/api-*.txt)))
SP_ARTICLES += $(API_DOCS)
diff --git a/Documentation/RelNotes/2.17.6.txt b/Documentation/RelNotes/2.17.6.txt
new file mode 100644
index 0000000..2f181e8
--- /dev/null
+++ b/Documentation/RelNotes/2.17.6.txt
@@ -0,0 +1,16 @@
+Git v2.17.6 Release Notes
+=========================
+
+This release addresses the security issue CVE-2021-21300.
+
+Fixes since v2.17.5
+-------------------
+
+ * CVE-2021-21300:
+ On case-insensitive file systems with support for symbolic links,
+ if Git is configured globally to apply delay-capable clean/smudge
+ filters (such as Git LFS), Git could be fooled into running
+ remote code during a clone.
+
+Credit for finding and fixing this vulnerability goes to Matheus
+Tavares, helped by Johannes Schindelin.
diff --git a/Documentation/RelNotes/2.18.5.txt b/Documentation/RelNotes/2.18.5.txt
new file mode 100644
index 0000000..dfb1de4
--- /dev/null
+++ b/Documentation/RelNotes/2.18.5.txt
@@ -0,0 +1,6 @@
+Git v2.18.5 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6 to address
+the security issue CVE-2021-21300; see the release notes for that
+version for details.
diff --git a/Documentation/RelNotes/2.19.6.txt b/Documentation/RelNotes/2.19.6.txt
new file mode 100644
index 0000000..bcca6cd
--- /dev/null
+++ b/Documentation/RelNotes/2.19.6.txt
@@ -0,0 +1,6 @@
+Git v2.19.6 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6 and
+v2.18.5 to address the security issue CVE-2021-21300; see the
+release notes for these versions for details.
diff --git a/Documentation/RelNotes/2.20.5.txt b/Documentation/RelNotes/2.20.5.txt
new file mode 100644
index 0000000..1dfb784
--- /dev/null
+++ b/Documentation/RelNotes/2.20.5.txt
@@ -0,0 +1,6 @@
+Git v2.20.5 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6, v2.18.5
+and v2.19.6 to address the security issue CVE-2021-21300; see
+the release notes for these versions for details.
diff --git a/Documentation/RelNotes/2.21.4.txt b/Documentation/RelNotes/2.21.4.txt
new file mode 100644
index 0000000..0089dd6
--- /dev/null
+++ b/Documentation/RelNotes/2.21.4.txt
@@ -0,0 +1,6 @@
+Git v2.21.4 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6, v2.18.5,
+v2.19.6 and v2.20.5 to address the security issue CVE-2021-21300;
+see the release notes for these versions for details.
diff --git a/Documentation/RelNotes/2.22.5.txt b/Documentation/RelNotes/2.22.5.txt
new file mode 100644
index 0000000..6b280d9
--- /dev/null
+++ b/Documentation/RelNotes/2.22.5.txt
@@ -0,0 +1,7 @@
+Git v2.22.5 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6,
+v2.18.5, v2.19.6, v2.20.5 and v2.21.4 to address the security
+issue CVE-2021-21300; see the release notes for these versions
+for details.
diff --git a/Documentation/RelNotes/2.23.4.txt b/Documentation/RelNotes/2.23.4.txt
new file mode 100644
index 0000000..6e5424d
--- /dev/null
+++ b/Documentation/RelNotes/2.23.4.txt
@@ -0,0 +1,7 @@
+Git v2.23.4 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6, v2.18.5,
+v2.19.6, v2.20.5, v2.21.4 and v2.22.5 to address the security
+issue CVE-2021-21300; see the release notes for these versions
+for details.
diff --git a/Documentation/RelNotes/2.24.4.txt b/Documentation/RelNotes/2.24.4.txt
new file mode 100644
index 0000000..4e216ee
--- /dev/null
+++ b/Documentation/RelNotes/2.24.4.txt
@@ -0,0 +1,7 @@
+Git v2.24.4 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6, v2.18.5,
+v2.19.6, v2.20.5, v2.21.4, v2.22.5 and v2.23.4 to address the
+security issue CVE-2021-21300; see the release notes for these
+versions for details.
diff --git a/Documentation/RelNotes/2.25.5.txt b/Documentation/RelNotes/2.25.5.txt
new file mode 100644
index 0000000..fcb9566
--- /dev/null
+++ b/Documentation/RelNotes/2.25.5.txt
@@ -0,0 +1,7 @@
+Git v2.25.5 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6, v2.18.5,
+v2.19.6, v2.20.5, v2.21.4, v2.22.5, v2.23.4 and v2.24.4 to address
+the security issue CVE-2021-21300; see the release notes for
+these versions for details.
diff --git a/Documentation/RelNotes/2.26.3.txt b/Documentation/RelNotes/2.26.3.txt
new file mode 100644
index 0000000..4111c38
--- /dev/null
+++ b/Documentation/RelNotes/2.26.3.txt
@@ -0,0 +1,7 @@
+Git v2.26.3 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6, v2.18.5,
+v2.19.6, v2.20.5, v2.21.4, v2.22.5, v2.23.4, v2.24.4 and v2.25.5
+to address the security issue CVE-2021-21300; see the release
+notes for these versions for details.
diff --git a/Documentation/RelNotes/2.27.1.txt b/Documentation/RelNotes/2.27.1.txt
new file mode 100644
index 0000000..a1e08a9
--- /dev/null
+++ b/Documentation/RelNotes/2.27.1.txt
@@ -0,0 +1,7 @@
+Git v2.27.1 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6, v2.18.5,
+v2.19.6, v2.20.5, v2.21.4, v2.22.5, v2.23.4, v2.24.4, v2.25.5
+and v2.26.3 to address the security issue CVE-2021-21300; see
+the release notes for these versions for details.
diff --git a/Documentation/RelNotes/2.28.1.txt b/Documentation/RelNotes/2.28.1.txt
new file mode 100644
index 0000000..8484c82
--- /dev/null
+++ b/Documentation/RelNotes/2.28.1.txt
@@ -0,0 +1,7 @@
+Git v2.28.1 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6, v2.18.5,
+v2.19.6, v2.20.5, v2.21.4, v2.22.5, v2.23.4, v2.24.4, v2.25.5,
+v2.26.3 and v2.27.1 to address the security issue CVE-2021-21300;
+see the release notes for these versions for details.
diff --git a/Documentation/RelNotes/2.29.3.txt b/Documentation/RelNotes/2.29.3.txt
new file mode 100644
index 0000000..e10eedb
--- /dev/null
+++ b/Documentation/RelNotes/2.29.3.txt
@@ -0,0 +1,8 @@
+Git v2.29.3 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6,
+v2.18.5, v2.19.6, v2.20.5, v2.21.4, v2.22.5, v2.23.4, v2.24.4,
+v2.25.5, v2.26.3, v2.27.1 and v2.28.1 to address the security
+issue CVE-2021-21300; see the release notes for these versions
+for details.
diff --git a/Documentation/RelNotes/2.30.2.txt b/Documentation/RelNotes/2.30.2.txt
new file mode 100644
index 0000000..bada398
--- /dev/null
+++ b/Documentation/RelNotes/2.30.2.txt
@@ -0,0 +1,8 @@
+Git v2.30.2 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.17.6, v2.18.5,
+v2.19.6, v2.20.5, v2.21.4, v2.22.5, v2.23.4, v2.24.4, v2.25.5,
+v2.26.3, v2.27.1, v2.28.1 and v2.29.3 to address the security
+issue CVE-2021-21300; see the release notes for these versions
+for details.
diff --git a/Documentation/RelNotes/2.31.0.txt b/Documentation/RelNotes/2.31.0.txt
index cde22e8..cf0c7d8 100644
--- a/Documentation/RelNotes/2.31.0.txt
+++ b/Documentation/RelNotes/2.31.0.txt
@@ -16,6 +16,8 @@ Backward incompatible and other important changes
* The support for deprecated PCRE1 library has been dropped.
+ * Fixes for CVE-2021-21300 in Git 2.30.2 (and earlier) are included.
+
UI, Workflows & Features
diff --git a/Documentation/RelNotes/2.31.1.txt b/Documentation/RelNotes/2.31.1.txt
new file mode 100644
index 0000000..f9b06b8
--- /dev/null
+++ b/Documentation/RelNotes/2.31.1.txt
@@ -0,0 +1,27 @@
+Git 2.31.1 Release Notes
+========================
+
+Fixes since v2.31
+-----------------
+
+ * The fsmonitor interface read from its input without making sure
+ there is something to read from. This bug is new in 2.31
+ timeframe.
+
+ * The data structure used by fsmonitor interface was not properly
+ duplicated during an in-core merge, leading to use-after-free etc.
+
+ * "git bisect" reimplemented more in C during 2.30 timeframe did not
+ take an annotated tag as a good/bad endpoint well. This regression
+ has been corrected.
+
+ * Fix macros that can silently inject unintended null-statements.
+
+ * CALLOC_ARRAY() macro replaces many uses of xcalloc().
+
+ * Update insn in Makefile comments to run fuzz-all target.
+
+ * Fix a corner case bug in "git mv" on case insensitive systems,
+ which was introduced in 2.29 timeframe.
+
+Also contains various documentation updates and code clean-ups.
diff --git a/Documentation/RelNotes/2.32.0.txt b/Documentation/RelNotes/2.32.0.txt
new file mode 100644
index 0000000..3f73411
--- /dev/null
+++ b/Documentation/RelNotes/2.32.0.txt
@@ -0,0 +1,250 @@
+Git 2.32 Release Notes
+======================
+
+Backward compatibility notes
+----------------------------
+
+ * ".gitattributes", ".gitignore", and ".mailmap" files that are
+ symbolic links are ignored.
+
+
+Updates since v2.31
+-------------------
+
+UI, Workflows & Features
+
+ * It does not make sense to make ".gitattributes", ".gitignore" and
+ ".mailmap" symlinks, as they are supposed to be usable from the
+ object store (think: bare repositories where HEAD:.mailmap etc. are
+ used). When these files are symbolic links, we used to read the
+ contents of the files pointed by them by mistake, which has been
+ corrected.
+
+ * "git stash show" learned to optionally show untracked part of the
+ stash.
+
+ * "git log --format='...'" learned "%(describe)" placeholder.
+
+ * "git repack" so far has been only capable of repacking everything
+ under the sun into a single pack (or split by size). A cleverer
+ strategy to reduce the cost of repacking a repository has been
+ introduced.
+
+ * The http codepath learned to let the credential layer cache the
+ password used to unlock a certificate that has successfully been
+ used.
+
+ * "git commit --fixup=<commit>", which was to tweak the changes made
+ to the contents while keeping the original log message intact,
+ learned "--fixup=(amend|reword):<commit>", that can be used to
+ tweak both the message and the contents, and only the message,
+ respectively.
+
+ * When accessing a server with a URL like https://user:pass@site/, we
+   failed to fall back to the basic authentication with the
+ credential material embedded in the URL after the "Negotiate"
+ authentication failed. Now we do.
+
+ * "git send-email" learned to honor the core.hooksPath configuration.
+
+ * "git format-patch -v<n>" learned to allow a reroll count that is
+ not an integer.
+
+ * "git commit" learned "--trailer <key>[=<value>]" option; together
+ with the interpret-trailers command, this will make it easier to
+ support custom trailers.
+
+ * "git clone --reject-shallow" option fails the clone as soon as we
+ notice that we are cloning from a shallow repository.
+
+ * A configuration variable has been added to force tips of certain
+ refs to be given a reachability bitmap.
+
+ * "gitweb" learned "e-mail privacy" feature to redact strings that
+ look like e-mail addresses on various pages.
+
+ * "git apply --3way" has always been "to fall back to 3-way merge
+ only when straight application fails". Swap the order of falling
+ back so that 3-way is always attempted first (only when the option
+ is given, of course) and then straight patch application is used as
+ a fallback when it fails.
+
+ * "git apply" now takes "--3way" and "--cached" at the same time, and
+   works and records results only in the index.
+
+ * The command line completion (in contrib/) has learned that
+ CHERRY_PICK_HEAD is a possible pseudo-ref.
+
+ * Userdiff patterns for "Scheme" have been added.
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * Rename detection rework continues.
+
+ * GIT_TEST_FAIL_PREREQS is a mechanism to skip test pieces with
+ prerequisites to catch broken tests that depend on the side effects
+ of optional pieces, but did not work at all when negative
+ prerequisites were involved.
+ (merge 27d578d904 jk/fail-prereq-testfix later to maint).
+
+ * "git diff-index" codepath has been taught to trust fsmonitor status
+ to reduce number of lstat() calls.
+ (merge 7e5aa13d2c nk/diff-index-fsmonitor later to maint).
+
+ * Reorganize Makefile to allow building git.o and other essential
+ objects without extra stuff needed only for testing.
+
+ * Preparatory API changes for parallel checkout.
+
+ * A simple IPC interface gets introduced to build services like
+ fsmonitor on top.
+
+ * Fsck API clean-up.
+
+ * SECURITY.md that is facing individual contributors and end users
+ has been introduced. Also a procedure to follow when preparing
+ embargoed releases has been spelled out.
+ (merge 09420b7648 js/security-md later to maint).
+
+ * Optimize "rev-list --use-bitmap-index --objects" corner case that
+ uses negative tags as the stopping points.
+
+ * CMake update for vsbuild.
+
+ * An on-disk reverse-index to map the in-pack location of an object
+ back to its object name across multiple packfiles is introduced.
+
+ * Generate [ec]tags under $(QUIET_GEN).
+
+ * Clean-up codepaths that implements "git send-email --validate"
+ option and improves the message from it.
+
+ * The last remnant of gettext-poison has been removed.
+
+ * The test framework has been taught to optionally turn the default
+ merge strategy to "ort" throughout the system where we use
+ three-way merges internally, like cherry-pick, rebase etc.,
+ primarily to enhance its test coverage (the strategy has been
+ available as an explicit "-s ort" choice).
+
+ * A bit of code clean-up and a lot of test clean-up around userdiff
+ area.
+
+
+Fixes since v2.31
+-----------------
+
+ * The fsmonitor interface read from its input without making sure
+ there is something to read from. This bug is new in 2.31
+ timeframe.
+
+ * The data structure used by fsmonitor interface was not properly
+ duplicated during an in-core merge, leading to use-after-free etc.
+
+ * "git bisect" reimplemented more in C during 2.30 timeframe did not
+ take an annotated tag as a good/bad endpoint well. This regression
+ has been corrected.
+
+ * Fix macros that can silently inject unintended null-statements.
+
+ * CALLOC_ARRAY() macro replaces many uses of xcalloc().
+
+ * Update insn in Makefile comments to run fuzz-all target.
+
+ * Fix a corner case bug in "git mv" on case insensitive systems,
+ which was introduced in 2.29 timeframe.
+
+ * We had a code to diagnose and die cleanly when a required
+ clean/smudge filter is missing, but an assert before that
+ unnecessarily fired, hiding the end-user facing die() message.
+ (merge 6fab35f748 mt/cleanly-die-upon-missing-required-filter later to maint).
+
+ * Update C code that sets a few configuration variables when a remote
+ is configured so that it spells configuration variable names in the
+ canonical camelCase.
+ (merge 0f1da600e6 ab/remote-write-config-in-camel-case later to maint).
+
+ * A new configuration variable has been introduced to allow choosing
+ which version of the generation number gets used in the
+ commit-graph file.
+ (merge 702110aac6 ds/commit-graph-generation-config later to maint).
+
+ * Perf test update to work better in secondary worktrees.
+ (merge 36e834abc1 jk/perf-in-worktrees later to maint).
+
+ * Updates to memory allocation code around the use of pcre2 library.
+ (merge c1760352e0 ab/grep-pcre2-allocfix later to maint).
+
+ * "git -c core.bare=false clone --bare ..." would have segfaulted,
+ which has been corrected.
+ (merge 75555676ad bc/clone-bare-with-conflicting-config later to maint).
+
+ * When "git checkout" removes a path that does not exist in the
+ commit it is checking out, it wasn't careful enough not to follow
+ symbolic links, which has been corrected.
+ (merge fab78a0c3d mt/checkout-remove-nofollow later to maint).
+
+ * A few option description strings started with capital letters,
+ which were corrected.
+ (merge 5ee90326dc cc/downcase-opt-help later to maint).
+
+ * Plug or annotate remaining leaks that trigger while running the
+ very basic set of tests.
+ (merge 68ffe095a2 ah/plugleaks later to maint).
+
+ * The hashwrite() API uses a buffering mechanism to avoid calling
+ write(2) too frequently. This logic has been refactored to be
+ easier to understand.
+ (merge ddaf1f62e3 ds/clarify-hashwrite later to maint).
+
+ * "git cherry-pick/revert" with or without "--[no-]edit" did not spawn
+ the editor as expected (e.g. "revert --no-edit" after a conflict
+ still asked to edit the message), which has been corrected.
+ (merge 39edfd5cbc en/sequencer-edit-upon-conflict-fix later to maint).
+
+ * "git daemon" has been tightened against systems that take backslash
+ as directory separator.
+ (merge 9a7f1ce8b7 rs/daemon-sanitize-dir-sep later to maint).
+
+ * A NULL-dereference bug has been corrected in an error codepath in
+ "git for-each-ref", "git branch --list" etc.
+ (merge c685450880 jk/ref-filter-segfault-fix later to maint).
+
+ * Streamline the codepath to fix the UTF-8 encoding issues in the
+ argv[] and the prefix on macOS.
+ (merge c7d0e61016 tb/precompose-prefix-simplify later to maint).
+
+ * The command-line completion script (in contrib/) had a couple of
+ references that would have given a warning under the "-u" (nounset)
+ option.
+ (merge c5c0548d79 vs/completion-with-set-u later to maint).
+
+ * When "git pack-objects" makes a literal copy of a part of existing
+ packfile using the reachability bitmaps, its update to the progress
+ meter was broken.
+ (merge 8e118e8490 jk/pack-objects-bitmap-progress-fix later to maint).
+
+ * The dependencies for config-list.h and command-list.h were broken
+ when the former was split out of the latter, which has been
+ corrected.
+ (merge 56550ea718 sg/bugreport-fixes later to maint).
+
+ * Other code cleanup, docfix, build fix, etc.
+ (merge f451960708 dl/cat-file-doc-cleanup later to maint).
+ (merge 12604a8d0c sv/t9801-test-path-is-file-cleanup later to maint).
+ (merge ea7e63921c jr/doc-ignore-typofix later to maint).
+ (merge 23c781f173 ps/update-ref-trans-hook-doc later to maint).
+ (merge 42efa1231a jk/filter-branch-sha256 later to maint).
+ (merge 4c8e3dca6e tb/push-simple-uses-branch-merge-config later to maint).
+ (merge 6534d436a2 bs/asciidoctor-installation-hints later to maint).
+ (merge 47957485b3 ab/read-tree later to maint).
+ (merge 2be927f3d1 ab/diff-no-index-tests later to maint).
+ (merge 76593c09bb ab/detox-gettext-tests later to maint).
+ (merge 28e29ee38b jc/doc-format-patch-clarify later to maint).
+ (merge fc12b6fdde fm/user-manual-use-preface later to maint).
+ (merge dba94e3a85 cc/test-helper-bloom-usage-fix later to maint).
+ (merge 61a7660516 hn/reftable-tables-doc-update later to maint).
+ (merge 81ed96a9b2 jt/fetch-pack-request-fix later to maint).
+ (merge 151b6c2dd7 jc/doc-do-not-capitalize-clarification later to maint).
+ (merge 9160068ac6 js/access-nul-emulation-on-windows later to maint).
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 0452db2..55287d7 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -117,10 +117,13 @@ If in doubt which identifier to use, run `git log --no-merges` on the
files you are modifying to see the current conventions.
[[summary-section]]
-It's customary to start the remainder of the first line after "area: "
-with a lower-case letter. E.g. "doc: clarify...", not "doc:
-Clarify...", or "githooks.txt: improve...", not "githooks.txt:
-Improve...".
+The title sentence after the "area:" prefix omits the full stop at the
+end, and its first word is not capitalized unless there is a reason to
+capitalize it other than because it is the first word in the sentence.
+E.g. "doc: clarify...", not "doc: Clarify...", or "githooks.txt:
+improve...", not "githooks.txt: Improve...". But "refs: HEAD is also
+treated as a ref" is correct, as we spell `HEAD` in all caps even when
+it appears in the middle of a sentence.
[[meaningful-message]]
The body should provide a meaningful commit message, which:
diff --git a/Documentation/config.txt b/Documentation/config.txt
index d08e83a..bf82766 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -46,7 +46,7 @@ Subsection names are case sensitive and can contain any characters except
newline and the null byte. Doublequote `"` and backslash can be included
by escaping them as `\"` and `\\`, respectively. Backslashes preceding
other characters are dropped when reading; for example, `\t` is read as
-`t` and `\0` is read as `0` Section headers cannot span multiple lines.
+`t` and `\0` is read as `0`. Section headers cannot span multiple lines.
Variables may belong directly to a section or to a given subsection. You
can have `[section]` if you have `[section "subsection"]`, but you don't
need to.
diff --git a/Documentation/config/clone.txt b/Documentation/config/clone.txt
index 47de36a..7bcfbd1 100644
--- a/Documentation/config/clone.txt
+++ b/Documentation/config/clone.txt
@@ -2,3 +2,7 @@ clone.defaultRemoteName::
The name of the remote to create when cloning a repository. Defaults to
`origin`, and can be overridden by passing the `--origin` command-line
option to linkgit:git-clone[1].
+
+clone.rejectShallow::
+	Reject cloning a repository if it is a shallow one; this can be overridden
+	by passing the `--reject-shallow` option on the command line. See linkgit:git-clone[1].
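A minimal sketch of how the new setting interacts with the command-line option; the repository URL below is illustrative, not taken from the patch:

    $ git config --global clone.rejectShallow true
    $ git clone https://example.com/repo.git                        # aborts if the source repository is shallow
    $ git clone --no-reject-shallow https://example.com/repo.git    # one-off override of the configuration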
diff --git a/Documentation/config/commitgraph.txt b/Documentation/config/commitgraph.txt
index 4582c39..30604e4 100644
--- a/Documentation/config/commitgraph.txt
+++ b/Documentation/config/commitgraph.txt
@@ -1,3 +1,9 @@
+commitGraph.generationVersion::
+ Specifies the type of generation number version to use when writing
+ or reading the commit-graph file. If version 1 is specified, then
+ the corrected commit dates will not be written or read. Defaults to
+ 2.
+
commitGraph.maxNewFilters::
Specifies the default value for the `--max-new-filters` option of `git
commit-graph write` (c.f., linkgit:git-commit-graph[1]).
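A brief sketch of exercising the new knob; the `write --reachable` invocation is an illustrative choice, not part of this patch:

    $ git config commitGraph.generationVersion 1
    $ git commit-graph write --reachable      # corrected commit dates are neither written nor read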
diff --git a/Documentation/config/mergetool.txt b/Documentation/config/mergetool.txt
index 90f76f5..cafbbef 100644
--- a/Documentation/config/mergetool.txt
+++ b/Documentation/config/mergetool.txt
@@ -53,7 +53,7 @@ mergetool.hideResolved::
resolution. This flag causes 'LOCAL' and 'REMOTE' to be overwritten so
that only the unresolved conflicts are presented to the merge tool. Can
be configured per-tool via the `mergetool.<tool>.hideResolved`
- configuration variable. Defaults to `true`.
+ configuration variable. Defaults to `false`.
mergetool.keepBackup::
After performing a merge, the original file with conflict markers
diff --git a/Documentation/config/pack.txt b/Documentation/config/pack.txt
index 3da4ea9..c0844d8 100644
--- a/Documentation/config/pack.txt
+++ b/Documentation/config/pack.txt
@@ -122,6 +122,21 @@ pack.useSparse::
commits contain certain types of direct renames. Default is
`true`.
+pack.preferBitmapTips::
+ When selecting which commits will receive bitmaps, prefer a
+ commit at the tip of any reference that is a suffix of any value
+ of this configuration over any other commits in the "selection
+ window".
++
+Note that setting this configuration to `refs/foo` does not mean that
+the commits at the tips of `refs/foo/bar` and `refs/foo/baz` will
+necessarily be selected. This is because commits are selected for
+bitmaps from within a series of windows of variable length.
++
+If a commit at the tip of any reference which is a suffix of any value
+of this configuration is seen in a window, it is immediately given
+preference over any other commit in that window.
+
pack.writeBitmaps (deprecated)::
This is a deprecated synonym for `repack.writeBitmaps`.
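A hedged example of the new setting, assuming `refs/tags` as the namespace whose tips should be favored:

    $ git config pack.preferBitmapTips refs/tags
    $ git repack -adb      # commits at the tips of matching refs win ties inside each selection window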
diff --git a/Documentation/config/rebase.txt b/Documentation/config/rebase.txt
index 214f31b..8c979cb 100644
--- a/Documentation/config/rebase.txt
+++ b/Documentation/config/rebase.txt
@@ -1,10 +1,3 @@
-rebase.useBuiltin::
- Unused configuration variable. Used in Git versions 2.20 and
- 2.21 as an escape hatch to enable the legacy shellscript
- implementation of rebase. Now the built-in rewrite of it in C
- is always used. Setting this will emit a warning, to alert any
- remaining users that setting this now does nothing.
-
rebase.backend::
Default backend to use for rebasing. Possible choices are
'apply' or 'merge'. In the future, if the merge backend gains
diff --git a/Documentation/config/stash.txt b/Documentation/config/stash.txt
index 00eb354..413f907 100644
--- a/Documentation/config/stash.txt
+++ b/Documentation/config/stash.txt
@@ -5,6 +5,11 @@ stash.useBuiltin::
is always used. Setting this will emit a warning, to alert any
remaining users that setting this now does nothing.
+stash.showIncludeUntracked::
+ If this is set to true, the `git stash show` command without an
+ option will show the untracked files of a stash entry. Defaults to
+ false. See description of 'show' command in linkgit:git-stash[1].
+
stash.showPatch::
If this is set to true, the `git stash show` command without an
option will show the stash entry in patch form. Defaults to false.
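A short sketch of the new setting in use:

    $ git config stash.showIncludeUntracked true
    $ git stash show       # now also lists the untracked files recorded in the stash entry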
diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt
index 07783de..9e7b4e1 100644
--- a/Documentation/fetch-options.txt
+++ b/Documentation/fetch-options.txt
@@ -110,6 +110,11 @@ ifndef::git-pull[]
setting `fetch.writeCommitGraph`.
endif::git-pull[]
+--prefetch::
+ Modify the configured refspec to place all refs into the
+ `refs/prefetch/` namespace. See the `prefetch` task in
+ linkgit:git-maintenance[1].
+
-p::
--prune::
Before fetching, remove any remote-tracking references that no
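A minimal sketch of the new flag; `origin` is assumed to be a configured remote:

    $ git fetch --prefetch origin         # fetched refs are placed under refs/prefetch/ only
    $ git for-each-ref refs/prefetch/     # inspect what the prefetch brought in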
diff --git a/Documentation/git-apply.txt b/Documentation/git-apply.txt
index 91d9a86..aa1ae56 100644
--- a/Documentation/git-apply.txt
+++ b/Documentation/git-apply.txt
@@ -84,12 +84,13 @@ OPTIONS
-3::
--3way::
- When the patch does not apply cleanly, fall back on 3-way merge if
- the patch records the identity of blobs it is supposed to apply to,
- and we have those blobs available locally, possibly leaving the
+ Attempt 3-way merge if the patch records the identity of blobs it is supposed
+ to apply to and we have those blobs available locally, possibly leaving the
conflict markers in the files in the working tree for the user to
- resolve. This option implies the `--index` option, and is incompatible
- with the `--reject` and the `--cached` options.
+ resolve. This option implies the `--index` option unless the
+ `--cached` option is used, and is incompatible with the `--reject` option.
+ When used with the `--cached` option, any conflicts are left at higher stages
+ in the cache.
--build-fake-ancestor=<file>::
Newer 'git diff' output has embedded 'index information'
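A sketch of the newly allowed combination; `fix.patch` is a hypothetical patch file:

    $ git apply --3way --cached fix.patch   # merge result (and any conflict stages) recorded only in the index
    $ git ls-files -u                       # shows the higher-stage entries left by unresolved conflicts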
diff --git a/Documentation/git-cat-file.txt b/Documentation/git-cat-file.txt
index 8e192d8..4eb0421 100644
--- a/Documentation/git-cat-file.txt
+++ b/Documentation/git-cat-file.txt
@@ -35,42 +35,42 @@ OPTIONS
-t::
Instead of the content, show the object type identified by
- <object>.
+ `<object>`.
-s::
Instead of the content, show the object size identified by
- <object>.
+ `<object>`.
-e::
- Exit with zero status if <object> exists and is a valid
- object. If <object> is of an invalid format exit with non-zero and
+ Exit with zero status if `<object>` exists and is a valid
+ object. If `<object>` is of an invalid format exit with non-zero and
emits an error on stderr.
-p::
- Pretty-print the contents of <object> based on its type.
+ Pretty-print the contents of `<object>` based on its type.
<type>::
- Typically this matches the real type of <object> but asking
+ Typically this matches the real type of `<object>` but asking
for a type that can trivially be dereferenced from the given
- <object> is also permitted. An example is to ask for a
- "tree" with <object> being a commit object that contains it,
- or to ask for a "blob" with <object> being a tag object that
+ `<object>` is also permitted. An example is to ask for a
+ "tree" with `<object>` being a commit object that contains it,
+ or to ask for a "blob" with `<object>` being a tag object that
points at it.
--textconv::
Show the content as transformed by a textconv filter. In this case,
- <object> has to be of the form <tree-ish>:<path>, or :<path> in
+ `<object>` has to be of the form `<tree-ish>:<path>`, or `:<path>` in
order to apply the filter to the content recorded in the index at
- <path>.
+ `<path>`.
--filters::
Show the content as converted by the filters configured in
- the current working tree for the given <path> (i.e. smudge filters,
- end-of-line conversion, etc). In this case, <object> has to be of
- the form <tree-ish>:<path>, or :<path>.
+ the current working tree for the given `<path>` (i.e. smudge filters,
+ end-of-line conversion, etc). In this case, `<object>` has to be of
+ the form `<tree-ish>:<path>`, or `:<path>`.
--path=<path>::
- For use with --textconv or --filters, to allow specifying an object
+ For use with `--textconv` or `--filters`, to allow specifying an object
name and a path separately, e.g. when it is difficult to figure out
the revision from which the blob came.
@@ -115,15 +115,15 @@ OPTIONS
repository.
--allow-unknown-type::
- Allow -s or -t to query broken/corrupt objects of unknown type.
+ Allow `-s` or `-t` to query broken/corrupt objects of unknown type.
--follow-symlinks::
- With --batch or --batch-check, follow symlinks inside the
+ With `--batch` or `--batch-check`, follow symlinks inside the
repository when requesting objects with extended SHA-1
expressions of the form tree-ish:path-in-tree. Instead of
providing output about the link itself, provide output about
the linked-to object. If a symlink points outside the
- tree-ish (e.g. a link to /foo or a root-level link to ../foo),
+ tree-ish (e.g. a link to `/foo` or a root-level link to `../foo`),
the portion of the link which is outside the tree will be
printed.
+
@@ -175,15 +175,15 @@ respectively print:
OUTPUT
------
-If `-t` is specified, one of the <type>.
+If `-t` is specified, one of the `<type>`.
-If `-s` is specified, the size of the <object> in bytes.
+If `-s` is specified, the size of the `<object>` in bytes.
-If `-e` is specified, no output, unless the <object> is malformed.
+If `-e` is specified, no output, unless the `<object>` is malformed.
-If `-p` is specified, the contents of <object> are pretty-printed.
+If `-p` is specified, the contents of `<object>` are pretty-printed.
-If <type> is specified, the raw (though uncompressed) contents of the <object>
+If `<type>` is specified, the raw (though uncompressed) contents of the `<object>`
will be returned.
BATCH OUTPUT
@@ -200,7 +200,7 @@ object, with placeholders of the form `%(atom)` expanded, followed by a
newline. The available atoms are:
`objectname`::
- The 40-hex object name of the object.
+ The full hex representation of the object name.
`objecttype`::
The type of the object (the same as `cat-file -t` reports).
@@ -215,8 +215,9 @@ newline. The available atoms are:
`deltabase`::
If the object is stored as a delta on-disk, this expands to the
- 40-hex sha1 of the delta base object. Otherwise, expands to the
- null sha1 (40 zeroes). See `CAVEATS` below.
+ full hex representation of the delta base object name.
+ Otherwise, expands to the null OID (all zeroes). See `CAVEATS`
+ below.
`rest`::
If this atom is used in the output string, input lines are split
@@ -235,14 +236,14 @@ newline.
For example, `--batch` without a custom format would produce:
------------
-<sha1> SP <type> SP <size> LF
+<oid> SP <type> SP <size> LF
<contents> LF
------------
Whereas `--batch-check='%(objectname) %(objecttype)'` would produce:
------------
-<sha1> SP <type> LF
+<oid> SP <type> LF
------------
If a name is specified on stdin that cannot be resolved to an object in
@@ -258,7 +259,7 @@ If a name is specified that might refer to more than one object (an ambiguous sh
<object> SP ambiguous LF
------------
-If --follow-symlinks is used, and a symlink in the repository points
+If `--follow-symlinks` is used, and a symlink in the repository points
outside the repository, then `cat-file` will ignore any custom format
and print:
@@ -267,11 +268,11 @@ symlink SP <size> LF
<symlink> LF
------------
-The symlink will either be absolute (beginning with a /), or relative
-to the tree root. For instance, if dir/link points to ../../foo, then
-<symlink> will be ../foo. <size> is the size of the symlink in bytes.
+The symlink will either be absolute (beginning with a `/`), or relative
+to the tree root. For instance, if dir/link points to `../../foo`, then
+`<symlink>` will be `../foo`. `<size>` is the size of the symlink in bytes.
-If --follow-symlinks is used, the following error messages will be
+If `--follow-symlinks` is used, the following error messages will be
displayed:
------------
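A small illustration of the batch-check format using the atoms described above:

    $ echo HEAD | git cat-file --batch-check='%(objectname) %(objecttype) %(objectsize)'

Because the first field is the full hex object name rather than a fixed 40-hex value, the same invocation works for SHA-1 and SHA-256 repositories alike.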
diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt
index 02d9c19..3fe3810 100644
--- a/Documentation/git-clone.txt
+++ b/Documentation/git-clone.txt
@@ -15,7 +15,7 @@ SYNOPSIS
[--dissociate] [--separate-git-dir <git dir>]
[--depth <depth>] [--[no-]single-branch] [--no-tags]
[--recurse-submodules[=<pathspec>]] [--[no-]shallow-submodules]
- [--[no-]remote-submodules] [--jobs <n>] [--sparse]
+ [--[no-]remote-submodules] [--jobs <n>] [--sparse] [--[no-]reject-shallow]
[--filter=<filter>] [--] <repository>
[<directory>]
@@ -149,6 +149,11 @@ objects from the source repository into a pack in the cloned repository.
--no-checkout::
No checkout of HEAD is performed after the clone is complete.
+--[no-]reject-shallow::
+ Fail if the source repository is a shallow repository.
+ The 'clone.rejectShallow' configuration variable can be used to
+ specify the default.
+
--bare::
Make a 'bare' Git repository. That is, instead of
creating `<directory>` and placing the administrative
diff --git a/Documentation/git-commit.txt b/Documentation/git-commit.txt
index 17150fa..340c5fb 100644
--- a/Documentation/git-commit.txt
+++ b/Documentation/git-commit.txt
@@ -9,12 +9,13 @@ SYNOPSIS
--------
[verse]
'git commit' [-a | --interactive | --patch] [-s] [-v] [-u<mode>] [--amend]
- [--dry-run] [(-c | -C | --fixup | --squash) <commit>]
+ [--dry-run] [(-c | -C | --squash) <commit> | --fixup [(amend|reword):]<commit>)]
[-F <file> | -m <msg>] [--reset-author] [--allow-empty]
[--allow-empty-message] [--no-verify] [-e] [--author=<author>]
[--date=<date>] [--cleanup=<mode>] [--[no-]status]
[-i | -o] [--pathspec-from-file=<file> [--pathspec-file-nul]]
- [-S[<keyid>]] [--] [<pathspec>...]
+ [(--trailer <token>[(=|:)<value>])...] [-S[<keyid>]]
+ [--] [<pathspec>...]
DESCRIPTION
-----------
@@ -86,11 +87,44 @@ OPTIONS
Like '-C', but with `-c` the editor is invoked, so that
the user can further edit the commit message.
---fixup=<commit>::
- Construct a commit message for use with `rebase --autosquash`.
- The commit message will be the subject line from the specified
- commit with a prefix of "fixup! ". See linkgit:git-rebase[1]
- for details.
+--fixup=[(amend|reword):]<commit>::
+ Create a new commit which "fixes up" `<commit>` when applied with
+ `git rebase --autosquash`. Plain `--fixup=<commit>` creates a
+ "fixup!" commit which changes the content of `<commit>` but leaves
+ its log message untouched. `--fixup=amend:<commit>` is similar but
+ creates an "amend!" commit which also replaces the log message of
+ `<commit>` with the log message of the "amend!" commit.
+ `--fixup=reword:<commit>` creates an "amend!" commit which
+ replaces the log message of `<commit>` with its own log message
+ but makes no changes to the content of `<commit>`.
++
+The commit created by plain `--fixup=<commit>` has a subject
+composed of "fixup!" followed by the subject line from <commit>,
+and is recognized specially by `git rebase --autosquash`. The `-m`
+option may be used to supplement the log message of the created
+commit, but the additional commentary will be thrown away once the
+"fixup!" commit is squashed into `<commit>` by
+`git rebase --autosquash`.
++
+The commit created by `--fixup=amend:<commit>` is similar but its
+subject is instead prefixed with "amend!". The log message of
+<commit> is copied into the log message of the "amend!" commit and
+opened in an editor so it can be refined. When `git rebase
+--autosquash` squashes the "amend!" commit into `<commit>`, the
+log message of `<commit>` is replaced by the refined log message
+from the "amend!" commit. It is an error for the "amend!" commit's
+log message to be empty unless `--allow-empty-message` is
+specified.
++
+`--fixup=reword:<commit>` is shorthand for `--fixup=amend:<commit>
+--only`. It creates an "amend!" commit with only a log message
+(ignoring any changes staged in the index). When squashed by `git
+rebase --autosquash`, it replaces the log message of `<commit>`
+without making any other changes.
++
+Neither "fixup!" nor "amend!" commits change authorship of
+`<commit>` when applied by `git rebase --autosquash`.
+See linkgit:git-rebase[1] for details.
--squash=<commit>::
Construct a commit message for use with `rebase --autosquash`.
@@ -166,6 +200,17 @@ The `-m` option is mutually exclusive with `-c`, `-C`, and `-F`.
include::signoff-option.txt[]
+--trailer <token>[(=|:)<value>]::
+ Specify a (<token>, <value>) pair that should be applied as a
+ trailer. (e.g. `git commit --trailer "Signed-off-by:C O Mitter \
+ <committer@example.com>" --trailer "Helped-by:C O Mitter \
+ <committer@example.com>"` will add the "Signed-off-by" trailer
+ and the "Helped-by" trailer to the commit message.)
+ The `trailer.*` configuration variables
+ (linkgit:git-interpret-trailers[1]) can be used to define if
+ a duplicated trailer is omitted, where in the run of trailers
+ each trailer would appear, and other details.
+
-n::
--no-verify::
This option bypasses the pre-commit and commit-msg hooks.
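A hedged end-to-end sketch of the new `--fixup` modes together with `--trailer`; the commit message, commit references, and trailer value below are illustrative:

    $ git commit -m "teach foo to handle bar" --trailer "Helped-by: C O Mitter <committer@example.com>"
    $ git commit -a --fixup=amend:HEAD~2   # "amend!" commit: new content plus an editable replacement message
    $ git commit --fixup=reword:HEAD~3     # "amend!" commit that only rewords HEAD~3
    $ git rebase -i --autosquash HEAD~5    # folds the fixup/amend commits into their targets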
diff --git a/Documentation/git-format-patch.txt b/Documentation/git-format-patch.txt
index 3e49bf2..911da18 100644
--- a/Documentation/git-format-patch.txt
+++ b/Documentation/git-format-patch.txt
@@ -36,11 +36,28 @@ SYNOPSIS
DESCRIPTION
-----------
-Prepare each commit with its patch in
-one file per commit, formatted to resemble UNIX mailbox format.
+Prepare each commit with its "patch" in
+one "message" per commit, formatted to resemble a UNIX mailbox.
The output of this command is convenient for e-mail submission or
for use with 'git am'.
+A "message" generated by the command consists of three parts:
+
+* A brief metadata header that begins with `From <commit>`
+ with a fixed `Mon Sep 17 00:00:00 2001` datestamp to help programs
+ like "file(1)" to recognize that the file is an output from this
+ command, fields that record the author identity, the author date,
+ and the title of the change (taken from the first paragraph of the
+ commit log message).
+
+* The second and subsequent paragraphs of the commit log message.
+
+* The "patch", which is the "diff -p --stat" output (see
+ linkgit:git-diff[1]) between the commit and its parent.
+
+The log message and the patch are separated by a three-dash line.
+
There are two ways to specify which commits to operate on.
1. A single commit, <since>, specifies that the commits leading
@@ -221,6 +238,11 @@ populated with placeholder text.
`--subject-prefix` option) has ` v<n>` appended to it. E.g.
`--reroll-count=4` may produce `v4-0001-add-makefile.patch`
file that has "Subject: [PATCH v4 1/20] Add makefile" in it.
+ `<n>` does not have to be an integer (e.g. "--reroll-count=4.4",
+ or "--reroll-count=4rev2" are allowed), but the downside of
+ using such a reroll-count is that the range-diff/interdiff
+ with the previous version does not state exactly which
+	version the new iteration is compared against.
--to=<email>::
Add a `To:` header to the email headers. This is in addition
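A small sketch of a non-integer reroll count, per the option description above:

    $ git format-patch -v4.2 -3    # emits v4.2-0001-*.patch files with subjects like [PATCH v4.2 1/3]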
diff --git a/Documentation/git-maintenance.txt b/Documentation/git-maintenance.txt
index 80ddd33..1e738ad 100644
--- a/Documentation/git-maintenance.txt
+++ b/Documentation/git-maintenance.txt
@@ -92,10 +92,8 @@ commit-graph::
prefetch::
The `prefetch` task updates the object directory with the latest
objects from all registered remotes. For each remote, a `git fetch`
- command is run. The refmap is custom to avoid updating local or remote
- branches (those in `refs/heads` or `refs/remotes`). Instead, the
- remote refs are stored in `refs/prefetch/<remote>/`. Also, tags are
- not updated.
+ command is run. The configured refspec is modified to place all
+ requested refs within `refs/prefetch/`. Also, tags are not updated.
+
This is done to avoid disrupting the remote-tracking branches. The end users
expect these refs to stay unmoved unless they initiate a fetch. With prefetch
diff --git a/Documentation/git-mergetool.txt b/Documentation/git-mergetool.txt
index 6b14702..e587c77 100644
--- a/Documentation/git-mergetool.txt
+++ b/Documentation/git-mergetool.txt
@@ -99,6 +99,10 @@ success of the resolution after the custom tool has exited.
(see linkgit:git-config[1]). To cancel `diff.orderFile`,
use `-O/dev/null`.
+CONFIGURATION
+-------------
+include::config/mergetool.txt[]
+
TEMPORARY FILES
---------------
`git mergetool` creates `*.orig` backup files while resolving merges.
diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt
index eb0caa0..ffd601b 100644
--- a/Documentation/git-multi-pack-index.txt
+++ b/Documentation/git-multi-pack-index.txt
@@ -9,7 +9,8 @@ git-multi-pack-index - Write and verify multi-pack-indexes
SYNOPSIS
--------
[verse]
-'git multi-pack-index' [--object-dir=<dir>] [--[no-]progress] <subcommand>
+'git multi-pack-index' [--object-dir=<dir>] [--[no-]progress]
+ [--preferred-pack=<pack>] <subcommand>
DESCRIPTION
-----------
@@ -30,7 +31,16 @@ OPTIONS
The following subcommands are available:
write::
- Write a new MIDX file.
+ Write a new MIDX file. The following options are available for
+ the `write` sub-command:
++
+--
+ --preferred-pack=<pack>::
+ Optionally specify the tie-breaking pack used when
+ multiple packs contain the same object. If not given,
+ ties are broken in favor of the pack with the lowest
+ mtime.
+--
verify::
Verify the contents of the MIDX file.
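A brief example of the new `write` option; the pack name is hypothetical:

    $ git multi-pack-index write --preferred-pack=pack-1234abcd.pack

Objects that appear in more than one pack are then resolved from the named pack instead of the pack with the lowest mtime.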
diff --git a/Documentation/git-pack-objects.txt b/Documentation/git-pack-objects.txt
index f85cb7e..25d9fbe 100644
--- a/Documentation/git-pack-objects.txt
+++ b/Documentation/git-pack-objects.txt
@@ -85,6 +85,16 @@ base-name::
reference was included in the resulting packfile. This
can be useful to send new tags to native Git clients.
+--stdin-packs::
+ Read the basenames of packfiles (e.g., `pack-1234abcd.pack`)
+ from the standard input, instead of object names or revision
+ arguments. The resulting pack contains all objects listed in the
+ included packs (those not beginning with `^`), excluding any
+ objects listed in the excluded packs (beginning with `^`).
++
+Incompatible with `--revs`, or options that imply `--revs` (such as
+`--all`), with the exception of `--unpacked`, which is compatible.
+
--window=<n>::
--depth=<n>::
These two options affect how the objects contained in
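A hedged sketch of driving the new mode; the pack basenames and the output base-name are hypothetical:

    $ printf '%s\n' pack-1234abcd.pack ^pack-5678fedc.pack |
          git pack-objects --stdin-packs .git/objects/pack/pack

This would pack every object found in pack-1234abcd.pack that is not also present in pack-5678fedc.pack.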
diff --git a/Documentation/git-push.txt b/Documentation/git-push.txt
index ab103c8..a953c7c 100644
--- a/Documentation/git-push.txt
+++ b/Documentation/git-push.txt
@@ -600,7 +600,7 @@ EXAMPLES
`git push origin`::
Without additional configuration, pushes the current branch to
- the configured upstream (`remote.origin.merge` configuration
+ the configured upstream (`branch.<name>.merge` configuration
variable) if it has the same name as the current branch, and
errors out without pushing otherwise.
+
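
For illustration only (the branch name `topic` is assumed), these are the settings that `git push origin` consults in the example above:

    git config branch.topic.remote origin
    git config branch.topic.merge refs/heads/topic
    git push origin
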
diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt
index a0487b5..f08ae27 100644
--- a/Documentation/git-rebase.txt
+++ b/Documentation/git-rebase.txt
@@ -593,16 +593,17 @@ See also INCOMPATIBLE OPTIONS below.
--autosquash::
--no-autosquash::
- When the commit log message begins with "squash! ..." (or
- "fixup! ..."), and there is already a commit in the todo list that
- matches the same `...`, automatically modify the todo list of rebase
- -i so that the commit marked for squashing comes right after the
- commit to be modified, and change the action of the moved commit
- from `pick` to `squash` (or `fixup`). A commit matches the `...` if
- the commit subject matches, or if the `...` refers to the commit's
- hash. As a fall-back, partial matches of the commit subject work,
- too. The recommended way to create fixup/squash commits is by using
- the `--fixup`/`--squash` options of linkgit:git-commit[1].
+ When the commit log message begins with "squash! ..." or "fixup! ..."
+ or "amend! ...", and there is already a commit in the todo list that
+ matches the same `...`, automatically modify the todo list of
+ `rebase -i`, so that the commit marked for squashing comes right after
+ the commit to be modified, and change the action of the moved commit
+ from `pick` to `squash` or `fixup` or `fixup -C` respectively. A commit
+ matches the `...` if the commit subject matches, or if the `...` refers
+ to the commit's hash. As a fall-back, partial matches of the commit
+ subject work, too. The recommended way to create fixup/amend/squash
+ commits is by using the `--fixup`, `--fixup=amend:` or `--fixup=reword:`
+ and `--squash` options respectively of linkgit:git-commit[1].
+
If the `--autosquash` option is enabled by default using the
configuration variable `rebase.autoSquash`, this option can be
@@ -887,9 +888,17 @@ If you want to fold two or more commits into one, replace the command
"pick" for the second and subsequent commits with "squash" or "fixup".
If the commits had different authors, the folded commit will be
attributed to the author of the first commit. The suggested commit
-message for the folded commit is the concatenation of the commit
-messages of the first commit and of those with the "squash" command,
-but omits the commit messages of commits with the "fixup" command.
+message for the folded commit is the concatenation of the first
+commit's message with those identified by "squash" commands, omitting the
+messages of commits identified by "fixup" commands, unless "fixup -c"
+is used. In that case the suggested commit message is only the message
+of the "fixup -c" commit, and an editor is opened allowing you to edit
+the message. The contents (patch) of the "fixup -c" commit are still
+incorporated into the folded commit. If there is more than one "fixup -c"
+commit, the message from the final one is used. You can also use
+"fixup -C" to get the same behavior as "fixup -c" except without opening
+an editor.
+
'git rebase' will stop when "pick" has been replaced with "edit" or
when a command fails due to merge errors. When you are done editing
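
A hedged sketch of the fixup/amend workflow described above; the revisions are examples only:

    git commit --fixup=amend:HEAD~2     # records an "amend! ..." commit
    git rebase -i --autosquash HEAD~4   # moves it and uses "fixup -C" in the todo list
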
diff --git a/Documentation/git-repack.txt b/Documentation/git-repack.txt
index fbd4b4a..317d63c 100644
--- a/Documentation/git-repack.txt
+++ b/Documentation/git-repack.txt
@@ -165,6 +165,29 @@ depth is 4095.
Pass the `--delta-islands` option to `git-pack-objects`, see
linkgit:git-pack-objects[1].
+-g=<factor>::
+--geometric=<factor>::
+ Arrange resulting pack structure so that each successive pack
+ contains at least `<factor>` times the number of objects as the
+ next-largest pack.
++
+`git repack` ensures this by determining a "cut" of packfiles that need
+to be repacked into one in order to maintain a geometric progression. It
+picks the smallest set of packfiles such that as many of the larger
+packfiles (by count of objects contained in that pack) as possible may
+be left intact.
++
+Unlike other repack modes, the set of objects to pack is determined
+uniquely by the set of packs being "rolled-up"; in other words, by the
+packs that need to be combined in order to restore a geometric
+progression.
++
+When `--unpacked` is specified, loose objects are implicitly included in
+this "roll-up", without respect to their reachability. This is subject
+to change in the future. This option (implying a drastically different
+repack mode) is not guaranteed to work with all other combinations of
+options to `git repack`.
+
CONFIGURATION
-------------
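
A rough usage sketch of the new mode (the factor 2 is an arbitrary choice); `-d` removes the redundant packs that were rolled up:

    git repack --geometric=2 -d
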
diff --git a/Documentation/git-stash.txt b/Documentation/git-stash.txt
index f1197d6..a8c8c32 100644
--- a/Documentation/git-stash.txt
+++ b/Documentation/git-stash.txt
@@ -9,7 +9,7 @@ SYNOPSIS
--------
[verse]
'git stash' list [<log-options>]
-'git stash' show [<diff-options>] [<stash>]
+'git stash' show [-u|--include-untracked|--only-untracked] [<diff-options>] [<stash>]
'git stash' drop [-q|--quiet] [<stash>]
'git stash' ( pop | apply ) [--index] [-q|--quiet] [<stash>]
'git stash' branch <branchname> [<stash>]
@@ -83,7 +83,7 @@ stash@{1}: On master: 9cc0589... Add git-stash
The command takes options applicable to the 'git log'
command to control what is shown and how. See linkgit:git-log[1].
-show [<diff-options>] [<stash>]::
+show [-u|--include-untracked|--only-untracked] [<diff-options>] [<stash>]::
Show the changes recorded in the stash entry as a diff between the
stashed contents and the commit back when the stash entry was first
@@ -91,8 +91,8 @@ show [<diff-options>] [<stash>]::
By default, the command shows the diffstat, but it will accept any
format known to 'git diff' (e.g., `git stash show -p stash@{1}`
to view the second most recent entry in patch form).
- You can use stash.showStat and/or stash.showPatch config variables
- to change the default behavior.
+ You can use stash.showIncludeUntracked, stash.showStat, and
+ stash.showPatch config variables to change the default behavior.
pop [--index] [-q|--quiet] [<stash>]::
@@ -160,10 +160,18 @@ up with `git clean`.
-u::
--include-untracked::
- This option is only valid for `push` and `save` commands.
+--no-include-untracked::
+ When used with the `push` and `save` commands,
+ all untracked files are also stashed and then cleaned up with
+ `git clean`.
++
+When used with the `show` command, show the untracked files in the stash
+entry as part of the diff.
+
+--only-untracked::
+ This option is only valid for the `show` command.
+
-All untracked files are also stashed and then cleaned up with
-`git clean`.
+Show only the untracked files in the stash entry as part of the diff.
--index::
This option is only valid for `pop` and `apply` commands.
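
A brief sketch of the new `show` options; the stash selector is only an example:

    git stash show --include-untracked 'stash@{0}'   # tracked and untracked changes
    git stash show --only-untracked 'stash@{0}'      # untracked files only
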
diff --git a/Documentation/gitattributes.txt b/Documentation/gitattributes.txt
index e84e104..cfcfa80 100644
--- a/Documentation/gitattributes.txt
+++ b/Documentation/gitattributes.txt
@@ -845,6 +845,8 @@ patterns are available:
- `rust` suitable for source code in the Rust language.
+- `scheme` suitable for source code in the Scheme language.
+
- `tex` suitable for source code for LaTeX documents.
@@ -1174,7 +1176,8 @@ tag then no replacement will be done. The placeholders are the same
as those for the option `--pretty=format:` of linkgit:git-log[1],
except that they need to be wrapped like this: `$Format:PLACEHOLDERS$`
in the file. E.g. the string `$Format:%H$` will be replaced by the
-commit hash.
+commit hash. However, only one `%(describe)` placeholder is expanded
+per archive to avoid denial-of-service attacks.
Packing objects
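
A hedged illustration of the expansion limit, using hypothetical file names; only the first `%(describe)` placeholder in an archive is expanded:

    echo 'VERSION export-subst' >.gitattributes
    printf '$Format:%%(describe:match=v*)$\n' >VERSION
    git add .gitattributes VERSION && git commit -m 'embed a describe stamp'
    git archive HEAD | tar -xOf - VERSION
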
diff --git a/Documentation/gitdiffcore.txt b/Documentation/gitdiffcore.txt
index 1c72696..0d57f86 100644
--- a/Documentation/gitdiffcore.txt
+++ b/Documentation/gitdiffcore.txt
@@ -187,7 +187,7 @@ mark a file pair as a rename and stop considering other candidates for
better matches. At most, one comparison is done per file in this
preliminary pass; so if there are several remaining ext.txt files
throughout the directory hierarchy after exact rename detection, this
-preliminary step will be skipped for those files.
+preliminary step may be skipped for those files.
Note. When the "-C" option is used with `--find-copies-harder`
option, 'git diff-{asterisk}' commands feed unmodified filepairs to
diff --git a/Documentation/githooks.txt b/Documentation/githooks.txt
index 1f3b57d..b51959f 100644
--- a/Documentation/githooks.txt
+++ b/Documentation/githooks.txt
@@ -138,7 +138,7 @@ given); `template` (if a `-t` option was given or the
configuration option `commit.template` is set); `merge` (if the
commit is a merge or a `.git/MERGE_MSG` file exists); `squash`
(if a `.git/SQUASH_MSG` file exists); or `commit`, followed by
-a commit SHA-1 (if a `-c`, `-C` or `--amend` option was given).
+a commit object name (if a `-c`, `-C` or `--amend` option was given).
If the exit status is non-zero, `git commit` will abort.
@@ -231,19 +231,19 @@ named remote is not being used both values will be the same.
Information about what is to be pushed is provided on the hook's standard
input with lines of the form:
- <local ref> SP <local sha1> SP <remote ref> SP <remote sha1> LF
+ <local ref> SP <local object name> SP <remote ref> SP <remote object name> LF
For instance, if the command +git push origin master:foreign+ were run the
hook would receive a line like the following:
refs/heads/master 67890 refs/heads/foreign 12345
-although the full, 40-character SHA-1s would be supplied. If the foreign ref
-does not yet exist the `<remote SHA-1>` will be 40 `0`. If a ref is to be
-deleted, the `<local ref>` will be supplied as `(delete)` and the `<local
-SHA-1>` will be 40 `0`. If the local commit was specified by something other
-than a name which could be expanded (such as `HEAD~`, or a SHA-1) it will be
-supplied as it was originally given.
+although the full object name would be supplied. If the foreign ref does not
+yet exist the `<remote object name>` will be the all-zeroes object name. If a
+ref is to be deleted, the `<local ref>` will be supplied as `(delete)` and the
+`<local object name>` will be the all-zeroes object name. If the local commit
+was specified by something other than a name which could be expanded (such as
+`HEAD~`, or an object name) it will be supplied as it was originally given.
If this hook exits with a non-zero status, `git push` will abort without
pushing anything. Information about why the push is rejected may be sent
@@ -268,7 +268,7 @@ input a line of the format:
where `<old-value>` is the old object name stored in the ref,
`<new-value>` is the new object name to be stored in the ref and
`<ref-name>` is the full name of the ref.
-When creating a new ref, `<old-value>` is 40 `0`.
+When creating a new ref, `<old-value>` is the all-zeroes object name.
If the hook exits with non-zero status, none of the refs will be
updated. If the hook exits with zero, updating of individual refs can
@@ -473,7 +473,8 @@ reference-transaction
This hook is invoked by any Git command that performs reference
updates. It executes whenever a reference transaction is prepared,
-committed or aborted and may thus get called multiple times.
+committed or aborted and may thus get called multiple times. The hook
+does not cover symbolic references (but that may change in the future).
The hook takes exactly one argument, which is the current state the
given reference transaction is in:
@@ -492,6 +493,14 @@ receives on standard input a line of the format:
<old-value> SP <new-value> SP <ref-name> LF
+where `<old-value>` is the old object name passed into the reference
+transaction, `<new-value>` is the new object name to be stored in the
+ref and `<ref-name>` is the full name of the ref. When force updating
+the reference regardless of its current value or when the reference is
+to be created anew, `<old-value>` is the all-zeroes object name. To
+distinguish these cases, you can inspect the current value of
+`<ref-name>` via `git rev-parse`.
+
The exit status of the hook is ignored for any state except for the
"prepared" state. In the "prepared" state, a non-zero exit status will
cause the transaction to be aborted. The hook will not be called with
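
A minimal hook sketch based on the description above; it assumes SHA-1 (40 zeroes) and the usual hook path, and is not part of this patch:

    #!/bin/sh
    # .git/hooks/reference-transaction; $1 is "prepared", "committed" or "aborted"
    zero=0000000000000000000000000000000000000000
    while read old new ref
    do
        if test "$old" = "$zero"
        then
            if git rev-parse -q --verify "$ref" >/dev/null
            then
                echo >&2 "$ref: forced update ($1)"
            else
                echo >&2 "$ref: creation ($1)"
            fi
        fi
    done
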
@@ -550,7 +559,7 @@ command-dependent arguments may be passed in the future.
The hook receives a list of the rewritten commits on stdin, in the
format
- <old-sha1> SP <new-sha1> [ SP <extra-info> ] LF
+ <old-object-name> SP <new-object-name> [ SP <extra-info> ] LF
The 'extra-info' is again command-dependent. If it is empty, the
preceding SP is also omitted. Currently, no commands pass any
@@ -566,7 +575,7 @@ rebase::
For the 'squash' and 'fixup' operation, all commits that were
squashed are listed as being rewritten to the squashed commit.
This means that there will be several lines sharing the same
- 'new-sha1'.
+ 'new-object-name'.
+
The commits are guaranteed to be listed in the order that they were
processed by rebase.
diff --git a/Documentation/gitignore.txt b/Documentation/gitignore.txt
index d47b1ae..5751603 100644
--- a/Documentation/gitignore.txt
+++ b/Documentation/gitignore.txt
@@ -153,7 +153,7 @@ EXAMPLES
--------
- The pattern `hello.*` matches any file or folder
- whose name begins with `hello`. If one wants to restrict
+ whose name begins with `hello.`. If one wants to restrict
this only to the directory and not in its subdirectories,
one can prepend the pattern with a slash, i.e. `/hello.*`;
the pattern now matches `hello.txt`, `hello.c` but not
diff --git a/Documentation/gitweb.conf.txt b/Documentation/gitweb.conf.txt
index 7963a79..34b1d6e 100644
--- a/Documentation/gitweb.conf.txt
+++ b/Documentation/gitweb.conf.txt
@@ -751,6 +751,17 @@ default font sizes or lineheights are changed (e.g. via adding extra
CSS stylesheet in `@stylesheets`), it may be appropriate to change
these values.
+email-privacy::
+ Redact e-mail addresses from the generated HTML, etc. content.
+ This obscures e-mail addresses retrieved from the author/committer
+ and comment sections of the Git log.
+ It is meant to hinder web crawlers that harvest and abuse addresses.
+ Such crawlers may not respect robots.txt.
+ Note that users and user tools also see the addresses as redacted.
+ If Gitweb is not the final step in a workflow then subsequent steps
+ may misbehave because of the redacted information they receive.
+ Disabled by default.
+
highlight::
Server-side syntax highlight support in "blob" view. It requires
`$highlight_bin` program to be available (see the description of
diff --git a/Documentation/howto/coordinate-embargoed-releases.txt b/Documentation/howto/coordinate-embargoed-releases.txt
new file mode 100644
index 0000000..601aae8
--- /dev/null
+++ b/Documentation/howto/coordinate-embargoed-releases.txt
@@ -0,0 +1,131 @@
+Content-type: text/asciidoc
+Abstract: When a critical vulnerability is discovered and fixed, we follow this
+ script to coordinate a public release.
+
+How we coordinate embargoed releases
+====================================
+
+To protect Git users from critical vulnerabilities, we do not just release
+fixed versions like regular maintenance releases. Instead, we coordinate
+releases with packagers, keeping the fixes under an embargo until the release
+date. That way, users will have a chance to upgrade on that date, no matter
+what Operating System or distribution they run.
+
+Open a Security Advisory draft
+------------------------------
+
+The first step is to https://github.com/git/git/security/advisories/new[open an
+advisory]. Technically, it is not necessary, but it is convenient and saves a
+bit of hassle. This advisory can also be used to obtain the CVE number and it
+will give us a private fork associated with it that can be used to collaborate
+on a fix.
+
+Release date of the embargoed version
+-------------------------------------
+
+If the vulnerability affects Windows users, we want to have our friends over at
+Visual Studio on board. This means we need to target a "Patch Tuesday" (i.e. a
+second Tuesday of the month), at the minimum three weeks from heads-up to
+coordinated release.
+
+If the vulnerability affects the server side, or can benefit from scans on the
+server side (i.e. if `git fsck` can detect an attack), it is important to give
+all involved Git repository hosting sites enough time to scan all of those
+repositories.
+
+Notifying the Linux distributions
+---------------------------------
+
+At most two weeks before release date, we need to send a notification to
+distros@vs.openwall.org, preferably less than 7 days before the release date.
+This will reach most (all?) Linux distributions. See an example below, and the
+guidelines for this mailing list at
+https://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists[here].
+
+Once the version has been published, we send a note about that to oss-security.
+As an example, see https://www.openwall.com/lists/oss-security/2019/12/13/1[the
+v2.24.1 mail];
+https://oss-security.openwall.org/wiki/mailing-lists/oss-security[Here] are
+their guidelines.
+
+The mail to oss-security should also describe the exploit, and give credit to
+the reporter(s): security researchers still receive too little respect for the
+invaluable service they provide, and public credit goes a long way to keep them
+paid by their respective organizations.
+
+Technically, describing any exploit can be delayed up to 7 days, but we usually
+refrain from doing that and include it right away.
+
+As a courtesy we typically attach a Git bundle (as `.tar.xz` because the list
+will drop `.bundle` attachments) in the mail to distros@ so that the involved
+parties can take care of integrating/backporting them. This bundle is typically
+created using a command like this:
+
+ git bundle create cve-xxx.bundle ^origin/master vA.B.C vD.E.F
+ tar cJvf cve-xxx.bundle.tar.xz cve-xxx.bundle
+
+Example mail to distros@vs.openwall.org
+---------------------------------------
+
+....
+To: distros@vs.openwall.org
+Cc: git-security@googlegroups.com, <other people involved in the report/fix>
+Subject: [vs] Upcoming Git security fix release
+
+Team,
+
+The Git project will release new versions on <date> at 10am Pacific Time or
+soon thereafter. I have attached a Git bundle (embedded in a `.tar.xz` to avoid
+it being dropped) which you can fetch into a clone of
+https://github.com/git/git via `git fetch --tags /path/to/cve-xxx.bundle`,
+containing the tags for versions <versions>.
+
+You can verify with `git tag -v <tag>` that the versions were signed by
+the Git maintainer, using the same GPG key as e.g. v2.24.0.
+
+Please use these tags to prepare `git` packages for your various
+distributions, using the appropriate tagged versions. The added test cases
+help verify the correctness.
+
+The addressed issues are:
+
+<list of CVEs with a short description, typically copy/pasted from Git's
+release notes, usually demo exploit(s), too>
+
+Credit for finding the vulnerability goes to <reporter>, credit for fixing
+it goes to <developer>.
+
+Thanks,
+<name>
+
+....
+
+Example mail to oss-security@lists.openwall.com
+-----------------------------------------------
+
+....
+To: oss-security@lists.openwall.com
+Cc: git-security@googlegroups.com, <other people involved in the report/fix>
+Subject: git: <copy from security advisory>
+
+Team,
+
+The Git project released new versions on <date>, addressing <CVE>.
+
+All supported platforms are affected in one way or another, and all Git
+versions all the way back to <version> are affected. The fixed versions are:
+<versions>.
+
+Link to the announcement: <link to lore.kernel.org/git>
+
+We highly recommend upgrading.
+
+The addressed issues are:
+* <list of CVEs and their explanations, along with demo exploits>
+
+Credit for finding the vulnerability goes to <reporter>, credit for fixing
+it goes to <developer>.
+
+Thanks,
+<name>
+....
diff --git a/Documentation/pretty-formats.txt b/Documentation/pretty-formats.txt
index 6b59e28..4513306 100644
--- a/Documentation/pretty-formats.txt
+++ b/Documentation/pretty-formats.txt
@@ -208,6 +208,19 @@ The placeholders are:
'%cs':: committer date, short format (`YYYY-MM-DD`)
'%d':: ref names, like the --decorate option of linkgit:git-log[1]
'%D':: ref names without the " (", ")" wrapping.
+'%(describe[:options])':: human-readable name, like
+ linkgit:git-describe[1]; empty string for
+ undescribable commits. The `describe` string
+ may be followed by a colon and zero or more
+ comma-separated options. Descriptions can be
+ inconsistent when tags are added or removed at
+ the same time.
++
+** 'match=<pattern>': Only consider tags matching the given
+ `glob(7)` pattern, excluding the "refs/tags/" prefix.
+** 'exclude=<pattern>': Do not consider tags matching the given
+ `glob(7)` pattern, excluding the "refs/tags/" prefix.
+
'%S':: ref name given on the command line by which the commit was reached
(like `git log --source`), only works with `git log`
'%e':: encoding
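
A short sketch of the new placeholder; the match and exclude patterns are examples only:

    git log -1 --format='%(describe:match=v[0-9]*,exclude=*rc*)'
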
diff --git a/Documentation/technical/api-error-handling.txt b/Documentation/technical/api-error-handling.txt
index ceeedd4..8be4f4d 100644
--- a/Documentation/technical/api-error-handling.txt
+++ b/Documentation/technical/api-error-handling.txt
@@ -1,8 +1,11 @@
Error reporting in git
======================
-`die`, `usage`, `error`, and `warning` report errors of various
-kinds.
+`BUG`, `die`, `usage`, `error`, and `warning` report errors of
+various kinds.
+
+- `BUG` is for failed internal assertions that should never happen,
+ i.e. a bug in git itself.
- `die` is for fatal application errors. It prints a message to
the user and exits with status 128.
@@ -20,6 +23,9 @@ kinds.
without running into too many problems. Like `error`, it
returns -1 after reporting the situation to the caller.
+These reports will be logged via the trace2 facility. See the "error"
+event in link:api-trace2.txt[trace2 API].
+
Customizable error handlers
---------------------------
diff --git a/Documentation/technical/api-simple-ipc.txt b/Documentation/technical/api-simple-ipc.txt
new file mode 100644
index 0000000..d79ad32
--- /dev/null
+++ b/Documentation/technical/api-simple-ipc.txt
@@ -0,0 +1,105 @@
+Simple-IPC API
+==============
+
+The Simple-IPC API is a collection of `ipc_` prefixed library routines
+and a basic communication protocol that allow an IPC-client process to
+send an application-specific IPC-request message to an IPC-server
+process and receive an application-specific IPC-response message.
+
+Communication occurs over a named pipe on Windows and a Unix domain
+socket on other platforms. IPC-clients and IPC-servers rendezvous at
+a previously agreed-to application-specific pathname (which is outside
+the scope of this design) that is local to the computer system.
+
+The IPC-server routines within the server application process create a
+thread pool to listen for connections and receive request messages
+from multiple concurrent IPC-clients. When received, these messages
+are dispatched up to the server application callbacks for handling.
+IPC-server routines then incrementally relay responses back to the
+IPC-client.
+
+The IPC-client routines within a client application process connect
+to the IPC-server and send a request message and wait for a response.
+When received, the response is returned to the caller.
+
+For example, the `fsmonitor--daemon` feature will be built as a server
+application on top of the IPC-server library routines. It will have
+threads watching for file system events and a thread pool waiting for
+client connections. Clients, such as `git status`, will request a list
+of file system events since a point in time and the server will
+respond with a list of changed files and directories. The formats of
+the request and response are application-specific; the IPC-client and
+IPC-server routines treat them as opaque byte streams.
+
+
+Comparison with sub-process model
+---------------------------------
+
+The Simple-IPC mechanism differs from the existing `sub-process.c`
+model (Documentation/technical/long-running-process-protocol.txt) that
+is used by applications like Git-LFS. In the LFS-style sub-process model
+the helper is started by the foreground process, communication happens
+via a pair of file descriptors bound to the stdin/stdout of the
+sub-process, the sub-process only serves the current foreground
+process, and the sub-process exits when the foreground process
+terminates.
+
+In the Simple-IPC model the server is a very long-running service. It
+can service many clients at the same time and has a private socket or
+named pipe connection to each active client. It might be started
+(on-demand) by the current client process or it might have been
+started by a previous client or by the OS at boot time. The server
+process is not associated with a terminal and it persists after
+clients terminate. Clients do not have access to the stdin/stdout of
+the server process and therefore must communicate over sockets or
+named pipes.
+
+
+Server startup and shutdown
+---------------------------
+
+How an application server based upon IPC-server is started is also
+outside the scope of the Simple-IPC design and is a property of the
+application using it. For example, the server might be started or
+restarted during routine maintenance operations, or it might be
+started as a system service during the system boot-up sequence, or it
+might be started on-demand by a foreground Git command when needed.
+
+Similarly, server shutdown is a property of the application using
+the simple-ipc routines. For example, the server might decide to
+shut down when idle or only upon explicit request.
+
+
+Simple-IPC protocol
+-------------------
+
+The Simple-IPC protocol consists of a single request message from the
+client and an optional response message from the server. Both the
+client and server messages are unlimited in length and are terminated
+with a flush packet.
+
+The pkt-line routines (Documentation/technical/protocol-common.txt)
+are used to simplify buffer management during message generation,
+transmission, and reception. A flush packet is used to mark the end
+of the message. This allows the sender to incrementally generate and
+transmit the message. It allows the receiver to incrementally receive
+the message in chunks and to know when they have received the entire
+message.
+
+The actual byte format of the client request and server response
+messages is application-specific. The IPC layer transmits and
+receives them as opaque byte buffers without any concern for the
+content within. It is the job of the calling application layer to
+understand the contents of the request and response messages.
+
+
+Summary
+-------
+
+Conceptually, the Simple-IPC protocol is similar to an HTTP REST
+request. Clients connect, make an application-specific and
+stateless request, receive an application-specific
+response, and disconnect. It is a one round trip facility for
+querying the server. The Simple-IPC routines hide the socket,
+named pipe, and thread pool details and allow the application
+layer to focus on the application at hand.
diff --git a/Documentation/technical/api-trace2.txt b/Documentation/technical/api-trace2.txt
index c65ffaf..3f52f98 100644
--- a/Documentation/technical/api-trace2.txt
+++ b/Documentation/technical/api-trace2.txt
@@ -465,7 +465,7 @@ completed.)
------------
`"error"`::
- This event is emitted when one of the `error()`, `die()`,
+ This event is emitted when one of the `BUG()`, `error()`, `die()`,
`warning()`, or `usage()` functions are called.
+
------------
diff --git a/Documentation/technical/multi-pack-index.txt b/Documentation/technical/multi-pack-index.txt
index e8e377a..fb68897 100644
--- a/Documentation/technical/multi-pack-index.txt
+++ b/Documentation/technical/multi-pack-index.txt
@@ -43,8 +43,9 @@ Design Details
a change in format.
- The MIDX keeps only one record per object ID. If an object appears
- in multiple packfiles, then the MIDX selects the copy in the most-
- recently modified packfile.
+ in multiple packfiles, then the MIDX selects the copy in the
+ preferred packfile, otherwise selecting from the most-recently
+ modified packfile.
- If there exist packfiles in the pack directory not registered in
the MIDX, then those packfiles are loaded into the `packed_git`
diff --git a/Documentation/technical/pack-format.txt b/Documentation/technical/pack-format.txt
index 1faa949..8d2f42f 100644
--- a/Documentation/technical/pack-format.txt
+++ b/Documentation/technical/pack-format.txt
@@ -379,3 +379,86 @@ CHUNK DATA:
TRAILER:
Index checksum of the above contents.
+
+== multi-pack-index reverse indexes
+
+Similar to the pack-based reverse index, the multi-pack index can also
+be used to generate a reverse index.
+
+Instead of mapping between offset, pack-, and index position, this
+reverse index maps between an object's position within the MIDX, and
+that object's position within a pseudo-pack that the MIDX describes
+(i.e., the ith entry of the multi-pack reverse index holds the MIDX
+position of the ith object in pseudo-pack order).
+
+To clarify the difference between these orderings, consider a multi-pack
+reachability bitmap (which does not yet exist, but is what we are
+building towards here). Each bit needs to correspond to an object in the
+MIDX, and so we need an efficient mapping from bit position to MIDX
+position.
+
+One solution is to let bits occupy the same position in the oid-sorted
+index stored by the MIDX. But because oids are effectively random, their
+resulting reachability bitmaps would have no locality, and thus compress
+poorly. (This is the reason that single-pack bitmaps use the pack
+ordering, and not the .idx ordering, for the same purpose.)
+
+So we'd like to define an ordering for the whole MIDX based around
+pack ordering, which has far better locality (and thus compresses more
+efficiently). We can think of a pseudo-pack created by the concatenation
+of all of the packs in the MIDX. E.g., if we had a MIDX with three packs
+(a, b, c), with 10, 15, and 20 objects respectively, we can imagine an
+ordering of the objects like:
+
+ |a,0|a,1|...|a,9|b,0|b,1|...|b,14|c,0|c,1|...|c,19|
+
+where the ordering of the packs is defined by the MIDX's pack list,
+and then the ordering of objects within each pack is the same as the
+order in the actual packfile.
+
+Given the list of packs and their counts of objects, you can
+naïvely reconstruct that pseudo-pack ordering (e.g., the object at
+position 27 must be (c,1) because packs "a" and "b" consumed 25 of the
+slots). But there's a catch. Objects may be duplicated between packs, in
+which case the MIDX only stores one pointer to the object (and thus we'd
+want only one slot in the bitmap).
+
+Callers could handle duplicates themselves by reading objects in order
+of their bit-position, but that's linear in the number of objects, and
+much too expensive for ordinary bitmap lookups. Building a reverse index
+solves this, since it is the logical inverse of the index, and that
+index has already removed duplicates. But, building a reverse index on
+the fly can be expensive. Since we already have an on-disk format for
+pack-based reverse indexes, let's reuse it for the MIDX's pseudo-pack,
+too.
+
+Objects from the MIDX are ordered as follows to string together the
+pseudo-pack. Let `pack(o)` return the pack from which `o` was selected
+by the MIDX, and define an ordering of packs based on their numeric ID
+(as stored by the MIDX). Let `offset(o)` return the object offset of `o`
+within `pack(o)`. Then, compare `o1` and `o2` as follows:
+
+ - If one of `pack(o1)` and `pack(o2)` is preferred and the other
+ is not, then the preferred one sorts first.
++
+(This is a detail that allows the MIDX bitmap to determine which
+pack should be used by the pack-reuse mechanism, since it can ask
+the MIDX for the pack containing the object at bit position 0).
+
+ - If `pack(o1) ≠ pack(o2)`, then sort the two objects in descending
+ order based on the pack ID.
+
+ - Otherwise, `pack(o1) = pack(o2)`, and the objects are sorted in
+ pack-order (i.e., `o1` sorts ahead of `o2` exactly when `offset(o1)
+ < offset(o2)`).
+
+In short, a MIDX's pseudo-pack is the de-duplicated concatenation of
+objects in packs stored by the MIDX, laid out in pack order, and the
+packs arranged in MIDX order (with the preferred pack coming first).
+
+Finally, note that the MIDX's reverse index is not stored as a chunk in
+the multi-pack-index itself. This is done because the reverse index
+includes the checksum of the pack or MIDX to which it belongs, which
+makes it impossible to write in the MIDX. To avoid races when rewriting
+the MIDX, a MIDX reverse index includes the MIDX's checksum in its
+filename (e.g., `multi-pack-index-xyz.rev`).
diff --git a/Documentation/technical/reftable.txt b/Documentation/technical/reftable.txt
index 3ef169a..d7c3b64 100644
--- a/Documentation/technical/reftable.txt
+++ b/Documentation/technical/reftable.txt
@@ -1011,8 +1011,13 @@ reftable stack, reload `tables.list`, and delete any tables no longer mentioned
in `tables.list`.
Irregular program exit may still leave about unused files. In this case, a
-cleanup operation can read `tables.list`, note its modification timestamp, and
-delete any unreferenced `*.ref` files that are older.
+cleanup operation should proceed as follows:
+
+* take a lock `tables.list.lock` to prevent concurrent modifications
+* refresh the reftable stack, by reading `tables.list`
+* for each `*.ref` file, remove it if
+** it is not mentioned in `tables.list`, and
+** its max update_index is not beyond the max update_index of the stack
Alternatives considered
diff --git a/Documentation/user-manual.txt b/Documentation/user-manual.txt
index fd480b8..f9e54b8 100644
--- a/Documentation/user-manual.txt
+++ b/Documentation/user-manual.txt
@@ -1,5 +1,8 @@
= Git User Manual
+[preface]
+== Introduction
+
Git is a fast distributed revision control system.
This manual is designed to be readable by someone with basic UNIX
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 7b99fe5..d2ff917 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.31.0-rc1
+DEF_VER=v2.31.GIT
LF='
'
diff --git a/INSTALL b/INSTALL
index 8474ad0..66389ce 100644
--- a/INSTALL
+++ b/INSTALL
@@ -197,7 +197,9 @@ Issues of note:
Building and installing the pdf file additionally requires
dblatex. Version >= 0.2.7 is known to work.
- All formats require at least asciidoc 8.4.1.
+ All formats require at least asciidoc 8.4.1. Alternatively, you can
+ use Asciidoctor (requires Ruby) by passing USE_ASCIIDOCTOR=YesPlease
+ to make. You need at least Asciidoctor version 1.5.
There are also "make quick-install-doc", "make quick-install-man"
and "make quick-install-html" which install preformatted man pages
diff --git a/Makefile b/Makefile
index 89b1d53..bebc734 100644
--- a/Makefile
+++ b/Makefile
@@ -578,7 +578,9 @@ GENERATED_H =
EXTRA_CPPFLAGS =
FUZZ_OBJS =
FUZZ_PROGRAMS =
+GIT_OBJS =
LIB_OBJS =
+OBJECTS =
PROGRAM_OBJS =
PROGRAMS =
EXCLUDED_PROGRAMS =
@@ -587,6 +589,7 @@ SCRIPT_PYTHON =
SCRIPT_SH =
SCRIPT_LIB =
TEST_BUILTINS_OBJS =
+TEST_OBJS =
TEST_PROGRAMS_NEED_X =
THIRD_PARTY_SOURCES =
@@ -662,6 +665,8 @@ ETAGS_TARGET = TAGS
FUZZ_OBJS += fuzz-commit-graph.o
FUZZ_OBJS += fuzz-pack-headers.o
FUZZ_OBJS += fuzz-pack-idx.o
+.PHONY: fuzz-objs
+fuzz-objs: $(FUZZ_OBJS)
# Always build fuzz objects even if not testing, to prevent bit-rot.
all:: $(FUZZ_OBJS)
@@ -679,6 +684,8 @@ PROGRAM_OBJS += http-backend.o
PROGRAM_OBJS += imap-send.o
PROGRAM_OBJS += sh-i18n--envsubst.o
PROGRAM_OBJS += shell.o
+.PHONY: program-objs
+program-objs: $(PROGRAM_OBJS)
# Binary suffix, set to .exe for Windows builds
X =
@@ -686,6 +693,7 @@ X =
PROGRAMS += $(patsubst %.o,git-%$X,$(PROGRAM_OBJS))
TEST_BUILTINS_OBJS += test-advise.o
+TEST_BUILTINS_OBJS += test-bitmap.o
TEST_BUILTINS_OBJS += test-bloom.o
TEST_BUILTINS_OBJS += test-chmtime.o
TEST_BUILTINS_OBJS += test-config.o
@@ -737,6 +745,7 @@ TEST_BUILTINS_OBJS += test-serve-v2.o
TEST_BUILTINS_OBJS += test-sha1.o
TEST_BUILTINS_OBJS += test-sha256.o
TEST_BUILTINS_OBJS += test-sigchain.o
+TEST_BUILTINS_OBJS += test-simple-ipc.o
TEST_BUILTINS_OBJS += test-strcmp-offset.o
TEST_BUILTINS_OBJS += test-string-list.o
TEST_BUILTINS_OBJS += test-submodule-config.o
@@ -744,6 +753,7 @@ TEST_BUILTINS_OBJS += test-submodule-nested-repo-config.o
TEST_BUILTINS_OBJS += test-subprocess.o
TEST_BUILTINS_OBJS += test-trace2.o
TEST_BUILTINS_OBJS += test-urlmatch-normalization.o
+TEST_BUILTINS_OBJS += test-userdiff.o
TEST_BUILTINS_OBJS += test-wildmatch.o
TEST_BUILTINS_OBJS += test-windows-named-pipe.o
TEST_BUILTINS_OBJS += test-write-cache.o
@@ -1673,6 +1683,14 @@ ifdef NO_UNIX_SOCKETS
BASIC_CFLAGS += -DNO_UNIX_SOCKETS
else
LIB_OBJS += unix-socket.o
+ LIB_OBJS += unix-stream-server.o
+ LIB_OBJS += compat/simple-ipc/ipc-shared.o
+ LIB_OBJS += compat/simple-ipc/ipc-unix-socket.o
+endif
+
+ifdef USE_WIN32_IPC
+ LIB_OBJS += compat/simple-ipc/ipc-shared.o
+ LIB_OBJS += compat/simple-ipc/ipc-win32.o
endif
ifdef NO_ICONV
@@ -2187,13 +2205,13 @@ $(BUILT_INS): git$X
config-list.h: generate-configlist.sh
-config-list.h:
+config-list.h: Documentation/*config.txt Documentation/config/*.txt
$(QUIET_GEN)$(SHELL_PATH) ./generate-configlist.sh \
>$@+ && mv $@+ $@
command-list.h: generate-cmdlist.sh command-list.txt
-command-list.h: $(wildcard Documentation/git*.txt) Documentation/*config.txt Documentation/config/*.txt
+command-list.h: $(wildcard Documentation/git*.txt)
$(QUIET_GEN)$(SHELL_PATH) ./generate-cmdlist.sh \
$(patsubst %,--exclude-program %,$(EXCLUDED_PROGRAMS)) \
command-list.txt >$@+ && mv $@+ $@
@@ -2379,16 +2397,30 @@ XDIFF_OBJS += xdiff/xmerge.o
XDIFF_OBJS += xdiff/xpatience.o
XDIFF_OBJS += xdiff/xprepare.o
XDIFF_OBJS += xdiff/xutils.o
+.PHONY: xdiff-objs
+xdiff-objs: $(XDIFF_OBJS)
TEST_OBJS := $(patsubst %$X,%.o,$(TEST_PROGRAMS)) $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS))
-OBJECTS := $(LIB_OBJS) $(BUILTIN_OBJS) $(PROGRAM_OBJS) $(TEST_OBJS) \
- $(XDIFF_OBJS) \
- $(FUZZ_OBJS) \
- common-main.o \
- git.o
+.PHONY: test-objs
+test-objs: $(TEST_OBJS)
+
+GIT_OBJS += $(LIB_OBJS)
+GIT_OBJS += $(BUILTIN_OBJS)
+GIT_OBJS += common-main.o
+GIT_OBJS += git.o
+.PHONY: git-objs
+git-objs: $(GIT_OBJS)
+
+OBJECTS += $(GIT_OBJS)
+OBJECTS += $(PROGRAM_OBJS)
+OBJECTS += $(TEST_OBJS)
+OBJECTS += $(XDIFF_OBJS)
+OBJECTS += $(FUZZ_OBJS)
ifndef NO_CURL
OBJECTS += http.o http-walker.o remote-curl.o
endif
+.PHONY: objects
+objects: $(OBJECTS)
dep_files := $(foreach f,$(OBJECTS),$(dir $f).depend/$(notdir $f).d)
dep_dirs := $(addsuffix .depend,$(sort $(dir $(OBJECTS))))
@@ -2670,12 +2702,14 @@ FIND_SOURCE_FILES = ( \
)
$(ETAGS_TARGET): FORCE
- $(RM) $(ETAGS_TARGET)
- $(FIND_SOURCE_FILES) | xargs etags -a -o $(ETAGS_TARGET)
+ $(QUIET_GEN)$(RM) "$(ETAGS_TARGET)+" && \
+ $(FIND_SOURCE_FILES) | xargs etags -a -o "$(ETAGS_TARGET)+" && \
+ mv "$(ETAGS_TARGET)+" "$(ETAGS_TARGET)"
tags: FORCE
- $(RM) tags
- $(FIND_SOURCE_FILES) | xargs ctags -a
+ $(QUIET_GEN)$(RM) tags+ && \
+ $(FIND_SOURCE_FILES) | xargs ctags -a -o tags+ && \
+ mv tags+ tags
cscope:
$(RM) cscope*
@@ -3300,11 +3334,11 @@ cover_db_html: cover_db
# are not necessarily appropriate for general builds, and that vary greatly
# depending on the compiler version used.
#
-# An example command to build against libFuzzer from LLVM 4.0.0:
+# An example command to build against libFuzzer from LLVM 11.0.0:
#
# make CC=clang CXX=clang++ \
-# CFLAGS="-fsanitize-coverage=trace-pc-guard -fsanitize=address" \
-# LIB_FUZZING_ENGINE=/usr/lib/llvm-4.0/lib/libFuzzer.a \
+# CFLAGS="-fsanitize=fuzzer-no-link,address" \
+# LIB_FUZZING_ENGINE="-fsanitize=fuzzer" \
# fuzz-all
#
FUZZ_CXXFLAGS ?= $(CFLAGS)
diff --git a/RelNotes b/RelNotes
index 3324fc0..aece21e 120000
--- a/RelNotes
+++ b/RelNotes
@@ -1 +1 @@
-Documentation/RelNotes/2.31.0.txt \ No newline at end of file
+Documentation/RelNotes/2.32.0.txt \ No newline at end of file
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..c720c2a
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,51 @@
+# Security Policy
+
+## Reporting a vulnerability
+
+Please send a detailed mail to git-security@googlegroups.com to
+report vulnerabilities in Git.
+
+Even when unsure whether the bug in question is an exploitable
+vulnerability, it is recommended to send the report to
+git-security@googlegroups.com (and obviously not to discuss the
+issue anywhere else).
+
+Vulnerabilities are expected to be discussed _only_ on that
+list, and not in public, until the official announcement on the
+Git mailing list on the release date.
+
+Examples for details to include:
+
+- Ideally a short description (or a script) to demonstrate an
+ exploit.
+- The affected platforms and scenarios (the vulnerability might
+ only affect setups with case-sensitive file systems, for
+ example).
+- The name and affiliation of the security researchers who are
+ involved in the discovery, if any.
+- Whether the vulnerability has already been disclosed.
+- How long an embargo would be required to be safe.
+
+## Supported Versions
+
+There are no official "Long Term Support" versions in Git.
+Instead, the maintenance track (i.e. the versions based on the
+most recently published feature release, also known as ".0"
+version) sees occasional updates with bug fixes.
+
+Fixes to vulnerabilities are made for the maintenance track for
+the latest feature release and merged up to the in-development
+branches. The Git project makes no formal guarantee for any
+older maintenance tracks to receive updates. In practice,
+though, critical vulnerability fixes are applied not only to the
+most recent track, but to at least a couple more maintenance
+tracks.
+
+This is typically done by making the fix on the oldest and still
+relevant maintenance track, and merging it upwards to newer and
+newer maintenance tracks.
+
+For example, v2.24.1 was released to address a couple of
+[CVEs](https://cve.mitre.org/), and at the same time v2.14.6,
+v2.15.4, v2.16.6, v2.17.3, v2.18.2, v2.19.3, v2.20.2, v2.21.1,
+v2.22.2 and v2.23.1 were released.
diff --git a/add-interactive.c b/add-interactive.c
index 9b8cdb4..36ebdbd 100644
--- a/add-interactive.c
+++ b/add-interactive.c
@@ -413,7 +413,7 @@ struct file_item {
static void add_file_item(struct string_list *files, const char *name)
{
- struct file_item *item = xcalloc(sizeof(*item), 1);
+ struct file_item *item = xcalloc(1, sizeof(*item));
string_list_append(files, name)->util = item;
}
@@ -476,7 +476,7 @@ static void collect_changes_cb(struct diff_queue_struct *q,
add_file_item(s->files, name);
- entry = xcalloc(sizeof(*entry), 1);
+ CALLOC_ARRAY(entry, 1);
hashmap_entry_init(&entry->ent, hash);
entry->name = s->files->items[s->files->nr - 1].string;
entry->item = s->files->items[s->files->nr - 1].util;
@@ -1120,7 +1120,7 @@ int run_add_i(struct repository *r, const struct pathspec *ps)
int res = 0;
for (i = 0; i < ARRAY_SIZE(command_list); i++) {
- struct command_item *util = xcalloc(sizeof(*util), 1);
+ struct command_item *util = xcalloc(1, sizeof(*util));
util->command = command_list[i].command;
string_list_append(&commands.items, command_list[i].string)
->util = util;
diff --git a/apply.c b/apply.c
index 668b16e..8c5b298 100644
--- a/apply.c
+++ b/apply.c
@@ -21,6 +21,7 @@
#include "quote.h"
#include "rerere.h"
#include "apply.h"
+#include "entry.h"
struct gitdiff_data {
struct strbuf *root;
@@ -133,8 +134,6 @@ int check_apply_state(struct apply_state *state, int force_apply)
if (state->apply_with_reject && state->threeway)
return error(_("--reject and --3way cannot be used together."));
- if (state->cached && state->threeway)
- return error(_("--cached and --3way cannot be used together."));
if (state->threeway) {
if (is_not_gitdir)
return error(_("--3way outside a repository"));
@@ -1781,7 +1780,7 @@ static int parse_single_patch(struct apply_state *state,
struct fragment *fragment;
int len;
- fragment = xcalloc(1, sizeof(*fragment));
+ CALLOC_ARRAY(fragment, 1);
fragment->linenr = state->linenr;
len = parse_fragment(state, line, size, patch, fragment);
if (len <= 0) {
@@ -1959,7 +1958,7 @@ static struct fragment *parse_binary_hunk(struct apply_state *state,
size -= llen;
}
- frag = xcalloc(1, sizeof(*frag));
+ CALLOC_ARRAY(frag, 1);
frag->patch = inflate_it(data, hunk_size, origlen);
frag->free_patch = 1;
if (!frag->patch)
@@ -3569,10 +3568,10 @@ static int try_threeway(struct apply_state *state,
write_object_file("", 0, blob_type, &pre_oid);
else if (get_oid(patch->old_oid_prefix, &pre_oid) ||
read_blob_object(&buf, &pre_oid, patch->old_mode))
- return error(_("repository lacks the necessary blob to fall back on 3-way merge."));
+ return error(_("repository lacks the necessary blob to perform 3-way merge."));
if (state->apply_verbosity > verbosity_silent)
- fprintf(stderr, _("Falling back to three-way merge...\n"));
+ fprintf(stderr, _("Performing three-way merge...\n"));
img = strbuf_detach(&buf, &len);
prepare_image(&tmp_image, img, len, 1);
@@ -3604,7 +3603,7 @@ static int try_threeway(struct apply_state *state,
if (status < 0) {
if (state->apply_verbosity > verbosity_silent)
fprintf(stderr,
- _("Failed to fall back on three-way merge...\n"));
+ _("Failed to perform three-way merge...\n"));
return status;
}
@@ -3637,10 +3636,9 @@ static int apply_data(struct apply_state *state, struct patch *patch,
if (load_preimage(state, &image, patch, st, ce) < 0)
return -1;
- if (patch->direct_to_threeway ||
- apply_fragments(state, &image, patch) < 0) {
+ if (!state->threeway || try_threeway(state, &image, patch, st, ce) < 0) {
/* Note: with --reject, apply_fragments() returns 0 */
- if (!state->threeway || try_threeway(state, &image, patch, st, ce) < 0)
+ if (patch->direct_to_threeway || apply_fragments(state, &image, patch) < 0)
return -1;
}
patch->result = image.buf;
@@ -4646,7 +4644,12 @@ static int write_out_results(struct apply_state *state, struct patch *list)
}
string_list_clear(&cpath, 0);
- repo_rerere(state->repo, 0);
+ /*
+ * rerere relies on the partially merged result being in the working
+ * tree with conflict markers, but that isn't written with --cached.
+ */
+ if (!state->cached)
+ repo_rerere(state->repo, 0);
}
return errs;
@@ -4681,7 +4684,7 @@ static int apply_patch(struct apply_state *state,
struct patch *patch;
int nr;
- patch = xcalloc(1, sizeof(*patch));
+ CALLOC_ARRAY(patch, 1);
patch->inaccurate_eof = !!(options & APPLY_OPT_INACCURATE_EOF);
patch->recount = !!(options & APPLY_OPT_RECOUNT);
nr = parse_chunk(state, buf.buf + offset, buf.len - offset, patch);
@@ -5017,7 +5020,7 @@ int apply_parse_options(int argc, const char **argv,
OPT_BOOL(0, "apply", force_apply,
N_("also apply the patch (use with --stat/--summary/--check)")),
OPT_BOOL('3', "3way", &state->threeway,
- N_( "attempt three-way merge if a patch does not apply")),
+ N_( "attempt three-way merge, fall back on normal patch if that fails")),
OPT_FILENAME(0, "build-fake-ancestor", &state->fake_ancestor,
N_("build a temporary index based on embedded index information")),
/* Think twice before adding "--nul" synonym to this */
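
The apply.c changes above let `--3way` attempt the merge first (falling back to a normal apply) and allow combining it with `--cached`; a hedged command-line sketch with an assumed patch file:

    git apply --3way --cached fix.patch
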
diff --git a/archive-tar.c b/archive-tar.c
index a971fdc..05d2455 100644
--- a/archive-tar.c
+++ b/archive-tar.c
@@ -371,7 +371,7 @@ static int tar_filter_config(const char *var, const char *value, void *data)
ar = find_tar_filter(name, namelen);
if (!ar) {
- ar = xcalloc(1, sizeof(*ar));
+ CALLOC_ARRAY(ar, 1);
ar->name = xmemdupz(name, namelen);
ar->write_archive = write_tar_filter_archive;
ar->flags = ARCHIVER_WANT_COMPRESSION_LEVELS |
diff --git a/archive.c b/archive.c
index 4921dc8..2956155 100644
--- a/archive.c
+++ b/archive.c
@@ -37,13 +37,10 @@ void init_archivers(void)
static void format_subst(const struct commit *commit,
const char *src, size_t len,
- struct strbuf *buf)
+ struct strbuf *buf, struct pretty_print_context *ctx)
{
char *to_free = NULL;
struct strbuf fmt = STRBUF_INIT;
- struct pretty_print_context ctx = {0};
- ctx.date_mode.type = DATE_NORMAL;
- ctx.abbrev = DEFAULT_ABBREV;
if (src == buf->buf)
to_free = strbuf_detach(buf, NULL);
@@ -61,7 +58,7 @@ static void format_subst(const struct commit *commit,
strbuf_add(&fmt, b + 8, c - b - 8);
strbuf_add(buf, src, b - src);
- format_commit_message(commit, fmt.buf, buf, &ctx);
+ format_commit_message(commit, fmt.buf, buf, ctx);
len -= c + 1 - src;
src = c + 1;
}
@@ -94,7 +91,7 @@ static void *object_file_to_archive(const struct archiver_args *args,
strbuf_attach(&buf, buffer, *sizep, *sizep + 1);
convert_to_working_tree(args->repo->index, path, buf.buf, buf.len, &buf, &meta);
if (commit)
- format_subst(commit, buf.buf, buf.len, &buf);
+ format_subst(commit, buf.buf, buf.len, &buf, args->pretty_ctx);
buffer = strbuf_detach(&buf, &size);
*sizep = size;
}
@@ -631,12 +628,19 @@ int write_archive(int argc, const char **argv, const char *prefix,
const char *name_hint, int remote)
{
const struct archiver *ar = NULL;
+ struct pretty_print_describe_status describe_status = {0};
+ struct pretty_print_context ctx = {0};
struct archiver_args args;
int rc;
git_config_get_bool("uploadarchive.allowunreachable", &remote_allow_unreachable);
git_config(git_default_config, NULL);
+ describe_status.max_invocations = 1;
+ ctx.date_mode.type = DATE_NORMAL;
+ ctx.abbrev = DEFAULT_ABBREV;
+ ctx.describe_status = &describe_status;
+ args.pretty_ctx = &ctx;
args.repo = repo;
args.prefix = prefix;
string_list_init(&args.extra_files, 1);
diff --git a/archive.h b/archive.h
index 33551b7..49fab71 100644
--- a/archive.h
+++ b/archive.h
@@ -5,6 +5,7 @@
#include "pathspec.h"
struct repository;
+struct pretty_print_context;
struct archiver_args {
struct repository *repo;
@@ -22,6 +23,7 @@ struct archiver_args {
unsigned int convert : 1;
int compression_level;
struct string_list extra_files;
+ struct pretty_print_context *pretty_ctx;
};
/* main api */
diff --git a/attr.c b/attr.c
index 8de5532..9e897e4 100644
--- a/attr.c
+++ b/attr.c
@@ -278,6 +278,10 @@ struct match_attr {
static const char blank[] = " \t\r\n";
+/* Flags usable in read_attr() and parse_attr_line() family of functions. */
+#define READ_ATTR_MACRO_OK (1<<0)
+#define READ_ATTR_NOFOLLOW (1<<1)
+
/*
* Parse a whitespace-delimited attribute state (i.e., "attr",
* "-attr", "!attr", or "attr=value") from the string starting at src.
@@ -331,7 +335,7 @@ static const char *parse_attr(const char *src, int lineno, const char *cp,
}
static struct match_attr *parse_attr_line(const char *line, const char *src,
- int lineno, int macro_ok)
+ int lineno, unsigned flags)
{
int namelen;
int num_attr, i;
@@ -355,7 +359,7 @@ static struct match_attr *parse_attr_line(const char *line, const char *src,
if (strlen(ATTRIBUTE_MACRO_PREFIX) < namelen &&
starts_with(name, ATTRIBUTE_MACRO_PREFIX)) {
- if (!macro_ok) {
+ if (!(flags & READ_ATTR_MACRO_OK)) {
fprintf_ln(stderr, _("%s not allowed: %s:%d"),
name, src, lineno);
goto fail_return;
@@ -569,7 +573,7 @@ struct attr_check *attr_check_initl(const char *one, ...)
check = attr_check_alloc();
check->nr = cnt;
check->alloc = cnt;
- check->items = xcalloc(cnt, sizeof(struct attr_check_item));
+ CALLOC_ARRAY(check->items, cnt);
check->items[0].attr = git_attr(one);
va_start(params, one);
@@ -653,11 +657,11 @@ static void handle_attr_line(struct attr_stack *res,
const char *line,
const char *src,
int lineno,
- int macro_ok)
+ unsigned flags)
{
struct match_attr *a;
- a = parse_attr_line(line, src, lineno, macro_ok);
+ a = parse_attr_line(line, src, lineno, flags);
if (!a)
return;
ALLOC_GROW(res->attrs, res->num_matches + 1, res->alloc);
@@ -670,9 +674,10 @@ static struct attr_stack *read_attr_from_array(const char **list)
const char *line;
int lineno = 0;
- res = xcalloc(1, sizeof(*res));
+ CALLOC_ARRAY(res, 1);
while ((line = *(list++)) != NULL)
- handle_attr_line(res, line, "[builtin]", ++lineno, 1);
+ handle_attr_line(res, line, "[builtin]", ++lineno,
+ READ_ATTR_MACRO_OK);
return res;
}
@@ -698,21 +703,31 @@ void git_attr_set_direction(enum git_attr_direction new_direction)
direction = new_direction;
}
-static struct attr_stack *read_attr_from_file(const char *path, int macro_ok)
+static struct attr_stack *read_attr_from_file(const char *path, unsigned flags)
{
- FILE *fp = fopen_or_warn(path, "r");
+ int fd;
+ FILE *fp;
struct attr_stack *res;
char buf[2048];
int lineno = 0;
- if (!fp)
+ if (flags & READ_ATTR_NOFOLLOW)
+ fd = open_nofollow(path, O_RDONLY);
+ else
+ fd = open(path, O_RDONLY);
+
+ if (fd < 0) {
+ warn_on_fopen_errors(path);
return NULL;
- res = xcalloc(1, sizeof(*res));
+ }
+ fp = xfdopen(fd, "r");
+
+ CALLOC_ARRAY(res, 1);
while (fgets(buf, sizeof(buf), fp)) {
char *bufp = buf;
if (!lineno)
skip_utf8_bom(&bufp, strlen(bufp));
- handle_attr_line(res, bufp, path, ++lineno, macro_ok);
+ handle_attr_line(res, bufp, path, ++lineno, flags);
}
fclose(fp);
return res;
@@ -720,7 +735,7 @@ static struct attr_stack *read_attr_from_file(const char *path, int macro_ok)
static struct attr_stack *read_attr_from_index(struct index_state *istate,
const char *path,
- int macro_ok)
+ unsigned flags)
{
struct attr_stack *res;
char *buf, *sp;
@@ -733,7 +748,7 @@ static struct attr_stack *read_attr_from_index(struct index_state *istate,
if (!buf)
return NULL;
- res = xcalloc(1, sizeof(*res));
+ CALLOC_ARRAY(res, 1);
for (sp = buf; *sp; ) {
char *ep;
int more;
@@ -741,7 +756,7 @@ static struct attr_stack *read_attr_from_index(struct index_state *istate,
ep = strchrnul(sp, '\n');
more = (*ep == '\n');
*ep = '\0';
- handle_attr_line(res, sp, path, ++lineno, macro_ok);
+ handle_attr_line(res, sp, path, ++lineno, flags);
sp = ep + more;
}
free(buf);
@@ -749,19 +764,19 @@ static struct attr_stack *read_attr_from_index(struct index_state *istate,
}
static struct attr_stack *read_attr(struct index_state *istate,
- const char *path, int macro_ok)
+ const char *path, unsigned flags)
{
struct attr_stack *res = NULL;
if (direction == GIT_ATTR_INDEX) {
- res = read_attr_from_index(istate, path, macro_ok);
+ res = read_attr_from_index(istate, path, flags);
} else if (!is_bare_repository()) {
if (direction == GIT_ATTR_CHECKOUT) {
- res = read_attr_from_index(istate, path, macro_ok);
+ res = read_attr_from_index(istate, path, flags);
if (!res)
- res = read_attr_from_file(path, macro_ok);
+ res = read_attr_from_file(path, flags);
} else if (direction == GIT_ATTR_CHECKIN) {
- res = read_attr_from_file(path, macro_ok);
+ res = read_attr_from_file(path, flags);
if (!res)
/*
* There is no checked out .gitattributes file
@@ -769,12 +784,12 @@ static struct attr_stack *read_attr(struct index_state *istate,
* We allow operation in a sparsely checked out
* work tree, so read from it.
*/
- res = read_attr_from_index(istate, path, macro_ok);
+ res = read_attr_from_index(istate, path, flags);
}
}
if (!res)
- res = xcalloc(1, sizeof(*res));
+ CALLOC_ARRAY(res, 1);
return res;
}
@@ -844,6 +859,7 @@ static void bootstrap_attr_stack(struct index_state *istate,
struct attr_stack **stack)
{
struct attr_stack *e;
+ unsigned flags = READ_ATTR_MACRO_OK;
if (*stack)
return;
@@ -854,27 +870,27 @@ static void bootstrap_attr_stack(struct index_state *istate,
/* system-wide frame */
if (git_attr_system()) {
- e = read_attr_from_file(git_etc_gitattributes(), 1);
+ e = read_attr_from_file(git_etc_gitattributes(), flags);
push_stack(stack, e, NULL, 0);
}
/* home directory */
if (get_home_gitattributes()) {
- e = read_attr_from_file(get_home_gitattributes(), 1);
+ e = read_attr_from_file(get_home_gitattributes(), flags);
push_stack(stack, e, NULL, 0);
}
/* root directory */
- e = read_attr(istate, GITATTRIBUTES_FILE, 1);
+ e = read_attr(istate, GITATTRIBUTES_FILE, flags | READ_ATTR_NOFOLLOW);
push_stack(stack, e, xstrdup(""), 0);
/* info frame */
if (startup_info->have_repository)
- e = read_attr_from_file(git_path_info_attributes(), 1);
+ e = read_attr_from_file(git_path_info_attributes(), flags);
else
e = NULL;
if (!e)
- e = xcalloc(1, sizeof(struct attr_stack));
+ CALLOC_ARRAY(e, 1);
push_stack(stack, e, NULL, 0);
}
@@ -956,7 +972,7 @@ static void prepare_attr_stack(struct index_state *istate,
strbuf_add(&pathbuf, path + pathbuf.len, (len - pathbuf.len));
strbuf_addf(&pathbuf, "/%s", GITATTRIBUTES_FILE);
- next = read_attr(istate, pathbuf.buf, 0);
+ next = read_attr(istate, pathbuf.buf, READ_ATTR_NOFOLLOW);
/* reset the pathbuf to not include "/.gitattributes" */
strbuf_setlen(&pathbuf, len);
diff --git a/bisect.c b/bisect.c
index ae48d19..af2863d 100644
--- a/bisect.c
+++ b/bisect.c
@@ -423,7 +423,7 @@ void find_bisection(struct commit_list **commit_list, int *reaches,
show_list("bisection 2 sorted", 0, nr, list);
*all = nr;
- weights = xcalloc(on_list, sizeof(*weights));
+ CALLOC_ARRAY(weights, on_list);
/* Do the real work of finding bisection commit. */
best = do_find_bisection(list, nr, weights, bisect_flags);
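A note on the many xcalloc() -> CALLOC_ARRAY() conversions in this and the following files: the macro derives the element size from the pointer it assigns to, so a call can no longer be given a swapped or mismatched sizeof (the old blame.c calls just below passed sizeof(int) as the count). Approximately, as defined in git-compat-util.h:

#define CALLOC_ARRAY(x, alloc) (x) = xcalloc((alloc), sizeof(*(x)))

/* so, for example, the bisect.c hunk above is equivalent to: */
int *weights;
CALLOC_ARRAY(weights, on_list);   /* weights = xcalloc(on_list, sizeof(*weights)); */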
diff --git a/blame.c b/blame.c
index a5044fc..5018bb8 100644
--- a/blame.c
+++ b/blame.c
@@ -951,13 +951,13 @@ static int *fuzzy_find_matching_lines(struct blame_origin *parent,
max_search_distance_b = ((2 * max_search_distance_a + 1) * length_b
- 1) / length_a;
- result = xcalloc(sizeof(int), length_b);
- second_best_result = xcalloc(sizeof(int), length_b);
- certainties = xcalloc(sizeof(int), length_b);
+ CALLOC_ARRAY(result, length_b);
+ CALLOC_ARRAY(second_best_result, length_b);
+ CALLOC_ARRAY(certainties, length_b);
/* See get_similarity() for details of similarities. */
similarity_count = length_b * (max_search_distance_a * 2 + 1);
- similarities = xcalloc(sizeof(int), similarity_count);
+ CALLOC_ARRAY(similarities, similarity_count);
for (i = 0; i < length_b; ++i) {
result[i] = -1;
@@ -995,7 +995,7 @@ static void fill_origin_fingerprints(struct blame_origin *o)
return;
o->num_lines = find_line_starts(&line_starts, o->file.ptr,
o->file.size);
- o->fingerprints = xcalloc(sizeof(struct fingerprint), o->num_lines);
+ CALLOC_ARRAY(o->fingerprints, o->num_lines);
get_line_fingerprints(o->fingerprints, o->file.ptr, line_starts,
0, o->num_lines);
free(line_starts);
@@ -1853,8 +1853,7 @@ static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
diffp = NULL;
if (ignore_diffs && same - tlno > 0) {
- line_blames = xcalloc(sizeof(struct blame_line_tracker),
- same - tlno);
+ CALLOC_ARRAY(line_blames, same - tlno);
guess_line_blames(parent, target, tlno, offset, same,
parent_len, line_blames);
}
@@ -2216,7 +2215,7 @@ static struct blame_list *setup_blame_list(struct blame_entry *unblamed,
for (e = unblamed, num_ents = 0; e; e = e->next)
num_ents++;
if (num_ents) {
- blame_list = xcalloc(num_ents, sizeof(struct blame_list));
+ CALLOC_ARRAY(blame_list, num_ents);
for (e = unblamed, i = 0; e; e = e->next)
blame_list[i++].ent = e;
}
@@ -2428,7 +2427,7 @@ static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin,
else if (num_sg < ARRAY_SIZE(sg_buf))
memset(sg_buf, 0, sizeof(sg_buf));
else
- sg_origin = xcalloc(num_sg, sizeof(*sg_origin));
+ CALLOC_ARRAY(sg_origin, num_sg);
/*
* The first pass looks for unrenamed path to optimize for
diff --git a/block-sha1/sha1.c b/block-sha1/sha1.c
index 8681031..1bb6e7c 100644
--- a/block-sha1/sha1.c
+++ b/block-sha1/sha1.c
@@ -70,7 +70,7 @@
* the input data, the next mix it from the 512-bit array.
*/
#define SHA_SRC(t) get_be32((unsigned char *) block + (t)*4)
-#define SHA_MIX(t) SHA_ROL(W((t)+13) ^ W((t)+8) ^ W((t)+2) ^ W(t), 1);
+#define SHA_MIX(t) SHA_ROL(W((t)+13) ^ W((t)+8) ^ W((t)+2) ^ W(t), 1)
#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
unsigned int TEMP = input(t); setW(t, TEMP); \
diff --git a/bloom.c b/bloom.c
index b176f28..52b8747 100644
--- a/bloom.c
+++ b/bloom.c
@@ -277,7 +277,7 @@ struct bloom_filter *get_or_compute_bloom_filter(struct repository *r,
*computed |= BLOOM_TRUNC_EMPTY;
filter->len = 1;
}
- filter->data = xcalloc(filter->len, sizeof(unsigned char));
+ CALLOC_ARRAY(filter->data, filter->len);
hashmap_for_each_entry(&pathmap, &iter, e, entry) {
struct bloom_key key;
diff --git a/branch.c b/branch.c
index 9c9dae1..b71a2de 100644
--- a/branch.c
+++ b/branch.c
@@ -344,6 +344,7 @@ void remove_merge_branch_state(struct repository *r)
unlink(git_path_merge_rr(r));
unlink(git_path_merge_msg(r));
unlink(git_path_merge_mode(r));
+ unlink(git_path_auto_merge(r));
save_autostash(git_path_merge_autostash(r));
}
diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c
index d69e133..1fdb7d9 100644
--- a/builtin/bisect--helper.c
+++ b/builtin/bisect--helper.c
@@ -874,12 +874,19 @@ static enum bisect_error bisect_state(struct bisect_terms *terms, const char **a
*/
for (; argc; argc--, argv++) {
+ struct commit *commit;
+
if (get_oid(*argv, &oid)){
error(_("Bad rev input: %s"), *argv);
oid_array_clear(&revs);
return BISECT_FAILED;
}
- oid_array_append(&revs, &oid);
+
+ commit = lookup_commit_reference(the_repository, &oid);
+ if (!commit)
+ die(_("Bad rev input (not a commit): %s"), *argv);
+
+ oid_array_append(&revs, &commit->object.oid);
}
if (strbuf_read_file(&buf, git_path_bisect_expected_rev(), 0) < the_hash_algo->hexsz ||
diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c
index 2c2936a..c9a3c71 100644
--- a/builtin/checkout-index.c
+++ b/builtin/checkout-index.c
@@ -11,6 +11,7 @@
#include "quote.h"
#include "cache-tree.h"
#include "parse-options.h"
+#include "entry.h"
#define CHECKOUT_ALL 4
static int nul_term_line;
diff --git a/builtin/checkout.c b/builtin/checkout.c
index d0dbe63..5bd9128 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -26,6 +26,7 @@
#include "unpack-trees.h"
#include "wt-status.h"
#include "xdiff-interface.h"
+#include "entry.h"
static const char * const checkout_usage[] = {
N_("git checkout [<options>] <branch>"),
diff --git a/builtin/clean.c b/builtin/clean.c
index 687ab47..995053b 100644
--- a/builtin/clean.c
+++ b/builtin/clean.c
@@ -623,7 +623,7 @@ static int *list_and_choose(struct menu_opts *opts, struct menu_stuff *stuff)
nr += chosen[i];
}
- result = xcalloc(st_add(nr, 1), sizeof(int));
+ CALLOC_ARRAY(result, st_add(nr, 1));
for (i = 0; i < stuff->nr && j < nr; i++) {
if (chosen[i])
result[j++] = i;
diff --git a/builtin/clone.c b/builtin/clone.c
index 51e844a..f6b0c48 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -50,6 +50,8 @@ static int option_no_checkout, option_bare, option_mirror, option_single_branch
static int option_local = -1, option_no_hardlinks, option_shared;
static int option_no_tags;
static int option_shallow_submodules;
+static int option_reject_shallow = -1; /* unspecified */
+static int config_reject_shallow = -1; /* unspecified */
static int deepen;
static char *option_template, *option_depth, *option_since;
static char *option_origin = NULL;
@@ -90,6 +92,8 @@ static struct option builtin_clone_options[] = {
OPT__VERBOSITY(&option_verbosity),
OPT_BOOL(0, "progress", &option_progress,
N_("force progress reporting")),
+ OPT_BOOL(0, "reject-shallow", &option_reject_shallow,
+ N_("don't clone shallow repository")),
OPT_BOOL('n', "no-checkout", &option_no_checkout,
N_("don't create a checkout")),
OPT_BOOL(0, "bare", &option_bare, N_("create a bare repository")),
@@ -858,6 +862,9 @@ static int git_clone_config(const char *k, const char *v, void *cb)
free(remote_name);
remote_name = xstrdup(v);
}
+ if (!strcmp(k, "clone.rejectshallow"))
+ config_reject_shallow = git_config_bool(k, v);
+
return git_default_config(k, v, cb);
}
@@ -963,11 +970,12 @@ static int path_exists(const char *path)
int cmd_clone(int argc, const char **argv, const char *prefix)
{
int is_bundle = 0, is_local;
+ int reject_shallow = 0;
const char *repo_name, *repo, *work_tree, *git_dir;
- char *path, *dir, *display_repo = NULL;
+ char *path = NULL, *dir, *display_repo = NULL;
int dest_exists, real_dest_exists = 0;
const struct ref *refs, *remote_head;
- const struct ref *remote_head_points_at;
+ struct ref *remote_head_points_at = NULL;
const struct ref *our_head_points_at;
struct ref *mapped_refs;
const struct ref *ref;
@@ -1017,9 +1025,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
repo_name = argv[0];
path = get_repo_path(repo_name, &is_bundle);
- if (path)
+ if (path) {
+ FREE_AND_NULL(path);
repo = absolute_pathdup(repo_name);
- else if (strchr(repo_name, ':')) {
+ } else if (strchr(repo_name, ':')) {
repo = repo_name;
display_repo = transport_anonymize_url(repo);
} else
@@ -1157,6 +1166,15 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
git_config(git_clone_config, NULL);
/*
+ * If option_reject_shallow is specified from CLI option,
+ * ignore config_reject_shallow from git_clone_config.
+ */
+ if (config_reject_shallow != -1)
+ reject_shallow = config_reject_shallow;
+ if (option_reject_shallow != -1)
+ reject_shallow = option_reject_shallow;
+
+ /*
* apply the remote name provided by --origin only after this second
* call to git_config, to ensure it overrides all config-based values.
*/
@@ -1216,6 +1234,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (filter_options.choice)
warning(_("--filter is ignored in local clones; use file:// instead."));
if (!access(mkpath("%s/shallow", path), F_OK)) {
+ if (reject_shallow)
+ die(_("source repository is shallow, reject to clone."));
if (option_local > 0)
warning(_("source repository is shallow, ignoring --local"));
is_local = 0;
@@ -1227,6 +1247,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
transport_set_option(transport, TRANS_OPT_KEEP, "yes");
+ if (reject_shallow)
+ transport_set_option(transport, TRANS_OPT_REJECT_SHALLOW, "1");
if (option_depth)
transport_set_option(transport, TRANS_OPT_DEPTH,
option_depth);
@@ -1393,6 +1415,11 @@ cleanup:
strbuf_release(&reflog_msg);
strbuf_release(&branch_top);
strbuf_release(&key);
+ free_refs(mapped_refs);
+ free_refs(remote_head_points_at);
+ free(dir);
+ free(path);
+ UNLEAK(repo);
junk_mode = JUNK_LEAVE_ALL;
strvec_clear(&transport_ls_refs_options.ref_prefixes);
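Net effect of the clone.c changes above: `git clone --reject-shallow` (or clone.rejectshallow=true in config) makes the clone fail when the source repository is shallow, for both local and transport clones, and an explicit command-line flag always overrides the configuration. The -1 values are the usual tri-state "unspecified" trick; as a standalone illustration (not the patch's code):

/* -1 means "unspecified"; command line beats config, config beats the default */
static int resolve_tristate(int cli, int config, int fallback)
{
	if (cli != -1)
		return cli;      /* --reject-shallow / --no-reject-shallow */
	if (config != -1)
		return config;   /* clone.rejectshallow */
	return fallback;
}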
diff --git a/builtin/column.c b/builtin/column.c
index e815e14..40d4b3b 100644
--- a/builtin/column.c
+++ b/builtin/column.c
@@ -27,10 +27,10 @@ int cmd_column(int argc, const char **argv, const char *prefix)
OPT_STRING(0, "command", &real_command, N_("name"), N_("lookup config vars")),
OPT_COLUMN(0, "mode", &colopts, N_("layout to use")),
OPT_INTEGER(0, "raw-mode", &colopts, N_("layout to use")),
- OPT_INTEGER(0, "width", &copts.width, N_("Maximum width")),
- OPT_STRING(0, "indent", &copts.indent, N_("string"), N_("Padding space on left border")),
- OPT_INTEGER(0, "nl", &copts.nl, N_("Padding space on right border")),
- OPT_INTEGER(0, "padding", &copts.padding, N_("Padding space between columns")),
+ OPT_INTEGER(0, "width", &copts.width, N_("maximum width")),
+ OPT_STRING(0, "indent", &copts.indent, N_("string"), N_("padding space on left border")),
+ OPT_INTEGER(0, "nl", &copts.nl, N_("padding space on right border")),
+ OPT_INTEGER(0, "padding", &copts.padding, N_("padding space between columns")),
OPT_END()
};
diff --git a/builtin/commit.c b/builtin/commit.c
index cf0c36d..190d215 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -105,7 +105,8 @@ static const char *template_file;
*/
static const char *author_message, *author_message_buffer;
static char *edit_message, *use_message;
-static char *fixup_message, *squash_message;
+static char *fixup_message, *fixup_commit, *squash_message;
+static const char *fixup_prefix;
static int all, also, interactive, patch_interactive, only, amend, signoff;
static int edit_flag = -1; /* unspecified */
static int quiet, verbose, no_verify, allow_empty, dry_run, renew_authorship;
@@ -113,6 +114,7 @@ static int config_commit_verbose = -1; /* unspecified */
static int no_post_rewrite, allow_empty_message, pathspec_file_nul;
static char *untracked_files_arg, *force_date, *ignore_submodule_arg, *ignored_arg;
static char *sign_commit, *pathspec_from_file;
+static struct strvec trailer_args = STRVEC_INIT;
/*
* The default commit message cleanup mode will remove the lines
@@ -131,6 +133,14 @@ static struct strbuf message = STRBUF_INIT;
static enum wt_status_format status_format = STATUS_FORMAT_UNSPECIFIED;
+static int opt_pass_trailer(const struct option *opt, const char *arg, int unset)
+{
+ BUG_ON_OPT_NEG(unset);
+
+ strvec_pushl(&trailer_args, "--trailer", arg, NULL);
+ return 0;
+}
+
static int opt_parse_porcelain(const struct option *opt, const char *arg, int unset)
{
enum wt_status_format *value = (enum wt_status_format *)opt->value;
@@ -359,7 +369,8 @@ static const char *prepare_index(const char **argv, const char *prefix,
die(_("--pathspec-file-nul requires --pathspec-from-file"));
}
- if (!pathspec.nr && (also || (only && !amend && !allow_empty)))
+ if (!pathspec.nr && (also || (only && !allow_empty &&
+ (!amend || (fixup_message && strcmp(fixup_prefix, "amend"))))))
die(_("No paths with --include/--only does not make sense."));
if (read_cache_preload(&pathspec) < 0)
@@ -683,6 +694,22 @@ static void adjust_comment_line_char(const struct strbuf *sb)
comment_line_char = *p;
}
+static void prepare_amend_commit(struct commit *commit, struct strbuf *sb,
+ struct pretty_print_context *ctx)
+{
+ const char *buffer, *subject, *fmt;
+
+ buffer = get_commit_buffer(commit, NULL);
+ find_commit_subject(buffer, &subject);
+ /*
+ * If we amend the 'amend!' commit then we don't want to
+ * duplicate the subject line.
+ */
+ fmt = starts_with(subject, "amend!") ? "%b" : "%B";
+ format_commit_message(commit, fmt, sb, ctx);
+ unuse_commit_buffer(commit, buffer);
+}
+
static int prepare_to_commit(const char *index_file, const char *prefix,
struct commit *current_head,
struct wt_status *s,
@@ -747,15 +774,33 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
} else if (fixup_message) {
struct pretty_print_context ctx = {0};
struct commit *commit;
- commit = lookup_commit_reference_by_name(fixup_message);
+ char *fmt;
+ commit = lookup_commit_reference_by_name(fixup_commit);
if (!commit)
- die(_("could not lookup commit %s"), fixup_message);
+ die(_("could not lookup commit %s"), fixup_commit);
ctx.output_encoding = get_commit_output_encoding();
- format_commit_message(commit, "fixup! %s\n\n",
- &sb, &ctx);
- if (have_option_m)
- strbuf_addbuf(&sb, &message);
+ fmt = xstrfmt("%s! %%s\n\n", fixup_prefix);
+ format_commit_message(commit, fmt, &sb, &ctx);
+ free(fmt);
hook_arg1 = "message";
+
+ /*
+ * Only `-m` commit message option is checked here, as
+ * it supports `--fixup` to append the commit message.
+ *
+ * The other commit message options `-c`/`-C`/`-F` are
+ * incompatible with all the forms of `--fixup` and
+ * have already errored out while parsing the `git commit`
+ * options.
+ */
+ if (have_option_m && !strcmp(fixup_prefix, "fixup"))
+ strbuf_addbuf(&sb, &message);
+
+ if (!strcmp(fixup_prefix, "amend")) {
+ if (have_option_m)
+ die(_("cannot combine -m with --fixup:%s"), fixup_message);
+ prepare_amend_commit(commit, &sb, &ctx);
+ }
} else if (!stat(git_path_merge_msg(the_repository), &statbuf)) {
size_t merge_msg_start;
@@ -962,6 +1007,18 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
fclose(s->fp);
+ if (trailer_args.nr) {
+ struct child_process run_trailer = CHILD_PROCESS_INIT;
+
+ strvec_pushl(&run_trailer.args, "interpret-trailers",
+ "--in-place", git_path_commit_editmsg(), NULL);
+ strvec_pushv(&run_trailer.args, trailer_args.v);
+ run_trailer.git_cmd = 1;
+ if (run_command(&run_trailer))
+ die(_("unable to pass trailers to --trailers"));
+ strvec_clear(&trailer_args);
+ }
+
/*
* Reject an attempt to record a non-merge empty commit without
* explicit --allow-empty. In the cherry-pick case, it may be
@@ -1156,6 +1213,19 @@ static void finalize_deferred_config(struct wt_status *s)
s->ahead_behind_flags = AHEAD_BEHIND_FULL;
}
+static void check_fixup_reword_options(int argc, const char *argv[]) {
+ if (whence != FROM_COMMIT) {
+ if (whence == FROM_MERGE)
+ die(_("You are in the middle of a merge -- cannot reword."));
+ else if (is_from_cherry_pick(whence))
+ die(_("You are in the middle of a cherry-pick -- cannot reword."));
+ }
+ if (argc)
+ die(_("cannot combine reword option of --fixup with path '%s'"), *argv);
+ if (patch_interactive || interactive || all || also || only)
+ die(_("reword option of --fixup is mutually exclusive with --patch/--interactive/--all/--include/--only"));
+}
+
static int parse_and_validate_options(int argc, const char *argv[],
const struct option *options,
const char * const usage[],
@@ -1174,7 +1244,7 @@ static int parse_and_validate_options(int argc, const char *argv[],
if (force_author && renew_authorship)
die(_("Using both --reset-author and --author does not make sense"));
- if (logfile || have_option_m || use_message || fixup_message)
+ if (logfile || have_option_m || use_message)
use_editor = 0;
if (0 <= edit_flag)
use_editor = edit_flag;
@@ -1231,6 +1301,42 @@ static int parse_and_validate_options(int argc, const char *argv[],
if (also + only + all + interactive > 1)
die(_("Only one of --include/--only/--all/--interactive/--patch can be used."));
+
+ if (fixup_message) {
+ /*
+ * We limit --fixup's suboptions to only alpha characters.
+ * If the first character after a run of alpha is colon,
+ * then the part before the colon may be a known suboption
+ * name like `amend` or `reword`, or a misspelt suboption
+ * name. In either case, we treat it as
+ * --fixup=<suboption>:<arg>.
+ *
+ * Otherwise, we are dealing with --fixup=<commit>.
+ */
+ char *p = fixup_message;
+ while (isalpha(*p))
+ p++;
+ if (p > fixup_message && *p == ':') {
+ *p = '\0';
+ fixup_commit = p + 1;
+ if (!strcmp("amend", fixup_message) ||
+ !strcmp("reword", fixup_message)) {
+ fixup_prefix = "amend";
+ allow_empty = 1;
+ if (*fixup_message == 'r') {
+ check_fixup_reword_options(argc, argv);
+ only = 1;
+ }
+ } else {
+ die(_("unknown option: --fixup=%s:%s"), fixup_message, fixup_commit);
+ }
+ } else {
+ fixup_commit = fixup_message;
+ fixup_prefix = "fixup";
+ use_editor = 0;
+ }
+ }
+
cleanup_mode = get_cleanup_mode(cleanup_arg, use_editor);
handle_untracked_files_arg(s);
@@ -1508,9 +1614,14 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
OPT_CALLBACK('m', "message", &message, N_("message"), N_("commit message"), opt_parse_m),
OPT_STRING('c', "reedit-message", &edit_message, N_("commit"), N_("reuse and edit message from specified commit")),
OPT_STRING('C', "reuse-message", &use_message, N_("commit"), N_("reuse message from specified commit")),
- OPT_STRING(0, "fixup", &fixup_message, N_("commit"), N_("use autosquash formatted message to fixup specified commit")),
+ /*
+ * TRANSLATORS: Leave "[(amend|reword):]" as-is,
+ * and only translate <commit>.
+ */
+ OPT_STRING(0, "fixup", &fixup_message, N_("[(amend|reword):]commit"), N_("use autosquash formatted message to fixup or amend/reword specified commit")),
OPT_STRING(0, "squash", &squash_message, N_("commit"), N_("use autosquash formatted message to squash specified commit")),
OPT_BOOL(0, "reset-author", &renew_authorship, N_("the commit is authored by me now (used with -C/-c/--amend)")),
+ OPT_CALLBACK_F(0, "trailer", NULL, N_("trailer"), N_("add custom trailer(s)"), PARSE_OPT_NONEG, opt_pass_trailer),
OPT_BOOL('s', "signoff", &signoff, N_("add a Signed-off-by trailer")),
OPT_FILENAME('t', "template", &template_file, N_("use specified template file")),
OPT_BOOL('e', "edit", &edit_flag, N_("force edit of commit")),
@@ -1667,6 +1778,19 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
exit(1);
}
+ if (fixup_message && starts_with(sb.buf, "amend! ") &&
+ !allow_empty_message) {
+ struct strbuf body = STRBUF_INIT;
+ size_t len = commit_subject_length(sb.buf);
+ strbuf_addstr(&body, sb.buf + len);
+ if (message_is_empty(&body, cleanup_mode)) {
+ rollback_index_files();
+ fprintf(stderr, _("Aborting commit due to empty commit message body.\n"));
+ exit(1);
+ }
+ strbuf_release(&body);
+ }
+
if (amend) {
const char *exclude_gpgsig[3] = { "gpgsig", "gpgsig-sha256", NULL };
extra = read_commit_extra_headers(current_head, exclude_gpgsig);
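To make the new --fixup forms above concrete, here is a toy restatement (not the builtin's code) of how the argument is split; the real code then builds a "fixup! <subject>" or "amend! <subject>" first line and, for amend/reword, pulls in the target commit's body via prepare_amend_commit(). Plain `--fixup=<commit>` keeps the old behaviour and skips the editor, while the amend/reword forms launch it:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* toy version of the --fixup=[(amend|reword):]<commit> split */
static void split_fixup_arg(char *arg)
{
	char *p = arg;
	while (isalpha((unsigned char)*p))
		p++;
	if (p > arg && *p == ':') {
		*p = '\0';
		if (!strcmp(arg, "amend") || !strcmp(arg, "reword"))
			printf("prefix=amend commit=%s (reword also implies --only)\n", p + 1);
		else
			printf("unknown suboption: %s\n", arg);
	} else {
		printf("prefix=fixup commit=%s\n", arg);
	}
}

int main(void)
{
	char a[] = "amend:HEAD~2", b[] = "reword:abc123", c[] = "HEAD~2";
	split_fixup_arg(a);   /* prefix=amend commit=HEAD~2 ... */
	split_fixup_arg(b);   /* prefix=amend commit=abc123 ... */
	split_fixup_arg(c);   /* prefix=fixup commit=HEAD~2 */
	return 0;
}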
diff --git a/builtin/credential-cache--daemon.c b/builtin/credential-cache--daemon.c
index c61f123..4c6c89a 100644
--- a/builtin/credential-cache--daemon.c
+++ b/builtin/credential-cache--daemon.c
@@ -203,9 +203,10 @@ static int serve_cache_loop(int fd)
static void serve_cache(const char *socket_path, int debug)
{
+ struct unix_stream_listen_opts opts = UNIX_STREAM_LISTEN_OPTS_INIT;
int fd;
- fd = unix_stream_listen(socket_path);
+ fd = unix_stream_listen(socket_path, &opts);
if (fd < 0)
die_errno("unable to bind to '%s'", socket_path);
diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c
index 9b3f709..76a6ba3 100644
--- a/builtin/credential-cache.c
+++ b/builtin/credential-cache.c
@@ -14,7 +14,7 @@
static int send_request(const char *socket, const struct strbuf *out)
{
int got_data = 0;
- int fd = unix_stream_connect(socket);
+ int fd = unix_stream_connect(socket, 0);
if (fd < 0)
return -1;
diff --git a/builtin/difftool.c b/builtin/difftool.c
index 32c914d..0202a43 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -23,6 +23,7 @@
#include "lockfile.h"
#include "object-store.h"
#include "dir.h"
+#include "entry.h"
static int trust_exit_code;
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index dd4d09c..3afa81c 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -3322,7 +3322,7 @@ static void option_rewrite_submodules(const char *arg, struct string_list *list)
die(_("Expected format name:filename for submodule rewrite option"));
*f = '\0';
f++;
- ms = xcalloc(1, sizeof(*ms));
+ CALLOC_ARRAY(ms, 1);
fp = fopen(f, "r");
if (!fp)
@@ -3519,9 +3519,9 @@ int cmd_fast_import(int argc, const char **argv, const char *prefix)
alloc_objects(object_entry_alloc);
strbuf_init(&command_buf, 0);
- atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
- branch_table = xcalloc(branch_table_sz, sizeof(struct branch*));
- avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
+ CALLOC_ARRAY(atom_table, atom_table_sz);
+ CALLOC_ARRAY(branch_table, branch_table_sz);
+ CALLOC_ARRAY(avail_tree_table, avail_tree_table_sz);
marks = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
hashmap_init(&object_table, object_entry_hashcmp, NULL, 0);
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 0b90de8..97c4fe6 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -48,6 +48,7 @@ enum {
static int fetch_prune_config = -1; /* unspecified */
static int fetch_show_forced_updates = 1;
static uint64_t forced_updates_ms = 0;
+static int prefetch = 0;
static int prune = -1; /* unspecified */
#define PRUNE_BY_DEFAULT 0 /* do we prune by default? */
@@ -158,6 +159,8 @@ static struct option builtin_fetch_options[] = {
N_("do not fetch all tags (--no-tags)"), TAGS_UNSET),
OPT_INTEGER('j', "jobs", &max_jobs,
N_("number of submodules fetched in parallel")),
+ OPT_BOOL(0, "prefetch", &prefetch,
+ N_("modify the refspec to place all refs within refs/prefetch/")),
OPT_BOOL('p', "prune", &prune,
N_("prune remote-tracking branches no longer on remote")),
OPT_BOOL('P', "prune-tags", &prune_tags,
@@ -436,6 +439,56 @@ static void find_non_local_tags(const struct ref *refs,
oidset_clear(&fetch_oids);
}
+static void filter_prefetch_refspec(struct refspec *rs)
+{
+ int i;
+
+ if (!prefetch)
+ return;
+
+ for (i = 0; i < rs->nr; i++) {
+ struct strbuf new_dst = STRBUF_INIT;
+ char *old_dst;
+ const char *sub = NULL;
+
+ if (rs->items[i].negative)
+ continue;
+ if (!rs->items[i].dst ||
+ (rs->items[i].src &&
+ !strncmp(rs->items[i].src, "refs/tags/", 10))) {
+ int j;
+
+ free(rs->items[i].src);
+ free(rs->items[i].dst);
+
+ for (j = i + 1; j < rs->nr; j++) {
+ rs->items[j - 1] = rs->items[j];
+ rs->raw[j - 1] = rs->raw[j];
+ }
+ rs->nr--;
+ i--;
+ continue;
+ }
+
+ old_dst = rs->items[i].dst;
+ strbuf_addstr(&new_dst, "refs/prefetch/");
+
+ /*
+ * If old_dst starts with "refs/", then place
+ * sub after that prefix. Otherwise, start at
+ * the beginning of the string.
+ */
+ if (!skip_prefix(old_dst, "refs/", &sub))
+ sub = old_dst;
+ strbuf_addstr(&new_dst, sub);
+
+ rs->items[i].dst = strbuf_detach(&new_dst, NULL);
+ rs->items[i].force = 1;
+
+ free(old_dst);
+ }
+}
+
static struct ref *get_ref_map(struct remote *remote,
const struct ref *remote_refs,
struct refspec *rs,
@@ -452,6 +505,10 @@ static struct ref *get_ref_map(struct remote *remote,
struct hashmap existing_refs;
int existing_refs_populated = 0;
+ filter_prefetch_refspec(rs);
+ if (remote)
+ filter_prefetch_refspec(&remote->fetch);
+
if (rs->nr) {
struct refspec *fetch_refspec;
@@ -520,7 +577,7 @@ static struct ref *get_ref_map(struct remote *remote,
if (has_merge &&
!strcmp(branch->remote_name, remote->name))
add_merge_config(&ref_map, remote_refs, branch, &tail);
- } else {
+ } else if (!prefetch) {
ref_map = get_remote_ref(remote_refs, "HEAD");
if (!ref_map)
die(_("Couldn't find remote ref HEAD"));
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 4d7f5c6..87a99b0 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -71,11 +71,6 @@ static const char *printable_type(const struct object_id *oid,
return ret;
}
-static int fsck_config(const char *var, const char *value, void *cb)
-{
- return fsck_config_internal(var, value, cb, &fsck_obj_options);
-}
-
static int objerror(struct object *obj, const char *err)
{
errors_found |= ERROR_OBJECT;
@@ -89,7 +84,9 @@ static int objerror(struct object *obj, const char *err)
static int fsck_error_func(struct fsck_options *o,
const struct object_id *oid,
enum object_type object_type,
- int msg_type, const char *message)
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
{
switch (msg_type) {
case FSCK_WARN:
@@ -197,7 +194,8 @@ static int traverse_reachable(void)
return !!result;
}
-static int mark_used(struct object *obj, int type, void *data, struct fsck_options *options)
+static int mark_used(struct object *obj, enum object_type object_type,
+ void *data, struct fsck_options *options)
{
if (!obj)
return 1;
@@ -727,7 +725,7 @@ static int fsck_cache_tree(struct cache_tree *it)
static void mark_object_for_connectivity(const struct object_id *oid)
{
- struct object *obj = lookup_unknown_object(oid);
+ struct object *obj = lookup_unknown_object(the_repository, oid);
obj->flags |= HAS_OBJ;
}
@@ -803,7 +801,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
if (name_objects)
fsck_enable_object_names(&fsck_walk_options);
- git_config(fsck_config, NULL);
+ git_config(git_fsck_config, &fsck_obj_options);
if (connectivity_only) {
for_each_loose_object(mark_loose_for_connectivity, NULL, 0);
diff --git a/builtin/gc.c b/builtin/gc.c
index ef7226d..98a8031 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -873,55 +873,40 @@ static int maintenance_task_commit_graph(struct maintenance_run_opts *opts)
return 0;
}
-static int fetch_remote(const char *remote, struct maintenance_run_opts *opts)
+static int fetch_remote(struct remote *remote, void *cbdata)
{
+ struct maintenance_run_opts *opts = cbdata;
struct child_process child = CHILD_PROCESS_INIT;
+ if (remote->skip_default_update)
+ return 0;
+
child.git_cmd = 1;
- strvec_pushl(&child.args, "fetch", remote, "--prune", "--no-tags",
+ strvec_pushl(&child.args, "fetch", remote->name,
+ "--prefetch", "--prune", "--no-tags",
"--no-write-fetch-head", "--recurse-submodules=no",
- "--refmap=", NULL);
+ NULL);
if (opts->quiet)
strvec_push(&child.args, "--quiet");
- strvec_pushf(&child.args, "+refs/heads/*:refs/prefetch/%s/*", remote);
-
return !!run_command(&child);
}
-static int append_remote(struct remote *remote, void *cbdata)
-{
- struct string_list *remotes = (struct string_list *)cbdata;
-
- string_list_append(remotes, remote->name);
- return 0;
-}
-
static int maintenance_task_prefetch(struct maintenance_run_opts *opts)
{
- int result = 0;
- struct string_list_item *item;
- struct string_list remotes = STRING_LIST_INIT_DUP;
-
git_config_set_multivar_gently("log.excludedecoration",
"refs/prefetch/",
"refs/prefetch/",
CONFIG_FLAGS_FIXED_VALUE |
CONFIG_FLAGS_MULTI_REPLACE);
- if (for_each_remote(append_remote, &remotes)) {
- error(_("failed to fill remotes"));
- result = 1;
- goto cleanup;
+ if (for_each_remote(fetch_remote, opts)) {
+ error(_("failed to prefetch remotes"));
+ return 1;
}
- for_each_string_list_item(item, &remotes)
- result |= fetch_remote(item->string, opts);
-
-cleanup:
- string_list_clear(&remotes, 0);
- return result;
+ return 0;
}
static int maintenance_task_gc(struct maintenance_run_opts *opts)
diff --git a/builtin/grep.c b/builtin/grep.c
index c2d4041..b71b4a2 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -211,7 +211,7 @@ static void start_threads(struct grep_opt *opt)
strbuf_init(&todo[i].out, 0);
}
- threads = xcalloc(num_threads, sizeof(*threads));
+ CALLOC_ARRAY(threads, num_threads);
for (i = 0; i < num_threads; i++) {
int err;
struct grep_opt *o = grep_opt_dup(opt);
@@ -1183,6 +1183,5 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
run_pager(&opt, prefix);
clear_pathspec(&pathspec);
free_grep_patterns(&opt);
- grep_destroy();
return !hit;
}
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index bad5748..15507b5 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -120,7 +120,7 @@ static int nr_threads;
static int from_stdin;
static int strict;
static int do_fsck_object;
-static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
+static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
static int verbose;
static int show_resolving_progress;
static int show_stat;
@@ -185,7 +185,7 @@ static void init_thread(void)
if (show_stat)
pthread_mutex_init(&deepest_delta_mutex, NULL);
pthread_key_create(&key, NULL);
- thread_data = xcalloc(nr_threads, sizeof(*thread_data));
+ CALLOC_ARRAY(thread_data, nr_threads);
for (i = 0; i < nr_threads; i++) {
thread_data[i].pack_fd = open(curr_pack, O_RDONLY);
if (thread_data[i].pack_fd == -1)
@@ -212,7 +212,8 @@ static void cleanup_thread(void)
free(thread_data);
}
-static int mark_link(struct object *obj, int type, void *data, struct fsck_options *options)
+static int mark_link(struct object *obj, enum object_type type,
+ void *data, struct fsck_options *options)
{
if (!obj)
return -1;
@@ -1674,7 +1675,7 @@ static void show_pack_info(int stat_only)
unsigned long *chain_histogram = NULL;
if (deepest_delta)
- chain_histogram = xcalloc(deepest_delta, sizeof(unsigned long));
+ CALLOC_ARRAY(chain_histogram, deepest_delta);
for (i = 0; i < nr_objects; i++) {
struct object_entry *obj = &objects[i];
@@ -1712,22 +1713,6 @@ static void show_pack_info(int stat_only)
}
}
-static int print_dangling_gitmodules(struct fsck_options *o,
- const struct object_id *oid,
- enum object_type object_type,
- int msg_type, const char *message)
-{
- /*
- * NEEDSWORK: Plumb the MSG_ID (from fsck.c) here and use it
- * instead of relying on this string check.
- */
- if (starts_with(message, "gitmodulesMissing")) {
- printf("%s\n", oid_to_hex(oid));
- return 0;
- }
- return fsck_error_function(o, oid, object_type, msg_type, message);
-}
-
int cmd_index_pack(int argc, const char **argv, const char *prefix)
{
int i, fix_thin_pack = 0, verify = 0, stat_only = 0, rev_index;
@@ -1912,10 +1897,10 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
curr_pack = open_pack_file(pack_name);
parse_pack_header();
- objects = xcalloc(st_add(nr_objects, 1), sizeof(struct object_entry));
+ CALLOC_ARRAY(objects, st_add(nr_objects, 1));
if (show_stat)
- obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat));
- ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry));
+ CALLOC_ARRAY(obj_stat, st_add(nr_objects, 1));
+ CALLOC_ARRAY(ofs_deltas, nr_objects);
parse_pack_objects(pack_hash);
if (report_end_of_input)
write_in_full(2, "\0", 1);
@@ -1948,13 +1933,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
else
close(input_fd);
- if (do_fsck_object) {
- struct fsck_options fo = fsck_options;
-
- fo.error_func = print_dangling_gitmodules;
- if (fsck_finish(&fo))
- die(_("fsck error in pack objects"));
- }
+ if (do_fsck_object && fsck_finish(&fsck_options))
+ die(_("fsck error in pack objects"));
free(objects);
strbuf_release(&index_name_buf);
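index-pack previously had to string-match "gitmodulesMissing" in a private error callback in order to print dangling .gitmodules blobs; with the message id now plumbed through the fsck callbacks (the new enum fsck_msg_id parameter above), that hack goes away and FSCK_OPTIONS_MISSING_GITMODULES carries the behaviour instead. Had the helper been kept, it could be written against the new signature roughly like this (sketch only; the enum spelling is assumed from the FSCK_MSG_* convention visible in the mktag hunk below):

static int print_dangling_gitmodules(struct fsck_options *o,
				     const struct object_id *oid,
				     enum object_type object_type,
				     enum fsck_msg_type msg_type,
				     enum fsck_msg_id msg_id,
				     const char *message)
{
	/* sketch: branch on the id instead of parsing the message text */
	if (msg_id == FSCK_MSG_GITMODULES_MISSING) {
		printf("%s\n", oid_to_hex(oid));
		return 0;
	}
	return fsck_error_function(o, oid, object_type, msg_type, msg_id, message);
}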
diff --git a/builtin/init-db.c b/builtin/init-db.c
index dcc45be..c19b35f 100644
--- a/builtin/init-db.c
+++ b/builtin/init-db.c
@@ -25,7 +25,6 @@
static int init_is_bare_repository = 0;
static int init_shared_repository = -1;
-static const char *init_db_template_dir;
static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
DIR *dir)
@@ -94,7 +93,7 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
}
}
-static void copy_templates(const char *template_dir)
+static void copy_templates(const char *template_dir, const char *init_template_dir)
{
struct strbuf path = STRBUF_INIT;
struct strbuf template_path = STRBUF_INIT;
@@ -107,7 +106,7 @@ static void copy_templates(const char *template_dir)
if (!template_dir)
template_dir = getenv(TEMPLATE_DIR_ENVIRONMENT);
if (!template_dir)
- template_dir = init_db_template_dir;
+ template_dir = init_template_dir;
if (!template_dir)
template_dir = to_free = system_path(DEFAULT_GIT_TEMPLATE_DIR);
if (!template_dir[0]) {
@@ -154,17 +153,6 @@ free_return:
clear_repository_format(&template_format);
}
-static int git_init_db_config(const char *k, const char *v, void *cb)
-{
- if (!strcmp(k, "init.templatedir"))
- return git_config_pathname(&init_db_template_dir, k, v);
-
- if (starts_with(k, "core."))
- return platform_core_config(k, v, cb);
-
- return 0;
-}
-
/*
* If the git_dir is not directly inside the working tree, then git will not
* find it by default, and we need to set the worktree explicitly.
@@ -212,10 +200,8 @@ static int create_default_files(const char *template_path,
int reinit;
int filemode;
struct strbuf err = STRBUF_INIT;
-
- /* Just look for `init.templatedir` */
- init_db_template_dir = NULL; /* re-set in case it was set before */
- git_config(git_init_db_config, NULL);
+ const char *init_template_dir = NULL;
+ const char *work_tree = get_git_work_tree();
/*
* First copy the templates -- we might have the default
@@ -226,7 +212,8 @@ static int create_default_files(const char *template_path,
* values (since we've just potentially changed what's available on
* disk).
*/
- copy_templates(template_path);
+ git_config_get_value("init.templatedir", &init_template_dir);
+ copy_templates(template_path, init_template_dir);
git_config_clear();
reset_shared_repository();
git_config(git_default_config, NULL);
@@ -235,7 +222,7 @@ static int create_default_files(const char *template_path,
* We must make sure command-line options continue to override any
* values we might have just re-read from the config.
*/
- is_bare_repository_cfg = init_is_bare_repository;
+ is_bare_repository_cfg = init_is_bare_repository || !work_tree;
if (init_shared_repository != -1)
set_shared_repository(init_shared_repository);
@@ -299,7 +286,6 @@ static int create_default_files(const char *template_path,
if (is_bare_repository())
git_config_set("core.bare", "true");
else {
- const char *work_tree = get_git_work_tree();
git_config_set("core.bare", "false");
/* allow template config file to override the default */
if (log_all_ref_updates == LOG_REFS_UNSET)
@@ -422,8 +408,8 @@ int init_db(const char *git_dir, const char *real_git_dir,
}
startup_info->have_repository = 1;
- /* Just look for `core.hidedotfiles` */
- git_config(git_init_db_config, NULL);
+ /* Ensure `core.hidedotfiles` is processed */
+ git_config(platform_core_config, NULL);
safe_create_dir(git_dir, 0);
@@ -575,8 +561,10 @@ int cmd_init_db(int argc, const char **argv, const char *prefix)
if (real_git_dir && !is_absolute_path(real_git_dir))
real_git_dir = real_pathdup(real_git_dir, 1);
- if (template_dir && *template_dir && !is_absolute_path(template_dir))
+ if (template_dir && *template_dir && !is_absolute_path(template_dir)) {
template_dir = absolute_pathdup(template_dir);
+ UNLEAK(template_dir);
+ }
if (argc == 1) {
int mkdir_tried = 0;
diff --git a/builtin/log.c b/builtin/log.c
index 980de59..8acd285 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -1662,13 +1662,19 @@ static void print_bases(struct base_tree_info *bases, FILE *file)
oidclr(&bases->base_commit);
}
-static const char *diff_title(struct strbuf *sb, int reroll_count,
- const char *generic, const char *rerolled)
+static const char *diff_title(struct strbuf *sb,
+ const char *reroll_count,
+ const char *generic,
+ const char *rerolled)
{
- if (reroll_count <= 0)
+ int v;
+
+ /* RFC may be v0, so allow -v1 to diff against v0 */
+ if (reroll_count && !strtol_i(reroll_count, 10, &v) &&
+ v >= 1)
+ strbuf_addf(sb, rerolled, v - 1);
+ else
strbuf_addstr(sb, generic);
- else /* RFC may be v0, so allow -v1 to diff against v0 */
- strbuf_addf(sb, rerolled, reroll_count - 1);
return sb->buf;
}
@@ -1717,7 +1723,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
struct strbuf buf = STRBUF_INIT;
int use_patch_format = 0;
int quiet = 0;
- int reroll_count = -1;
+ const char *reroll_count = NULL;
char *cover_from_description_arg = NULL;
char *branch_name = NULL;
char *base_commit = NULL;
@@ -1751,7 +1757,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
N_("use <sfx> instead of '.patch'")),
OPT_INTEGER(0, "start-number", &start_number,
N_("start numbering patches at <n> instead of 1")),
- OPT_INTEGER('v', "reroll-count", &reroll_count,
+ OPT_STRING('v', "reroll-count", &reroll_count, N_("reroll-count"),
N_("mark the series as Nth re-roll")),
OPT_INTEGER(0, "filename-max-length", &fmt_patch_name_max,
N_("max length of output filename")),
@@ -1862,9 +1868,10 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
if (cover_from_description_arg)
cover_from_description_mode = parse_cover_from_description(cover_from_description_arg);
- if (0 < reroll_count) {
+ if (reroll_count) {
struct strbuf sprefix = STRBUF_INIT;
- strbuf_addf(&sprefix, "%s v%d",
+
+ strbuf_addf(&sprefix, "%s v%s",
rev.subject_prefix, reroll_count);
rev.reroll_count = reroll_count;
rev.subject_prefix = strbuf_detach(&sprefix, NULL);
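--reroll-count is now carried as a string, so a value such as 3.1 can appear in the [PATCH vX] subject prefix; only when the value parses as a whole number >= 1 does diff_title() emit the "against v(N-1)" form, otherwise it falls back to the generic title. A standalone sketch of the whole-string integer check that strtol_i() is assumed to perform:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* accept only a complete base-10 integer that fits in an int */
static int parse_whole_int(const char *s, int *out)
{
	char *end;
	long v;
	errno = 0;
	v = strtol(s, &end, 10);
	if (errno || end == s || *end || v < INT_MIN || v > INT_MAX)
		return -1;
	*out = (int)v;
	return 0;
}

int main(void)
{
	int v;
	printf("3   -> %s\n", !parse_whole_int("3", &v) && v >= 1 ? "titled against previous version" : "generic title");
	printf("3.1 -> %s\n", !parse_whole_int("3.1", &v) && v >= 1 ? "titled against previous version" : "generic title");
	return 0;
}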
diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c
index ef60475..1794548 100644
--- a/builtin/ls-remote.c
+++ b/builtin/ls-remote.c
@@ -88,7 +88,7 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
if (argc > 1) {
int i;
- pattern = xcalloc(argc, sizeof(const char *));
+ CALLOC_ARRAY(pattern, argc);
for (i = 1; i < argc; i++) {
pattern[i - 1] = xstrfmt("*/%s", argv[i]);
}
@@ -124,8 +124,6 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
int hash_algo = hash_algo_by_ptr(transport_get_hash_algo(transport));
repo_set_hash_algo(the_repository, hash_algo);
}
- if (transport_disconnect(transport))
- return 1;
if (!dest && !quiet)
fprintf(stderr, "From %s\n", *remote->url);
@@ -151,5 +149,7 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
}
ref_array_clear(&ref_array);
+ if (transport_disconnect(transport))
+ return 1;
return status;
}
diff --git a/builtin/merge.c b/builtin/merge.c
index eb00b27..3886195 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -210,7 +210,7 @@ static struct strategy *get_strategy(const char *name)
exit(1);
}
- ret = xcalloc(1, sizeof(struct strategy));
+ CALLOC_ARRAY(ret, 1);
ret->name = xstrdup(name);
ret->attr = NO_TRIVIAL;
return ret;
diff --git a/builtin/mktag.c b/builtin/mktag.c
index 41a399a..dddcccd 100644
--- a/builtin/mktag.c
+++ b/builtin/mktag.c
@@ -14,15 +14,12 @@ static int option_strict = 1;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
-static int mktag_config(const char *var, const char *value, void *cb)
-{
- return fsck_config_internal(var, value, cb, &fsck_options);
-}
-
static int mktag_fsck_error_func(struct fsck_options *o,
const struct object_id *oid,
enum object_type object_type,
- int msg_type, const char *message)
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
{
switch (msg_type) {
case FSCK_WARN:
@@ -91,9 +88,10 @@ int cmd_mktag(int argc, const char **argv, const char *prefix)
die_errno(_("could not read from stdin"));
fsck_options.error_func = mktag_fsck_error_func;
- fsck_set_msg_type(&fsck_options, "extraheaderentry", "warn");
+ fsck_set_msg_type_from_ids(&fsck_options, FSCK_MSG_EXTRA_HEADER_ENTRY,
+ FSCK_WARN);
/* config might set fsck.extraHeaderEntry=* again */
- git_config(mktag_config, NULL);
+ git_config(git_fsck_config, &fsck_options);
if (fsck_tag_standalone(NULL, buf.buf, buf.len, &fsck_options,
&tagged_oid, &tagged_type))
die(_("tag on stdin did not pass our strict fsck check"));
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 5bf88cd..5d3ea44 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -4,67 +4,181 @@
#include "parse-options.h"
#include "midx.h"
#include "trace2.h"
+#include "object-store.h"
+#define BUILTIN_MIDX_WRITE_USAGE \
+ N_("git multi-pack-index [<options>] write [--preferred-pack=<pack>]")
+
+#define BUILTIN_MIDX_VERIFY_USAGE \
+ N_("git multi-pack-index [<options>] verify")
+
+#define BUILTIN_MIDX_EXPIRE_USAGE \
+ N_("git multi-pack-index [<options>] expire")
+
+#define BUILTIN_MIDX_REPACK_USAGE \
+ N_("git multi-pack-index [<options>] repack [--batch-size=<size>]")
+
+static char const * const builtin_multi_pack_index_write_usage[] = {
+ BUILTIN_MIDX_WRITE_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_verify_usage[] = {
+ BUILTIN_MIDX_VERIFY_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_expire_usage[] = {
+ BUILTIN_MIDX_EXPIRE_USAGE,
+ NULL
+};
+static char const * const builtin_multi_pack_index_repack_usage[] = {
+ BUILTIN_MIDX_REPACK_USAGE,
+ NULL
+};
static char const * const builtin_multi_pack_index_usage[] = {
- N_("git multi-pack-index [<options>] (write|verify|expire|repack --batch-size=<size>)"),
+ BUILTIN_MIDX_WRITE_USAGE,
+ BUILTIN_MIDX_VERIFY_USAGE,
+ BUILTIN_MIDX_EXPIRE_USAGE,
+ BUILTIN_MIDX_REPACK_USAGE,
NULL
};
static struct opts_multi_pack_index {
const char *object_dir;
+ const char *preferred_pack;
unsigned long batch_size;
- int progress;
+ unsigned flags;
} opts;
-int cmd_multi_pack_index(int argc, const char **argv,
- const char *prefix)
+static struct option common_opts[] = {
+ OPT_FILENAME(0, "object-dir", &opts.object_dir,
+ N_("object directory containing set of packfile and pack-index pairs")),
+ OPT_BIT(0, "progress", &opts.flags, N_("force progress reporting"), MIDX_PROGRESS),
+ OPT_END(),
+};
+
+static struct option *add_common_options(struct option *prev)
{
- unsigned flags = 0;
+ return parse_options_concat(common_opts, prev);
+}
+
+static int cmd_multi_pack_index_write(int argc, const char **argv)
+{
+ struct option *options;
+ static struct option builtin_multi_pack_index_write_options[] = {
+ OPT_STRING(0, "preferred-pack", &opts.preferred_pack,
+ N_("preferred-pack"),
+ N_("pack for reuse when computing a multi-pack bitmap")),
+ OPT_END(),
+ };
+
+ options = add_common_options(builtin_multi_pack_index_write_options);
+
+ trace2_cmd_mode(argv[0]);
- static struct option builtin_multi_pack_index_options[] = {
- OPT_FILENAME(0, "object-dir", &opts.object_dir,
- N_("object directory containing set of packfile and pack-index pairs")),
- OPT_BOOL(0, "progress", &opts.progress, N_("force progress reporting")),
+ argc = parse_options(argc, argv, NULL,
+ options, builtin_multi_pack_index_write_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_write_usage,
+ options);
+
+ FREE_AND_NULL(options);
+
+ return write_midx_file(opts.object_dir, opts.preferred_pack,
+ opts.flags);
+}
+
+static int cmd_multi_pack_index_verify(int argc, const char **argv)
+{
+ struct option *options = common_opts;
+
+ trace2_cmd_mode(argv[0]);
+
+ argc = parse_options(argc, argv, NULL,
+ options, builtin_multi_pack_index_verify_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_verify_usage,
+ options);
+
+ return verify_midx_file(the_repository, opts.object_dir, opts.flags);
+}
+
+static int cmd_multi_pack_index_expire(int argc, const char **argv)
+{
+ struct option *options = common_opts;
+
+ trace2_cmd_mode(argv[0]);
+
+ argc = parse_options(argc, argv, NULL,
+ options, builtin_multi_pack_index_expire_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_expire_usage,
+ options);
+
+ return expire_midx_packs(the_repository, opts.object_dir, opts.flags);
+}
+
+static int cmd_multi_pack_index_repack(int argc, const char **argv)
+{
+ struct option *options;
+ static struct option builtin_multi_pack_index_repack_options[] = {
OPT_MAGNITUDE(0, "batch-size", &opts.batch_size,
N_("during repack, collect pack-files of smaller size into a batch that is larger than this size")),
OPT_END(),
};
+ options = add_common_options(builtin_multi_pack_index_repack_options);
+
+ trace2_cmd_mode(argv[0]);
+
+ argc = parse_options(argc, argv, NULL,
+ options,
+ builtin_multi_pack_index_repack_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+ if (argc)
+ usage_with_options(builtin_multi_pack_index_repack_usage,
+ options);
+
+ FREE_AND_NULL(options);
+
+ return midx_repack(the_repository, opts.object_dir,
+ (size_t)opts.batch_size, opts.flags);
+}
+
+int cmd_multi_pack_index(int argc, const char **argv,
+ const char *prefix)
+{
+ struct option *builtin_multi_pack_index_options = common_opts;
+
git_config(git_default_config, NULL);
- opts.progress = isatty(2);
+ if (isatty(2))
+ opts.flags |= MIDX_PROGRESS;
argc = parse_options(argc, argv, prefix,
builtin_multi_pack_index_options,
- builtin_multi_pack_index_usage, 0);
+ builtin_multi_pack_index_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
if (!opts.object_dir)
opts.object_dir = get_object_directory();
- if (opts.progress)
- flags |= MIDX_PROGRESS;
if (argc == 0)
+ goto usage;
+
+ if (!strcmp(argv[0], "repack"))
+ return cmd_multi_pack_index_repack(argc, argv);
+ else if (!strcmp(argv[0], "write"))
+ return cmd_multi_pack_index_write(argc, argv);
+ else if (!strcmp(argv[0], "verify"))
+ return cmd_multi_pack_index_verify(argc, argv);
+ else if (!strcmp(argv[0], "expire"))
+ return cmd_multi_pack_index_expire(argc, argv);
+ else {
+usage:
+ error(_("unrecognized subcommand: %s"), argv[0]);
usage_with_options(builtin_multi_pack_index_usage,
builtin_multi_pack_index_options);
-
- if (argc > 1) {
- die(_("too many arguments"));
- return 1;
}
-
- trace2_cmd_mode(argv[0]);
-
- if (!strcmp(argv[0], "repack"))
- return midx_repack(the_repository, opts.object_dir,
- (size_t)opts.batch_size, flags);
- if (opts.batch_size)
- die(_("--batch-size option is only for 'repack' subcommand"));
-
- if (!strcmp(argv[0], "write"))
- return write_midx_file(opts.object_dir, flags);
- if (!strcmp(argv[0], "verify"))
- return verify_midx_file(the_repository, opts.object_dir, flags);
- if (!strcmp(argv[0], "expire"))
- return expire_midx_packs(the_repository, opts.object_dir, flags);
-
- die(_("unrecognized subcommand: %s"), argv[0]);
}
diff --git a/builtin/mv.c b/builtin/mv.c
index 7dac714..3fccdcb 100644
--- a/builtin/mv.c
+++ b/builtin/mv.c
@@ -221,7 +221,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
}
argc += last - first;
}
- } else if (!(ce = cache_file_exists(src, length, ignore_case))) {
+ } else if (!(ce = cache_file_exists(src, length, 0))) {
bad = _("not under version control");
} else if (ce_stage(ce)) {
bad = _("conflicted");
diff --git a/builtin/notes.c b/builtin/notes.c
index 08b8914..74bba39 100644
--- a/builtin/notes.c
+++ b/builtin/notes.c
@@ -730,7 +730,7 @@ static int merge_commit(struct notes_merge_options *o)
else
oidclr(&parent_oid);
- t = xcalloc(1, sizeof(struct notes_tree));
+ CALLOC_ARRAY(t, 1);
init_notes(t, "NOTES_MERGE_PARTIAL", combine_notes_overwrite, 0);
o->local_ref = local_ref_to_free =
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 6d62aaf..6d13cd3 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -815,8 +815,8 @@ static struct reused_chunk {
/* The offset of the first object of this chunk in the original
* packfile. */
off_t original;
- /* The offset of the first object of this chunk in the generated
- * packfile minus "original". */
+ /* The difference for "original" minus the offset of the first object of
+ * this chunk in the generated packfile. */
off_t difference;
} *reused_chunks;
static int reused_chunks_nr;
@@ -1188,7 +1188,8 @@ static int have_duplicate_entry(const struct object_id *oid,
return 1;
}
-static int want_found_object(int exclude, struct packed_git *p)
+static int want_found_object(const struct object_id *oid, int exclude,
+ struct packed_git *p)
{
if (exclude)
return 1;
@@ -1204,27 +1205,82 @@ static int want_found_object(int exclude, struct packed_git *p)
* make sure no copy of this object appears in _any_ pack that makes us
* to omit the object, so we need to check all the packs.
*
- * We can however first check whether these options can possible matter;
+ * We can however first check whether these options can possibly matter;
* if they do not matter we know we want the object in generated pack.
* Otherwise, we signal "-1" at the end to tell the caller that we do
* not know either way, and it needs to check more packs.
*/
- if (!ignore_packed_keep_on_disk &&
- !ignore_packed_keep_in_core &&
- (!local || !have_non_local_packs))
- return 1;
+ /*
+ * Objects in packs borrowed from elsewhere are discarded regardless of
+ * if they appear in other packs that weren't borrowed.
+ */
if (local && !p->pack_local)
return 0;
- if (p->pack_local &&
- ((ignore_packed_keep_on_disk && p->pack_keep) ||
- (ignore_packed_keep_in_core && p->pack_keep_in_core)))
- return 0;
+
+ /*
+ * Then handle .keep first, as we have a fast(er) path there.
+ */
+ if (ignore_packed_keep_on_disk || ignore_packed_keep_in_core) {
+ /*
+ * Set the flags for the kept-pack cache to be the ones we want
+ * to ignore.
+ *
+ * That is, if we are ignoring objects in on-disk keep packs,
+ * then we want to search through the on-disk keep and ignore
+ * the in-core ones.
+ */
+ unsigned flags = 0;
+ if (ignore_packed_keep_on_disk)
+ flags |= ON_DISK_KEEP_PACKS;
+ if (ignore_packed_keep_in_core)
+ flags |= IN_CORE_KEEP_PACKS;
+
+ if (ignore_packed_keep_on_disk && p->pack_keep)
+ return 0;
+ if (ignore_packed_keep_in_core && p->pack_keep_in_core)
+ return 0;
+ if (has_object_kept_pack(oid, flags))
+ return 0;
+ }
+
+ /*
+ * At this point we know definitively that either we don't care about
+ * keep-packs, or the object is not in one. Keep checking other
+ * conditions...
+ */
+ if (!local || !have_non_local_packs)
+ return 1;
/* we don't know yet; keep looking for more packs */
return -1;
}
+static int want_object_in_pack_one(struct packed_git *p,
+ const struct object_id *oid,
+ int exclude,
+ struct packed_git **found_pack,
+ off_t *found_offset)
+{
+ off_t offset;
+
+ if (p == *found_pack)
+ offset = *found_offset;
+ else
+ offset = find_pack_entry_one(oid->hash, p);
+
+ if (offset) {
+ if (!*found_pack) {
+ if (!is_pack_valid(p))
+ return -1;
+ *found_offset = offset;
+ *found_pack = p;
+ }
+ return want_found_object(oid, exclude, p);
+ }
+ return -1;
+}
+
/*
* Check whether we want the object in the pack (e.g., we do not want
* objects found in non-local stores if the "--local" option was used).
@@ -1252,7 +1308,7 @@ static int want_object_in_pack(const struct object_id *oid,
* are present we will determine the answer right now.
*/
if (*found_pack) {
- want = want_found_object(exclude, *found_pack);
+ want = want_found_object(oid, exclude, *found_pack);
if (want != -1)
return want;
}
@@ -1260,51 +1316,20 @@ static int want_object_in_pack(const struct object_id *oid,
for (m = get_multi_pack_index(the_repository); m; m = m->next) {
struct pack_entry e;
if (fill_midx_entry(the_repository, oid, &e, m)) {
- struct packed_git *p = e.p;
- off_t offset;
-
- if (p == *found_pack)
- offset = *found_offset;
- else
- offset = find_pack_entry_one(oid->hash, p);
-
- if (offset) {
- if (!*found_pack) {
- if (!is_pack_valid(p))
- continue;
- *found_offset = offset;
- *found_pack = p;
- }
- want = want_found_object(exclude, p);
- if (want != -1)
- return want;
- }
+ want = want_object_in_pack_one(e.p, oid, exclude, found_pack, found_offset);
+ if (want != -1)
+ return want;
}
}
list_for_each(pos, get_packed_git_mru(the_repository)) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
- off_t offset;
-
- if (p == *found_pack)
- offset = *found_offset;
- else
- offset = find_pack_entry_one(oid->hash, p);
-
- if (offset) {
- if (!*found_pack) {
- if (!is_pack_valid(p))
- continue;
- *found_offset = offset;
- *found_pack = p;
- }
- want = want_found_object(exclude, p);
- if (!exclude && want > 0)
- list_move(&p->mru,
- get_packed_git_mru(the_repository));
- if (want != -1)
- return want;
- }
+ want = want_object_in_pack_one(p, oid, exclude, found_pack, found_offset);
+ if (!exclude && want > 0)
+ list_move(&p->mru,
+ get_packed_git_mru(the_repository));
+ if (want != -1)
+ return want;
}
if (uri_protocols.nr) {
@@ -1635,7 +1660,7 @@ static void add_preferred_base(struct object_id *oid)
}
}
- it = xcalloc(1, sizeof(*it));
+ CALLOC_ARRAY(it, 1);
it->next = pbase_tree;
pbase_tree = it;
@@ -2096,7 +2121,7 @@ static void get_object_details(void)
progress_state = start_progress(_("Counting objects"),
to_pack.nr_objects);
- sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
+ CALLOC_ARRAY(sorted_by_offset, to_pack.nr_objects);
for (i = 0; i < to_pack.nr_objects; i++)
sorted_by_offset[i] = to_pack.objects + i;
QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);
@@ -2428,7 +2453,7 @@ static void find_deltas(struct object_entry **list, unsigned *list_size,
struct unpacked *array;
unsigned long mem_usage = 0;
- array = xcalloc(window, sizeof(struct unpacked));
+ CALLOC_ARRAY(array, window);
for (;;) {
struct object_entry *entry;
@@ -2665,7 +2690,7 @@ static void ll_find_deltas(struct object_entry **list, unsigned list_size,
if (progress > pack_to_stdout)
fprintf_ln(stderr, _("Delta compression using up to %d threads"),
delta_search_threads);
- p = xcalloc(delta_search_threads, sizeof(*p));
+ CALLOC_ARRAY(p, delta_search_threads);
/* Partition the work amongst work threads. */
for (i = 0; i < delta_search_threads; i++) {
@@ -2986,6 +3011,191 @@ static int git_pack_config(const char *k, const char *v, void *cb)
return git_default_config(k, v, cb);
}
+/* Counters for trace2 output when in --stdin-packs mode. */
+static int stdin_packs_found_nr;
+static int stdin_packs_hints_nr;
+
+static int add_object_entry_from_pack(const struct object_id *oid,
+ struct packed_git *p,
+ uint32_t pos,
+ void *_data)
+{
+ struct rev_info *revs = _data;
+ struct object_info oi = OBJECT_INFO_INIT;
+ off_t ofs;
+ enum object_type type;
+
+ display_progress(progress_state, ++nr_seen);
+
+ if (have_duplicate_entry(oid, 0))
+ return 0;
+
+ ofs = nth_packed_object_offset(p, pos);
+ if (!want_object_in_pack(oid, 0, &p, &ofs))
+ return 0;
+
+ oi.typep = &type;
+ if (packed_object_info(the_repository, p, ofs, &oi) < 0)
+ die(_("could not get type of object %s in pack %s"),
+ oid_to_hex(oid), p->pack_name);
+ else if (type == OBJ_COMMIT) {
+ /*
+ * commits in included packs are used as starting points for the
+ * subsequent revision walk
+ */
+ add_pending_oid(revs, NULL, oid, 0);
+ }
+
+ stdin_packs_found_nr++;
+
+ create_object_entry(oid, type, 0, 0, 0, p, ofs);
+
+ return 0;
+}
+
+static void show_commit_pack_hint(struct commit *commit, void *_data)
+{
+ /* nothing to do; commits don't have a namehash */
+}
+
+static void show_object_pack_hint(struct object *object, const char *name,
+ void *_data)
+{
+ struct object_entry *oe = packlist_find(&to_pack, &object->oid);
+ if (!oe)
+ return;
+
+ /*
+ * Our 'to_pack' list was constructed by iterating all objects packed in
+ * included packs, and so doesn't have a non-zero hash field that you
+ * would typically pick up during a reachability traversal.
+ *
+ * Make a best-effort attempt to fill in the ->hash and ->no_try_delta
+ * here using the name in order to perhaps improve the delta selection
+ * process.
+ */
+ oe->hash = pack_name_hash(name);
+ oe->no_try_delta = name && no_try_delta(name);
+
+ stdin_packs_hints_nr++;
+}
+
+static int pack_mtime_cmp(const void *_a, const void *_b)
+{
+ struct packed_git *a = ((const struct string_list_item*)_a)->util;
+ struct packed_git *b = ((const struct string_list_item*)_b)->util;
+
+ /*
+ * order packs by descending mtime so that objects are laid out
+ * roughly as newest-to-oldest
+ */
+ if (a->mtime < b->mtime)
+ return 1;
+ else if (b->mtime < a->mtime)
+ return -1;
+ else
+ return 0;
+}
+
+static void read_packs_list_from_stdin(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list include_packs = STRING_LIST_INIT_DUP;
+ struct string_list exclude_packs = STRING_LIST_INIT_DUP;
+ struct string_list_item *item = NULL;
+
+ struct packed_git *p;
+ struct rev_info revs;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ /*
+ * Use a revision walk to fill in the namehash of objects in the include
+ * packs. To save time, we'll avoid traversing through objects that are
+ * in excluded packs.
+ *
+ * That may cause us to avoid populating all of the namehash fields of
+ * all included objects, but our goal is best-effort, since this is only
+ * an optimization during delta selection.
+ */
+ revs.no_kept_objects = 1;
+ revs.keep_pack_cache_flags |= IN_CORE_KEEP_PACKS;
+ revs.blob_objects = 1;
+ revs.tree_objects = 1;
+ revs.tag_objects = 1;
+ revs.ignore_missing_links = 1;
+
+ while (strbuf_getline(&buf, stdin) != EOF) {
+ if (!buf.len)
+ continue;
+
+ if (*buf.buf == '^')
+ string_list_append(&exclude_packs, buf.buf + 1);
+ else
+ string_list_append(&include_packs, buf.buf);
+
+ strbuf_reset(&buf);
+ }
+
+ string_list_sort(&include_packs);
+ string_list_sort(&exclude_packs);
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ const char *pack_name = pack_basename(p);
+
+ item = string_list_lookup(&include_packs, pack_name);
+ if (!item)
+ item = string_list_lookup(&exclude_packs, pack_name);
+
+ if (item)
+ item->util = p;
+ }
+
+ /*
+ * First handle all of the excluded packs, marking them as kept in-core
+	 * so that later calls to add_object_entry() discard any objects that
+ * are also found in excluded packs.
+ */
+ for_each_string_list_item(item, &exclude_packs) {
+ struct packed_git *p = item->util;
+ if (!p)
+ die(_("could not find pack '%s'"), item->string);
+ p->pack_keep_in_core = 1;
+ }
+
+ /*
+	 * Order packs by descending mtime; use QSORT directly to access the
+ * string_list_item's ->util pointer, which string_list_sort() does not
+ * provide.
+ */
+ QSORT(include_packs.items, include_packs.nr, pack_mtime_cmp);
+
+ for_each_string_list_item(item, &include_packs) {
+ struct packed_git *p = item->util;
+ if (!p)
+ die(_("could not find pack '%s'"), item->string);
+ for_each_object_in_pack(p,
+ add_object_entry_from_pack,
+ &revs,
+ FOR_EACH_OBJECT_PACK_ORDER);
+ }
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+ traverse_commit_list(&revs,
+ show_commit_pack_hint,
+ show_object_pack_hint,
+ NULL);
+
+ trace2_data_intmax("pack-objects", the_repository, "stdin_packs_found",
+ stdin_packs_found_nr);
+ trace2_data_intmax("pack-objects", the_repository, "stdin_packs_hints",
+ stdin_packs_hints_nr);
+
+ strbuf_release(&buf);
+ string_list_clear(&include_packs, 0);
+ string_list_clear(&exclude_packs, 0);
+}
+
static void read_object_list_from_stdin(void)
{
char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
@@ -3176,7 +3386,7 @@ static void add_objects_in_unpacked_packs(void)
for (i = 0; i < p->num_objects; i++) {
nth_packed_object_id(&oid, p, i);
- o = lookup_unknown_object(&oid);
+ o = lookup_unknown_object(the_repository, &oid);
if (!(o->flags & OBJECT_ADDED))
mark_in_pack_object(o, p, &in_pack);
o->flags |= OBJECT_ADDED;
@@ -3317,7 +3527,8 @@ static int get_object_list_from_bitmap(struct rev_info *revs)
&reuse_packfile_bitmap)) {
assert(reuse_packfile_objects);
nr_result += reuse_packfile_objects;
- display_progress(progress_state, nr_result);
+ nr_seen += reuse_packfile_objects;
+ display_progress(progress_state, nr_seen);
}
traverse_bitmap_commit_list(bitmap_git, revs,
@@ -3337,6 +3548,37 @@ static void record_recent_commit(struct commit *commit, void *data)
oid_array_append(&recent_objects, &commit->object.oid);
}
+static int mark_bitmap_preferred_tip(const char *refname,
+ const struct object_id *oid, int flags,
+ void *_data)
+{
+ struct object_id peeled;
+ struct object *object;
+
+ if (!peel_iterated_oid(oid, &peeled))
+ oid = &peeled;
+
+ object = parse_object_or_die(oid, refname);
+ if (object->type == OBJ_COMMIT)
+ object->flags |= NEEDS_BITMAP;
+
+ return 0;
+}
+
+static void mark_bitmap_preferred_tips(void)
+{
+ struct string_list_item *item;
+ const struct string_list *preferred_tips;
+
+ preferred_tips = bitmap_preferred_tips(the_repository);
+ if (!preferred_tips)
+ return;
+
+ for_each_string_list_item(item, preferred_tips) {
+ for_each_ref_in(item->string, mark_bitmap_preferred_tip, NULL);
+ }
+}
+
static void get_object_list(int ac, const char **av)
{
struct rev_info revs;
@@ -3391,6 +3633,9 @@ static void get_object_list(int ac, const char **av)
if (use_delta_islands)
load_delta_islands(the_repository, progress);
+ if (write_bitmap_index)
+ mark_bitmap_preferred_tips();
+
if (prepare_revision_walk(&revs))
die(_("revision walk setup failed"));
mark_edges_uninteresting(&revs, show_edge, sparse);
@@ -3489,6 +3734,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
struct strvec rp = STRVEC_INIT;
int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
int rev_list_index = 0;
+ int stdin_packs = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
@@ -3539,6 +3785,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
N_("include objects referred to by the index"),
1, PARSE_OPT_NONEG),
+ OPT_BOOL(0, "stdin-packs", &stdin_packs,
+ N_("read packs from stdin")),
OPT_BOOL(0, "stdout", &pack_to_stdout,
N_("output pack to stdout")),
OPT_BOOL(0, "include-tag", &include_tag,
@@ -3645,7 +3893,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
use_internal_rev_list = 1;
strvec_push(&rp, "--indexed-objects");
}
- if (rev_list_unpacked) {
+ if (rev_list_unpacked && !stdin_packs) {
use_internal_rev_list = 1;
strvec_push(&rp, "--unpacked");
}
@@ -3690,8 +3938,13 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (filter_options.choice) {
if (!pack_to_stdout)
die(_("cannot use --filter without --stdout"));
+ if (stdin_packs)
+ die(_("cannot use --filter with --stdin-packs"));
}
+ if (stdin_packs && use_internal_rev_list)
+ die(_("cannot use internal rev list with --stdin-packs"));
+
/*
* "soft" reasons not to use bitmaps - for on-disk repack by default we want
*
@@ -3750,7 +4003,13 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (progress)
progress_state = start_progress(_("Enumerating objects"), 0);
- if (!use_internal_rev_list)
+ if (stdin_packs) {
+ /* avoids adding objects in excluded packs */
+ ignore_packed_keep_in_core = 1;
+ read_packs_list_from_stdin();
+ if (rev_list_unpacked)
+ add_unreachable_loose_objects();
+ } else if (!use_internal_rev_list)
read_object_list_from_stdin();
else {
get_object_list(rp.nr, rp.v);
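
The list read by the new --stdin-packs mode above is one pack basename per line, with a leading '^' marking a pack whose objects must be excluded rather than packed; the builtin/repack.c changes later in this patch feed exactly this shape. A hypothetical input (pack names invented for illustration) might be:

    pack-1111111111111111111111111111111111111111.pack
    pack-2222222222222222222222222222222222222222.pack
    ^pack-3333333333333333333333333333333333333333.pack

Objects in the first two packs get packed, any of them that also appear in the third pack are dropped, and commits found in the included packs seed the best-effort namehash walk performed by read_packs_list_from_stdin().
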
diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c
index 6e115a8..7102996 100644
--- a/builtin/pack-redundant.c
+++ b/builtin/pack-redundant.c
@@ -373,7 +373,7 @@ static void sort_pack_list(struct pack_list **pl)
return;
/* prepare an array of packed_list for easier sorting */
- ary = xcalloc(n, sizeof(struct pack_list *));
+ CALLOC_ARRAY(ary, n);
for (n = 0, p = *pl; p; p = p->next)
ary[n++] = p;
diff --git a/builtin/range-diff.c b/builtin/range-diff.c
index 78bc9fa..5031884 100644
--- a/builtin/range-diff.c
+++ b/builtin/range-diff.c
@@ -25,7 +25,7 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
struct option range_diff_options[] = {
OPT_INTEGER(0, "creation-factor",
&range_diff_opts.creation_factor,
- N_("Percentage by which creation is weighted")),
+ N_("percentage by which creation is weighted")),
OPT_BOOL(0, "no-dual-color", &simple_color,
N_("use simple diff colors")),
OPT_PASSTHRU_ARGV(0, "notes", &other_arg,
diff --git a/builtin/rebase.c b/builtin/rebase.c
index de400f9..ed1da17 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -100,7 +100,6 @@ struct rebase_options {
char *strategy, *strategy_opts;
struct strbuf git_format_patch_opt;
int reschedule_failed_exec;
- int use_legacy_rebase;
int reapply_cherry_picks;
int fork_point;
};
@@ -739,6 +738,7 @@ static int finish_rebase(struct rebase_options *opts)
int ret = 0;
delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
+ unlink(git_path_auto_merge(the_repository));
apply_autostash(state_dir_path("autostash", opts));
close_object_store(the_repository->objects);
/*
@@ -1102,11 +1102,6 @@ static int rebase_config(const char *var, const char *value, void *data)
return 0;
}
- if (!strcmp(var, "rebase.usebuiltin")) {
- opts->use_legacy_rebase = !git_config_bool(var, value);
- return 0;
- }
-
if (!strcmp(var, "rebase.backend")) {
return git_config_string(&opts->default_backend, var, value);
}
@@ -1441,11 +1436,6 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
gpg_sign = options.gpg_sign_opt ? "" : NULL;
FREE_AND_NULL(options.gpg_sign_opt);
- if (options.use_legacy_rebase ||
- !git_env_bool("GIT_TEST_REBASE_USE_BUILTIN", -1))
- warning(_("the rebase.useBuiltin support has been removed!\n"
- "See its entry in 'git help config' for details."));
-
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/applying", apply_dir());
	if (file_exists(buf.buf))
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index d26040c..6bc12c8 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -358,7 +358,7 @@ static void proc_receive_ref_append(const char *prefix)
char *p;
int len;
- ref_pattern = xcalloc(1, sizeof(struct proc_receive_ref));
+ CALLOC_ARRAY(ref_pattern, 1);
p = strchr(prefix, ':');
if (p) {
while (prefix < p) {
@@ -1024,7 +1024,7 @@ static int read_proc_receive_report(struct packet_reader *reader,
}
if (new_report) {
if (!hint->report) {
- hint->report = xcalloc(1, sizeof(struct ref_push_report));
+ CALLOC_ARRAY(hint->report, 1);
report = hint->report;
} else {
report = hint->report;
@@ -2313,11 +2313,9 @@ static void prepare_shallow_update(struct shallow_info *si)
ALLOC_ARRAY(si->used_shallow, si->shallow->nr);
assign_shallow_commits_to_refs(si, si->used_shallow, NULL);
- si->need_reachability_test =
- xcalloc(si->shallow->nr, sizeof(*si->need_reachability_test));
- si->reachable =
- xcalloc(si->shallow->nr, sizeof(*si->reachable));
- si->shallow_ref = xcalloc(si->ref->nr, sizeof(*si->shallow_ref));
+ CALLOC_ARRAY(si->need_reachability_test, si->shallow->nr);
+ CALLOC_ARRAY(si->reachable, si->shallow->nr);
+ CALLOC_ARRAY(si->shallow_ref, si->ref->nr);
for (i = 0; i < si->nr_ours; i++)
si->need_reachability_test[si->ours[i]] = 1;
diff --git a/builtin/remote.c b/builtin/remote.c
index d11a558..7f88e6c 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -221,7 +221,7 @@ static int add(int argc, const char **argv)
if (fetch_tags != TAGS_DEFAULT) {
strbuf_reset(&buf);
- strbuf_addf(&buf, "remote.%s.tagopt", name);
+ strbuf_addf(&buf, "remote.%s.tagOpt", name);
git_config_set(buf.buf,
fetch_tags == TAGS_SET ? "--tags" : "--no-tags");
}
@@ -746,7 +746,7 @@ static int mv(int argc, const char **argv)
}
if (info->push_remote_name && !strcmp(info->push_remote_name, rename.old_name)) {
strbuf_reset(&buf);
- strbuf_addf(&buf, "branch.%s.pushremote", item->string);
+ strbuf_addf(&buf, "branch.%s.pushRemote", item->string);
git_config_set(buf.buf, rename.new_name);
}
}
@@ -938,9 +938,6 @@ static int get_remote_ref_states(const char *name,
struct ref_states *states,
int query)
{
- struct transport *transport;
- const struct ref *remote_refs;
-
states->remote = remote_get(name);
if (!states->remote)
return error(_("No such remote: '%s'"), name);
@@ -948,10 +945,12 @@ static int get_remote_ref_states(const char *name,
read_branches();
if (query) {
+ struct transport *transport;
+ const struct ref *remote_refs;
+
transport = transport_get(states->remote, states->remote->url_nr > 0 ?
states->remote->url[0] : NULL);
remote_refs = transport_get_remote_refs(transport, NULL);
- transport_disconnect(transport);
states->queried = 1;
if (query & GET_REF_STATES)
@@ -960,6 +959,7 @@ static int get_remote_ref_states(const char *name,
get_head_names(remote_refs, states);
if (query & GET_PUSH_REF_STATES)
get_push_ref_states(remote_refs, states);
+ transport_disconnect(transport);
} else {
for_each_ref(append_ref_to_tracked_list, states);
string_list_sort(&states->tracked);
diff --git a/builtin/repack.c b/builtin/repack.c
index 01440de..2847fdf 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -297,6 +297,142 @@ static void repack_promisor_objects(const struct pack_objects_args *args,
#define ALL_INTO_ONE 1
#define LOOSEN_UNREACHABLE 2
+struct pack_geometry {
+ struct packed_git **pack;
+ uint32_t pack_nr, pack_alloc;
+ uint32_t split;
+};
+
+static uint32_t geometry_pack_weight(struct packed_git *p)
+{
+ if (open_pack_index(p))
+ die(_("cannot open index for %s"), p->pack_name);
+ return p->num_objects;
+}
+
+static int geometry_cmp(const void *va, const void *vb)
+{
+ uint32_t aw = geometry_pack_weight(*(struct packed_git **)va),
+ bw = geometry_pack_weight(*(struct packed_git **)vb);
+
+ if (aw < bw)
+ return -1;
+ if (aw > bw)
+ return 1;
+ return 0;
+}
+
+static void init_pack_geometry(struct pack_geometry **geometry_p)
+{
+ struct packed_git *p;
+ struct pack_geometry *geometry;
+
+ *geometry_p = xcalloc(1, sizeof(struct pack_geometry));
+ geometry = *geometry_p;
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ if (!pack_kept_objects && p->pack_keep)
+ continue;
+
+ ALLOC_GROW(geometry->pack,
+ geometry->pack_nr + 1,
+ geometry->pack_alloc);
+
+ geometry->pack[geometry->pack_nr] = p;
+ geometry->pack_nr++;
+ }
+
+ QSORT(geometry->pack, geometry->pack_nr, geometry_cmp);
+}
+
+static void split_pack_geometry(struct pack_geometry *geometry, int factor)
+{
+ uint32_t i;
+ uint32_t split;
+ off_t total_size = 0;
+
+ if (!geometry->pack_nr) {
+ geometry->split = geometry->pack_nr;
+ return;
+ }
+
+ /*
+ * First, count the number of packs (in descending order of size) which
+ * already form a geometric progression.
+ */
+ for (i = geometry->pack_nr - 1; i > 0; i--) {
+ struct packed_git *ours = geometry->pack[i];
+ struct packed_git *prev = geometry->pack[i - 1];
+
+ if (unsigned_mult_overflows(factor, geometry_pack_weight(prev)))
+ die(_("pack %s too large to consider in geometric "
+ "progression"),
+ prev->pack_name);
+
+ if (geometry_pack_weight(ours) < factor * geometry_pack_weight(prev))
+ break;
+ }
+
+ split = i;
+
+ if (split) {
+ /*
+ * Move the split one to the right, since the top element in the
+ * last-compared pair can't be in the progression. Only do this
+ * when we split in the middle of the array (otherwise if we got
+ * to the end, then the split is in the right place).
+ */
+ split++;
+ }
+
+ /*
+ * Then, anything to the left of 'split' must be in a new pack. But,
+ * creating that new pack may cause packs in the heavy half to no longer
+ * form a geometric progression.
+ *
+ * Compute an expected size of the new pack, and then determine how many
+ * packs in the heavy half need to be joined into it (if any) to restore
+ * the geometric progression.
+ */
+ for (i = 0; i < split; i++) {
+ struct packed_git *p = geometry->pack[i];
+
+ if (unsigned_add_overflows(total_size, geometry_pack_weight(p)))
+ die(_("pack %s too large to roll up"), p->pack_name);
+ total_size += geometry_pack_weight(p);
+ }
+ for (i = split; i < geometry->pack_nr; i++) {
+ struct packed_git *ours = geometry->pack[i];
+
+ if (unsigned_mult_overflows(factor, total_size))
+ die(_("pack %s too large to roll up"), ours->pack_name);
+
+ if (geometry_pack_weight(ours) < factor * total_size) {
+ if (unsigned_add_overflows(total_size,
+ geometry_pack_weight(ours)))
+ die(_("pack %s too large to roll up"),
+ ours->pack_name);
+
+ split++;
+ total_size += geometry_pack_weight(ours);
+ } else
+ break;
+ }
+
+ geometry->split = split;
+}
+
+static void clear_pack_geometry(struct pack_geometry *geometry)
+{
+ if (!geometry)
+ return;
+
+ free(geometry->pack);
+ geometry->pack_nr = 0;
+ geometry->pack_alloc = 0;
+ geometry->split = 0;
+}
+
int cmd_repack(int argc, const char **argv, const char *prefix)
{
struct child_process cmd = CHILD_PROCESS_INIT;
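
To make the split rule above concrete, consider a hypothetical repository with five packs of 1, 1, 2, 4 and 32 objects (sorted ascending, as init_pack_geometry() arranges them) and --geometric=2. The first loop in split_pack_geometry() scans downward from the largest pack and stops at the pair of 1-object packs, the first pair that violates the factor, so split starts out at 2 with a rolled-up size of 2. The second loop then pulls in the 2-object pack (2 < 2 * 2) and the 4-object pack (4 < 2 * 4) but leaves the 32-object pack alone (32 >= 2 * 8), ending with split = 4: the four small packs are combined into one pack of roughly 8 objects, and the resulting packs of 8 and 32 objects again form a factor-2 progression.
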
@@ -304,6 +440,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
struct string_list names = STRING_LIST_INIT_DUP;
struct string_list rollback = STRING_LIST_INIT_NODUP;
struct string_list existing_packs = STRING_LIST_INIT_DUP;
+ struct pack_geometry *geometry = NULL;
struct strbuf line = STRBUF_INIT;
int i, ext, ret;
FILE *out;
@@ -316,6 +453,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
int no_update_server_info = 0;
struct pack_objects_args po_args = {NULL};
+ int geometric_factor = 0;
struct option builtin_repack_options[] = {
OPT_BIT('a', NULL, &pack_everything,
@@ -356,6 +494,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
N_("repack objects in packs marked with .keep")),
OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
N_("do not repack this pack")),
+ OPT_INTEGER('g', "geometric", &geometric_factor,
+ N_("find a geometric progression with factor <N>")),
OPT_END()
};
@@ -382,6 +522,13 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (write_bitmaps && !(pack_everything & ALL_INTO_ONE))
die(_(incremental_bitmap_conflict_error));
+ if (geometric_factor) {
+ if (pack_everything)
+ die(_("--geometric is incompatible with -A, -a"));
+ init_pack_geometry(&geometry);
+ split_pack_geometry(geometry, geometric_factor);
+ }
+
packdir = mkpathdup("%s/pack", get_object_directory());
packtmp = mkpathdup("%s/.tmp-%d-pack", packdir, (int)getpid());
@@ -396,9 +543,21 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
strvec_pushf(&cmd.args, "--keep-pack=%s",
keep_pack_list.items[i].string);
strvec_push(&cmd.args, "--non-empty");
- strvec_push(&cmd.args, "--all");
- strvec_push(&cmd.args, "--reflog");
- strvec_push(&cmd.args, "--indexed-objects");
+ if (!geometry) {
+ /*
+ * We need to grab all reachable objects, including those that
+ * are reachable from reflogs and the index.
+ *
+ * When repacking into a geometric progression of packs,
+		 * however, we run 'git pack-objects --stdin-packs', which is
+		 * not about packing objects based on reachability but about
+		 * repacking all objects in the specified packs and loose ones
+ * (indeed, --stdin-packs is incompatible with these options).
+ */
+ strvec_push(&cmd.args, "--all");
+ strvec_push(&cmd.args, "--reflog");
+ strvec_push(&cmd.args, "--indexed-objects");
+ }
if (has_promisor_remote())
strvec_push(&cmd.args, "--exclude-promisor-objects");
if (write_bitmaps > 0)
@@ -429,17 +588,37 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
strvec_push(&cmd.env_array, "GIT_REF_PARANOIA=1");
}
}
+ } else if (geometry) {
+ strvec_push(&cmd.args, "--stdin-packs");
+ strvec_push(&cmd.args, "--unpacked");
} else {
strvec_push(&cmd.args, "--unpacked");
strvec_push(&cmd.args, "--incremental");
}
- cmd.no_stdin = 1;
+ if (geometry)
+ cmd.in = -1;
+ else
+ cmd.no_stdin = 1;
ret = start_command(&cmd);
if (ret)
return ret;
+ if (geometry) {
+ FILE *in = xfdopen(cmd.in, "w");
+ /*
+ * The resulting pack should contain all objects in packs that
+ * are going to be rolled up, but exclude objects in packs which
+ * are being left alone.
+ */
+ for (i = 0; i < geometry->split; i++)
+ fprintf(in, "%s\n", pack_basename(geometry->pack[i]));
+ for (i = geometry->split; i < geometry->pack_nr; i++)
+ fprintf(in, "^%s\n", pack_basename(geometry->pack[i]));
+ fclose(in);
+ }
+
out = xfdopen(cmd.out, "r");
while (strbuf_getline_lf(&line, out) != EOF) {
if (line.len != the_hash_algo->hexsz)
@@ -507,6 +686,25 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
if (!string_list_has_string(&names, sha1))
remove_redundant_pack(packdir, item->string);
}
+
+ if (geometry) {
+ struct strbuf buf = STRBUF_INIT;
+
+ uint32_t i;
+ for (i = 0; i < geometry->split; i++) {
+ struct packed_git *p = geometry->pack[i];
+ if (string_list_has_string(&names,
+ hash_to_hex(p->hash)))
+ continue;
+
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, pack_basename(p));
+ strbuf_strip_suffix(&buf, ".pack");
+
+ remove_redundant_pack(packdir, buf.buf);
+ }
+ strbuf_release(&buf);
+ }
if (!po_args.quiet && isatty(2))
opts |= PRUNE_PACKED_VERBOSE;
prune_packed_objects(opts);
@@ -523,11 +721,12 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
remove_temporary_files();
if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0))
- write_midx_file(get_object_directory(), 0);
+ write_midx_file(get_object_directory(), NULL, 0);
string_list_clear(&names, 0);
string_list_clear(&rollback, 0);
string_list_clear(&existing_packs, 0);
+ clear_pack_geometry(geometry);
strbuf_release(&line);
return 0;
diff --git a/builtin/reset.c b/builtin/reset.c
index c635b06..43e855c 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -425,7 +425,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
dwim_ref(rev, strlen(rev), &dummy, &ref, 0);
if (ref && !starts_with(ref, "refs/"))
- ref = NULL;
+ FREE_AND_NULL(ref);
err = reset_index(ref, &oid, reset_type, quiet);
if (reset_type == KEEP && !err)
diff --git a/builtin/revert.c b/builtin/revert.c
index 314a86c..237f2f1 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -182,7 +182,7 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
"--signoff", opts->signoff,
"--no-commit", opts->no_commit,
"-x", opts->record_origin,
- "--edit", opts->edit,
+ "--edit", opts->edit > 0,
NULL);
if (cmd) {
@@ -230,8 +230,6 @@ int cmd_revert(int argc, const char **argv, const char *prefix)
struct replay_opts opts = REPLAY_OPTS_INIT;
int res;
- if (isatty(0))
- opts.edit = 1;
opts.action = REPLAY_REVERT;
sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 585343f..a4bdd7c 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -65,7 +65,7 @@ static int sparse_checkout_list(int argc, const char **argv)
pl.use_cone_patterns = core_sparse_checkout_cone;
sparse_filename = get_sparse_checkout_filename();
- res = add_patterns_from_file_to_list(sparse_filename, "", 0, &pl, NULL);
+ res = add_patterns_from_file_to_list(sparse_filename, "", 0, &pl, NULL, 0);
free(sparse_filename);
if (res < 0) {
@@ -333,7 +333,7 @@ static int sparse_checkout_init(int argc, const char **argv)
memset(&pl, 0, sizeof(pl));
sparse_filename = get_sparse_checkout_filename();
- res = add_patterns_from_file_to_list(sparse_filename, "", 0, &pl, NULL);
+ res = add_patterns_from_file_to_list(sparse_filename, "", 0, &pl, NULL, 0);
if (init_opts.sparse_index >= 0) {
if (set_sparse_index_config(the_repository, init_opts.sparse_index) < 0)
@@ -506,7 +506,7 @@ static void add_patterns_cone_mode(int argc, const char **argv,
existing.use_cone_patterns = core_sparse_checkout_cone;
if (add_patterns_from_file_to_list(sparse_filename, "", 0,
- &existing, NULL))
+ &existing, NULL, 0))
die(_("unable to load existing sparse-checkout patterns"));
free(sparse_filename);
@@ -530,7 +530,7 @@ static void add_patterns_literal(int argc, const char **argv,
{
char *sparse_filename = get_sparse_checkout_filename();
if (add_patterns_from_file_to_list(sparse_filename, "", 0,
- pl, NULL))
+ pl, NULL, 0))
die(_("unable to load existing sparse-checkout patterns"));
free(sparse_filename);
add_patterns_from_input(pl, argc, argv);
diff --git a/builtin/stash.c b/builtin/stash.c
index 6fb7178..d68ed78 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -10,11 +10,13 @@
#include "strvec.h"
#include "run-command.h"
#include "dir.h"
+#include "entry.h"
#include "rerere.h"
#include "revision.h"
#include "log-tree.h"
#include "diffcore.h"
#include "exec-cmd.h"
+#include "entry.h"
#define INCLUDE_ALL_FILES 2
@@ -768,6 +770,7 @@ static int list_stash(int argc, const char **argv, const char *prefix)
static int show_stat = 1;
static int show_patch;
+static int show_include_untracked;
static int use_legacy_stash;
static int git_stash_config(const char *var, const char *value, void *cb)
@@ -780,6 +783,10 @@ static int git_stash_config(const char *var, const char *value, void *cb)
show_patch = git_config_bool(var, value);
return 0;
}
+ if (!strcmp(var, "stash.showincludeuntracked")) {
+ show_include_untracked = git_config_bool(var, value);
+ return 0;
+ }
if (!strcmp(var, "stash.usebuiltin")) {
use_legacy_stash = !git_config_bool(var, value);
return 0;
@@ -787,6 +794,33 @@ static int git_stash_config(const char *var, const char *value, void *cb)
return git_diff_basic_config(var, value, cb);
}
+static void diff_include_untracked(const struct stash_info *info, struct diff_options *diff_opt)
+{
+ const struct object_id *oid[] = { &info->w_commit, &info->u_tree };
+ struct tree *tree[ARRAY_SIZE(oid)];
+ struct tree_desc tree_desc[ARRAY_SIZE(oid)];
+ struct unpack_trees_options unpack_tree_opt = { 0 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(oid); i++) {
+ tree[i] = parse_tree_indirect(oid[i]);
+ if (parse_tree(tree[i]) < 0)
+ die(_("failed to parse tree"));
+ init_tree_desc(&tree_desc[i], tree[i]->buffer, tree[i]->size);
+ }
+
+ unpack_tree_opt.head_idx = -1;
+ unpack_tree_opt.src_index = &the_index;
+ unpack_tree_opt.dst_index = &the_index;
+ unpack_tree_opt.merge = 1;
+ unpack_tree_opt.fn = stash_worktree_untracked_merge;
+
+ if (unpack_trees(ARRAY_SIZE(tree_desc), tree_desc, &unpack_tree_opt))
+ die(_("failed to unpack trees"));
+
+ do_diff_cache(&info->b_commit, diff_opt);
+}
+
static int show_stash(int argc, const char **argv, const char *prefix)
{
int i;
@@ -795,7 +829,18 @@ static int show_stash(int argc, const char **argv, const char *prefix)
struct rev_info rev;
struct strvec stash_args = STRVEC_INIT;
struct strvec revision_args = STRVEC_INIT;
+ enum {
+ UNTRACKED_NONE,
+ UNTRACKED_INCLUDE,
+ UNTRACKED_ONLY
+ } show_untracked = UNTRACKED_NONE;
struct option options[] = {
+ OPT_SET_INT('u', "include-untracked", &show_untracked,
+ N_("include untracked files in the stash"),
+ UNTRACKED_INCLUDE),
+ OPT_SET_INT_F(0, "only-untracked", &show_untracked,
+ N_("only show untracked files in the stash"),
+ UNTRACKED_ONLY, PARSE_OPT_NONEG),
OPT_END()
};
@@ -803,6 +848,10 @@ static int show_stash(int argc, const char **argv, const char *prefix)
git_config(git_diff_ui_config, NULL);
init_revisions(&rev, prefix);
+ argc = parse_options(argc, argv, prefix, options, git_stash_show_usage,
+ PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN |
+ PARSE_OPT_KEEP_DASHDASH);
+
strvec_push(&revision_args, argv[0]);
for (i = 1; i < argc; i++) {
if (argv[i][0] != '-')
@@ -827,6 +876,9 @@ static int show_stash(int argc, const char **argv, const char *prefix)
if (show_patch)
rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
+ if (show_include_untracked)
+ show_untracked = UNTRACKED_INCLUDE;
+
if (!show_stat && !show_patch) {
free_stash_info(&info);
return 0;
@@ -845,7 +897,17 @@ static int show_stash(int argc, const char **argv, const char *prefix)
rev.diffopt.flags.recursive = 1;
setup_diff_pager(&rev.diffopt);
- diff_tree_oid(&info.b_commit, &info.w_commit, "", &rev.diffopt);
+ switch (show_untracked) {
+ case UNTRACKED_NONE:
+ diff_tree_oid(&info.b_commit, &info.w_commit, "", &rev.diffopt);
+ break;
+ case UNTRACKED_ONLY:
+ diff_root_tree_oid(&info.u_tree, "", &rev.diffopt);
+ break;
+ case UNTRACKED_INCLUDE:
+ diff_include_untracked(&info, &rev.diffopt);
+ break;
+ }
log_tree_diff_flush(&rev);
free_stash_info(&info);
diff --git a/builtin/symbolic-ref.c b/builtin/symbolic-ref.c
index 80237f0..e547a08 100644
--- a/builtin/symbolic-ref.c
+++ b/builtin/symbolic-ref.c
@@ -24,9 +24,11 @@ static int check_symref(const char *HEAD, int quiet, int shorten, int print)
return 1;
}
if (print) {
+ char *to_free = NULL;
if (shorten)
- refname = shorten_unambiguous_ref(refname, 0);
+ refname = to_free = shorten_unambiguous_ref(refname, 0);
puts(refname);
+ free(to_free);
}
return 0;
}
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index dd4a75e..4a70b17 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -46,7 +46,7 @@ static struct obj_buffer *lookup_object_buffer(struct object *base)
static void add_object_buffer(struct object *object, char *buffer, unsigned long size)
{
struct obj_buffer *obj;
- obj = xcalloc(1, sizeof(struct obj_buffer));
+ CALLOC_ARRAY(obj, 1);
obj->buffer = buffer;
obj->size = size;
if (add_decoration(&obj_decorate, object, obj))
@@ -187,7 +187,8 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
* that have reachability requirements and calls this function.
* Verify its reachability and validity recursively and write it out.
*/
-static int check_object(struct object *obj, int type, void *data, struct fsck_options *options)
+static int check_object(struct object *obj, enum object_type type,
+ void *data, struct fsck_options *options)
{
struct obj_buffer *obj_buf;
@@ -500,7 +501,7 @@ static void unpack_all(void)
if (!quiet)
progress = start_progress(_("Unpacking objects"), nr_objects);
- obj_list = xcalloc(nr_objects, sizeof(*obj_list));
+ CALLOC_ARRAY(obj_list, nr_objects);
for (i = 0; i < nr_objects; i++) {
unpack_one(i);
display_progress(progress, i + 1);
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 1cd5c20..8771453 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -446,16 +446,18 @@ static void print_preparing_worktree_line(int detach,
static const char *dwim_branch(const char *path, const char **new_branch)
{
int n;
+ int branch_exists;
const char *s = worktree_basename(path, &n);
const char *branchname = xstrndup(s, n);
struct strbuf ref = STRBUF_INIT;
UNLEAK(branchname);
- if (!strbuf_check_branch_ref(&ref, branchname) &&
- ref_exists(ref.buf)) {
- strbuf_release(&ref);
+
+ branch_exists = !strbuf_check_branch_ref(&ref, branchname) &&
+ ref_exists(ref.buf);
+ strbuf_release(&ref);
+ if (branch_exists)
return branchname;
- }
*new_branch = branchname;
if (guess_remote) {
diff --git a/bulk-checkin.c b/bulk-checkin.c
index 583aacb..6f3c97c 100644
--- a/bulk-checkin.c
+++ b/bulk-checkin.c
@@ -211,7 +211,7 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
/* Note: idx is non-NULL when we are writing */
if ((flags & HASH_WRITE_OBJECT) != 0)
- idx = xcalloc(1, sizeof(*idx));
+ CALLOC_ARRAY(idx, 1);
already_hashed_to = 0;
diff --git a/cache-tree.c b/cache-tree.c
index 11bf1fc..45e5866 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -585,7 +585,7 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
* hence +2.
*/
it->subtree_alloc = subtree_nr + 2;
- it->down = xcalloc(it->subtree_alloc, sizeof(struct cache_tree_sub *));
+ CALLOC_ARRAY(it->down, it->subtree_alloc);
for (i = 0; i < subtree_nr; i++) {
/* read each subtree */
struct cache_tree *sub;
diff --git a/cache.h b/cache.h
index b7e20e9..b785ffb 100644
--- a/cache.h
+++ b/cache.h
@@ -1638,30 +1638,6 @@ const char *show_ident_date(const struct ident_split *id,
*/
int ident_cmp(const struct ident_split *, const struct ident_split *);
-struct checkout {
- struct index_state *istate;
- const char *base_dir;
- int base_dir_len;
- struct delayed_checkout *delayed_checkout;
- struct checkout_metadata meta;
- unsigned force:1,
- quiet:1,
- not_new:1,
- clone:1,
- refresh_cache:1;
-};
-#define CHECKOUT_INIT { NULL, "" }
-
-#define TEMPORARY_FILENAME_LENGTH 25
-int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath, int *nr_checkouts);
-void enable_delayed_checkout(struct checkout *state);
-int finish_delayed_checkout(struct checkout *state, int *nr_checkouts);
-/*
- * Unlink the last component and schedule the leading directories for
- * removal, such that empty directories get removed.
- */
-void unlink_entry(const struct cache_entry *ce);
-
struct cache_def {
struct strbuf path;
int flags;
@@ -1676,8 +1652,9 @@ static inline void cache_def_clear(struct cache_def *cache)
int has_symlink_leading_path(const char *name, int len);
int threaded_has_symlink_leading_path(struct cache_def *, const char *, int);
-int check_leading_path(const char *name, int len);
+int check_leading_path(const char *name, int len, int warn_on_lstat_err);
int has_dirs_only_path(const char *name, int len, int prefix_len);
+void invalidate_lstat_cache(void);
void schedule_dir_for_removal(const char *name, int len);
void remove_scheduled_dirs(void);
diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh
index a66b5e8..d19be40 100755
--- a/ci/run-build-and-tests.sh
+++ b/ci/run-build-and-tests.sh
@@ -16,6 +16,7 @@ linux-gcc)
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
make test
export GIT_TEST_SPLIT_INDEX=yes
+ export GIT_TEST_MERGE_ALGORITHM=recursive
export GIT_TEST_FULL_IN_PACK_ARRAY=true
export GIT_TEST_OE_SIZE=10
export GIT_TEST_OE_DELTA_SIZE=5
diff --git a/combine-diff.c b/combine-diff.c
index 9228aeb..06635f9 100644
--- a/combine-diff.c
+++ b/combine-diff.c
@@ -214,11 +214,11 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase,
* - Else if we have NEW, insert newend lline into base and
* consume newend
*/
- lcs = xcalloc(st_add(origbaselen, 1), sizeof(int*));
- directions = xcalloc(st_add(origbaselen, 1), sizeof(enum coalesce_direction*));
+ CALLOC_ARRAY(lcs, st_add(origbaselen, 1));
+ CALLOC_ARRAY(directions, st_add(origbaselen, 1));
for (i = 0; i < origbaselen + 1; i++) {
- lcs[i] = xcalloc(st_add(lennew, 1), sizeof(int));
- directions[i] = xcalloc(st_add(lennew, 1), sizeof(enum coalesce_direction));
+ CALLOC_ARRAY(lcs[i], st_add(lennew, 1));
+ CALLOC_ARRAY(directions[i], st_add(lennew, 1));
directions[i][0] = BASE;
}
for (j = 1; j < lennew + 1; j++)
@@ -398,8 +398,8 @@ static void consume_hunk(void *state_,
state->lost_bucket = &state->sline[state->nb-1];
}
if (!state->sline[state->nb-1].p_lno)
- state->sline[state->nb-1].p_lno =
- xcalloc(state->num_parent, sizeof(unsigned long));
+ CALLOC_ARRAY(state->sline[state->nb - 1].p_lno,
+ state->num_parent);
state->sline[state->nb-1].p_lno[state->n] = state->ob;
}
@@ -1159,7 +1159,7 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent,
if (result_size && result[result_size-1] != '\n')
cnt++; /* incomplete line */
- sline = xcalloc(st_add(cnt, 2), sizeof(*sline));
+ CALLOC_ARRAY(sline, st_add(cnt, 2));
sline[0].bol = result;
for (lno = 0, cp = result; cp < result + result_size; cp++) {
if (*cp == '\n') {
@@ -1178,7 +1178,7 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent,
/* Even p_lno[cnt+1] is valid -- that is for the end line number
* for deletion hunk at the end.
*/
- sline[0].p_lno = xcalloc(st_mult(st_add(cnt, 2), num_parent), sizeof(unsigned long));
+ CALLOC_ARRAY(sline[0].p_lno, st_mult(st_add(cnt, 2), num_parent));
for (lno = 0; lno <= cnt; lno++)
sline[lno+1].p_lno = sline[lno].p_lno + num_parent;
@@ -1319,7 +1319,7 @@ static struct diff_filepair *combined_pair(struct combine_diff_path *p,
struct diff_filespec *pool;
pair = xmalloc(sizeof(*pair));
- pool = xcalloc(st_add(num_parent, 1), sizeof(struct diff_filespec));
+ CALLOC_ARRAY(pool, st_add(num_parent, 1));
pair->one = pool + 1;
pair->two = pool;
@@ -1348,7 +1348,7 @@ static void handle_combined_callback(struct diff_options *opt,
struct diff_queue_struct q;
int i;
- q.queue = xcalloc(num_paths, sizeof(struct diff_filepair *));
+ CALLOC_ARRAY(q.queue, num_paths);
q.alloc = num_paths;
q.nr = num_paths;
for (i = 0, p = paths; p; p = p->next)
diff --git a/commit-graph.c b/commit-graph.c
index ca025ce..f18380b 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -96,6 +96,13 @@ define_commit_slab(commit_graph_data_slab, struct commit_graph_data);
static struct commit_graph_data_slab commit_graph_data_slab =
COMMIT_SLAB_INIT(1, commit_graph_data_slab);
+static int get_configured_generation_version(struct repository *r)
+{
+ int version = 2;
+ repo_config_get_int(r, "commitgraph.generationversion", &version);
+ return version;
+}
+
uint32_t commit_graph_position(const struct commit *c)
{
struct commit_graph_data *data =
@@ -394,10 +401,13 @@ struct commit_graph *parse_commit_graph(struct repository *r,
pair_chunk(cf, GRAPH_CHUNKID_DATA, &graph->chunk_commit_data);
pair_chunk(cf, GRAPH_CHUNKID_EXTRAEDGES, &graph->chunk_extra_edges);
pair_chunk(cf, GRAPH_CHUNKID_BASE, &graph->chunk_base_graphs);
- pair_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA,
- &graph->chunk_generation_data);
- pair_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW,
- &graph->chunk_generation_data_overflow);
+
+ if (get_configured_generation_version(r) >= 2) {
+ pair_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA,
+ &graph->chunk_generation_data);
+ pair_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW,
+ &graph->chunk_generation_data_overflow);
+ }
if (r->settings.commit_graph_read_changed_paths) {
pair_chunk(cf, GRAPH_CHUNKID_BLOOMINDEXES,
@@ -516,7 +526,7 @@ static struct commit_graph *load_commit_graph_chain(struct repository *r,
return NULL;
count = st.st_size / (the_hash_algo->hexsz + 1);
- oids = xcalloc(count, sizeof(struct object_id));
+ CALLOC_ARRAY(oids, count);
prepare_alt_odb(r);
@@ -1839,8 +1849,6 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
add_chunk(cf, GRAPH_CHUNKID_DATA, (hashsz + 16) * ctx->commits.nr,
write_graph_chunk_data);
- if (git_env_bool(GIT_TEST_COMMIT_GRAPH_NO_GDAT, 0))
- ctx->write_generation_data = 0;
if (ctx->write_generation_data)
add_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA,
sizeof(uint32_t) * ctx->commits.nr,
@@ -2223,6 +2231,7 @@ int write_commit_graph(struct object_directory *odb,
enum commit_graph_write_flags flags,
const struct commit_graph_opts *opts)
{
+ struct repository *r = the_repository;
struct write_commit_graph_context *ctx;
uint32_t i;
int res = 0;
@@ -2230,23 +2239,23 @@ int write_commit_graph(struct object_directory *odb,
struct bloom_filter_settings bloom_settings = DEFAULT_BLOOM_FILTER_SETTINGS;
struct topo_level_slab topo_levels;
- prepare_repo_settings(the_repository);
- if (!the_repository->settings.core_commit_graph) {
+ prepare_repo_settings(r);
+ if (!r->settings.core_commit_graph) {
warning(_("attempting to write a commit-graph, but 'core.commitGraph' is disabled"));
return 0;
}
- if (!commit_graph_compatible(the_repository))
+ if (!commit_graph_compatible(r))
return 0;
- ctx = xcalloc(1, sizeof(struct write_commit_graph_context));
- ctx->r = the_repository;
+ CALLOC_ARRAY(ctx, 1);
+ ctx->r = r;
ctx->odb = odb;
ctx->append = flags & COMMIT_GRAPH_WRITE_APPEND ? 1 : 0;
ctx->report_progress = flags & COMMIT_GRAPH_WRITE_PROGRESS ? 1 : 0;
ctx->split = flags & COMMIT_GRAPH_WRITE_SPLIT ? 1 : 0;
ctx->opts = opts;
ctx->total_bloom_filter_data_size = 0;
- ctx->write_generation_data = 1;
+ ctx->write_generation_data = (get_configured_generation_version(r) == 2);
ctx->num_generation_data_overflows = 0;
bloom_settings.bits_per_entry = git_env_ulong("GIT_TEST_BLOOM_SETTINGS_BITS_PER_ENTRY",
diff --git a/commit-graph.h b/commit-graph.h
index 97f3497..96c24fb 100644
--- a/commit-graph.h
+++ b/commit-graph.h
@@ -6,7 +6,6 @@
#include "oidset.h"
#define GIT_TEST_COMMIT_GRAPH "GIT_TEST_COMMIT_GRAPH"
-#define GIT_TEST_COMMIT_GRAPH_NO_GDAT "GIT_TEST_COMMIT_GRAPH_NO_GDAT"
#define GIT_TEST_COMMIT_GRAPH_DIE_ON_PARSE "GIT_TEST_COMMIT_GRAPH_DIE_ON_PARSE"
#define GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS "GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS"
diff --git a/commit-reach.c b/commit-reach.c
index 2ea84d3..c226ee3 100644
--- a/commit-reach.c
+++ b/commit-reach.c
@@ -183,7 +183,7 @@ static int remove_redundant_no_gen(struct repository *r,
int *filled_index;
int i, j, filled;
- work = xcalloc(cnt, sizeof(*work));
+ CALLOC_ARRAY(work, cnt);
redundant = xcalloc(cnt, 1);
ALLOC_ARRAY(filled_index, cnt - 1);
@@ -399,7 +399,7 @@ static struct commit_list *get_merge_bases_many_0(struct repository *r,
/* There are more than one */
cnt = commit_list_count(result);
- rslt = xcalloc(cnt, sizeof(*rslt));
+ CALLOC_ARRAY(rslt, cnt);
for (list = result, i = 0; list; list = list->next)
rslt[i++] = list->item;
free_commit_list(result);
@@ -541,7 +541,7 @@ struct commit_list *reduce_heads(struct commit_list *heads)
p->item->object.flags |= STALE;
num_head++;
}
- array = xcalloc(num_head, sizeof(*array));
+ CALLOC_ARRAY(array, num_head);
for (p = heads, i = 0; p; p = p->next) {
if (p->item->object.flags & STALE) {
array[i++] = p->item;
diff --git a/commit.c b/commit.c
index 6ccd774..8ea55a4 100644
--- a/commit.c
+++ b/commit.c
@@ -535,6 +535,20 @@ int find_commit_subject(const char *commit_buffer, const char **subject)
return eol - p;
}
+size_t commit_subject_length(const char *body)
+{
+ const char *p = body;
+ while (*p) {
+ const char *next = skip_blank_lines(p);
+ if (next != p)
+ break;
+ p = strchrnul(p, '\n');
+ if (*p)
+ p++;
+ }
+ return p - body;
+}
+
struct commit_list *commit_list_insert(struct commit *item, struct commit_list **list_p)
{
struct commit_list *new_list = xmalloc(sizeof(struct commit_list));
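
As a quick check of the commit_subject_length() helper added above: given a hypothetical log message "One-line subject\n\nAnd the body.\n", it advances line by line until it reaches the blank separator and returns 17, the length of "One-line subject\n" including the trailing newline. A multi-line subject paragraph is counted in full, up to but not including the first blank line.
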
@@ -1171,7 +1185,7 @@ static void handle_signed_tag(struct commit *parent, struct commit_extra_header
* if (verify_signed_buffer(buf, len, buf + len, size - len, ...))
* warn("warning: signed tag unverified.");
*/
- mergetag = xcalloc(1, sizeof(*mergetag));
+ CALLOC_ARRAY(mergetag, 1);
mergetag->key = xstrdup("mergetag");
mergetag->value = buf;
mergetag->len = size;
@@ -1336,7 +1350,7 @@ static struct commit_extra_header *read_commit_extra_header_lines(
excluded_header_field(line, eof - line, exclude))
continue;
- it = xcalloc(1, sizeof(*it));
+ CALLOC_ARRAY(it, 1);
it->key = xmemdupz(line, eof-line);
*tail = it;
tail = &it->next;
diff --git a/commit.h b/commit.h
index 49c0f50..df42eb4 100644
--- a/commit.h
+++ b/commit.h
@@ -167,6 +167,9 @@ const void *detach_commit_buffer(struct commit *, unsigned long *sizep);
/* Find beginning and length of commit subject. */
int find_commit_subject(const char *commit_buffer, const char **subject);
+/* Return length of the commit subject from commit log message. */
+size_t commit_subject_length(const char *body);
+
struct commit_list *commit_list_insert(struct commit *item,
struct commit_list **list);
int commit_list_contains(struct commit *item,
diff --git a/compat/mingw.c b/compat/mingw.c
index a00f331..aa647b3 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -367,6 +367,8 @@ int mingw_rmdir(const char *pathname)
ask_yes_no_if_possible("Deletion of directory '%s' failed. "
"Should I try again?", pathname))
ret = _wrmdir(wpathname);
+ if (!ret)
+ invalidate_lstat_cache();
return ret;
}
@@ -683,6 +685,8 @@ ssize_t mingw_write(int fd, const void *buf, size_t len)
int mingw_access(const char *filename, int mode)
{
wchar_t wfilename[MAX_PATH];
+ if (!strcmp("nul", filename) || !strcmp("/dev/null", filename))
+ return 0;
if (xutftowcs_path(wfilename, filename) < 0)
return -1;
/* X_OK is not supported by the MSVCRT version */
diff --git a/compat/precompose_utf8.c b/compat/precompose_utf8.c
index ec56056..cce1d57 100644
--- a/compat/precompose_utf8.c
+++ b/compat/precompose_utf8.c
@@ -60,10 +60,12 @@ void probe_utf8_pathname_composition(void)
strbuf_release(&path);
}
-static inline const char *precompose_string_if_needed(const char *in)
+const char *precompose_string_if_needed(const char *in)
{
size_t inlen;
size_t outlen;
+ if (!in)
+ return NULL;
if (has_non_ascii(in, (size_t)-1, &inlen)) {
iconv_t ic_prec;
char *out;
@@ -96,10 +98,7 @@ const char *precompose_argv_prefix(int argc, const char **argv, const char *pref
argv[i] = precompose_string_if_needed(argv[i]);
i++;
}
- if (prefix) {
- prefix = precompose_string_if_needed(prefix);
- }
- return prefix;
+ return precompose_string_if_needed(prefix);
}
diff --git a/compat/precompose_utf8.h b/compat/precompose_utf8.h
index d70b846..fea06cf 100644
--- a/compat/precompose_utf8.h
+++ b/compat/precompose_utf8.h
@@ -29,6 +29,7 @@ typedef struct {
} PREC_DIR;
const char *precompose_argv_prefix(int argc, const char **argv, const char *prefix);
+const char *precompose_string_if_needed(const char *in);
void probe_utf8_pathname_composition(void);
PREC_DIR *precompose_utf8_opendir(const char *dirname);
diff --git a/compat/simple-ipc/ipc-shared.c b/compat/simple-ipc/ipc-shared.c
new file mode 100644
index 0000000..1edec81
--- /dev/null
+++ b/compat/simple-ipc/ipc-shared.c
@@ -0,0 +1,28 @@
+#include "cache.h"
+#include "simple-ipc.h"
+#include "strbuf.h"
+#include "pkt-line.h"
+#include "thread-utils.h"
+
+#ifdef SUPPORTS_SIMPLE_IPC
+
+int ipc_server_run(const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data)
+{
+ struct ipc_server_data *server_data = NULL;
+ int ret;
+
+ ret = ipc_server_run_async(&server_data, path, opts,
+ application_cb, application_data);
+ if (ret)
+ return ret;
+
+ ret = ipc_server_await(server_data);
+
+ ipc_server_free(server_data);
+
+ return ret;
+}
+
+#endif /* SUPPORTS_SIMPLE_IPC */
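
As a rough illustration of how a caller might sit on top of the blocking wrapper above, here is a minimal sketch of an echo server. It is not part of the patch: the callback and reply signatures are inferred from worker_thread__do_io() and do_io_reply_callback() in the unix-socket backend below, and the fields of struct ipc_server_opts are not shown in this hunk, so the options are simply zero-initialized here as an assumption.

#include "cache.h"
#include "simple-ipc.h"

/*
 * Hypothetical handler: echo each request back to the client. The
 * argument shape mirrors the application_cb call made by the worker
 * threads; returning SIMPLE_IPC_QUIT asks the server threads to stop.
 */
static int echo_cb(void *application_data, const char *request,
		   ipc_server_reply_cb *reply_cb,
		   struct ipc_server_reply_data *reply_data)
{
	if (!strcmp(request, "quit"))
		return SIMPLE_IPC_QUIT;
	return reply_cb(reply_data, request, strlen(request));
}

static int run_echo_server(const char *path)
{
	struct ipc_server_opts opts = { 0 };

	/* blocks until a handler returns SIMPLE_IPC_QUIT */
	return ipc_server_run(path, &opts, echo_cb, NULL);
}
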
diff --git a/compat/simple-ipc/ipc-unix-socket.c b/compat/simple-ipc/ipc-unix-socket.c
new file mode 100644
index 0000000..38689b2
--- /dev/null
+++ b/compat/simple-ipc/ipc-unix-socket.c
@@ -0,0 +1,999 @@
+#include "cache.h"
+#include "simple-ipc.h"
+#include "strbuf.h"
+#include "pkt-line.h"
+#include "thread-utils.h"
+#include "unix-socket.h"
+#include "unix-stream-server.h"
+
+#ifdef NO_UNIX_SOCKETS
+#error compat/simple-ipc/ipc-unix-socket.c requires Unix sockets
+#endif
+
+enum ipc_active_state ipc_get_active_state(const char *path)
+{
+ enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
+ struct ipc_client_connect_options options
+ = IPC_CLIENT_CONNECT_OPTIONS_INIT;
+ struct stat st;
+ struct ipc_client_connection *connection_test = NULL;
+
+ options.wait_if_busy = 0;
+ options.wait_if_not_found = 0;
+
+ if (lstat(path, &st) == -1) {
+ switch (errno) {
+ case ENOENT:
+ case ENOTDIR:
+ return IPC_STATE__NOT_LISTENING;
+ default:
+ return IPC_STATE__INVALID_PATH;
+ }
+ }
+
+ /* also complain if a plain file is in the way */
+ if ((st.st_mode & S_IFMT) != S_IFSOCK)
+ return IPC_STATE__INVALID_PATH;
+
+ /*
+	 * Just because the filesystem has an S_IFSOCK type inode
+	 * at `path` doesn't mean that there is a server listening.
+ * Ping it to be sure.
+ */
+ state = ipc_client_try_connect(path, &options, &connection_test);
+ ipc_client_close_connection(connection_test);
+
+ return state;
+}
+
+/*
+ * Retry frequency when trying to connect to a server.
+ *
+ * This value should be short enough that we don't seriously delay our
+ * caller, but not so short that our spinning puts pressure on the
+ * system.
+ */
+#define WAIT_STEP_MS (50)
+
+/*
+ * Try to connect to the server. If the server is just starting up or
+ * is very busy, we may not get a connection the first time.
+ */
+static enum ipc_active_state connect_to_server(
+ const char *path,
+ int timeout_ms,
+ const struct ipc_client_connect_options *options,
+ int *pfd)
+{
+ int k;
+
+ *pfd = -1;
+
+ for (k = 0; k < timeout_ms; k += WAIT_STEP_MS) {
+ int fd = unix_stream_connect(path, options->uds_disallow_chdir);
+
+ if (fd != -1) {
+ *pfd = fd;
+ return IPC_STATE__LISTENING;
+ }
+
+ if (errno == ENOENT) {
+ if (!options->wait_if_not_found)
+ return IPC_STATE__PATH_NOT_FOUND;
+
+ goto sleep_and_try_again;
+ }
+
+ if (errno == ETIMEDOUT) {
+ if (!options->wait_if_busy)
+ return IPC_STATE__NOT_LISTENING;
+
+ goto sleep_and_try_again;
+ }
+
+ if (errno == ECONNREFUSED) {
+ if (!options->wait_if_busy)
+ return IPC_STATE__NOT_LISTENING;
+
+ goto sleep_and_try_again;
+ }
+
+ return IPC_STATE__OTHER_ERROR;
+
+ sleep_and_try_again:
+ sleep_millisec(WAIT_STEP_MS);
+ }
+
+ return IPC_STATE__NOT_LISTENING;
+}
+
+/*
+ * The total amount of time that we are willing to wait when trying to
+ * connect to a server.
+ *
+ * When the server is first started, it might take a little while for
+ * it to become ready to service requests. Likewise, the server may
+ * be very (temporarily) busy and not respond to our connections.
+ *
+ * We should gracefully and silently handle those conditions and try
+ * again for a reasonable time period.
+ *
+ * The value chosen here should be long enough for the server
+ * to reliably heal from the above conditions.
+ */
+#define MY_CONNECTION_TIMEOUT_MS (1000)
+
+enum ipc_active_state ipc_client_try_connect(
+ const char *path,
+ const struct ipc_client_connect_options *options,
+ struct ipc_client_connection **p_connection)
+{
+ enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
+ int fd = -1;
+
+ *p_connection = NULL;
+
+ trace2_region_enter("ipc-client", "try-connect", NULL);
+ trace2_data_string("ipc-client", NULL, "try-connect/path", path);
+
+ state = connect_to_server(path, MY_CONNECTION_TIMEOUT_MS,
+ options, &fd);
+
+ trace2_data_intmax("ipc-client", NULL, "try-connect/state",
+ (intmax_t)state);
+ trace2_region_leave("ipc-client", "try-connect", NULL);
+
+ if (state == IPC_STATE__LISTENING) {
+ (*p_connection) = xcalloc(1, sizeof(struct ipc_client_connection));
+ (*p_connection)->fd = fd;
+ }
+
+ return state;
+}
+
+void ipc_client_close_connection(struct ipc_client_connection *connection)
+{
+ if (!connection)
+ return;
+
+ if (connection->fd != -1)
+ close(connection->fd);
+
+ free(connection);
+}
+
+int ipc_client_send_command_to_connection(
+ struct ipc_client_connection *connection,
+ const char *message, struct strbuf *answer)
+{
+ int ret = 0;
+
+ strbuf_setlen(answer, 0);
+
+ trace2_region_enter("ipc-client", "send-command", NULL);
+
+ if (write_packetized_from_buf_no_flush(message, strlen(message),
+ connection->fd) < 0 ||
+ packet_flush_gently(connection->fd) < 0) {
+ ret = error(_("could not send IPC command"));
+ goto done;
+ }
+
+ if (read_packetized_to_strbuf(
+ connection->fd, answer,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR) < 0) {
+ ret = error(_("could not read IPC response"));
+ goto done;
+ }
+
+done:
+ trace2_region_leave("ipc-client", "send-command", NULL);
+ return ret;
+}
+
+int ipc_client_send_command(const char *path,
+ const struct ipc_client_connect_options *options,
+ const char *message, struct strbuf *answer)
+{
+ int ret = -1;
+ enum ipc_active_state state;
+ struct ipc_client_connection *connection = NULL;
+
+ state = ipc_client_try_connect(path, options, &connection);
+
+ if (state != IPC_STATE__LISTENING)
+ return ret;
+
+ ret = ipc_client_send_command_to_connection(connection, message, answer);
+
+ ipc_client_close_connection(connection);
+
+ return ret;
+}
+
+static int set_socket_blocking_flag(int fd, int make_nonblocking)
+{
+ int flags;
+
+ flags = fcntl(fd, F_GETFL, NULL);
+
+ if (flags < 0)
+ return -1;
+
+ if (make_nonblocking)
+ flags |= O_NONBLOCK;
+ else
+ flags &= ~O_NONBLOCK;
+
+ return fcntl(fd, F_SETFL, flags);
+}
+
+/*
+ * Magic numbers used to annotate callback instance data.
+ * These are used to help guard against accidentally passing the
+ * wrong instance data across multiple levels of callbacks (which
+ * is easy to do if there are `void*` arguments).
+ */
+enum magic {
+ MAGIC_SERVER_REPLY_DATA,
+ MAGIC_WORKER_THREAD_DATA,
+ MAGIC_ACCEPT_THREAD_DATA,
+ MAGIC_SERVER_DATA,
+};
+
+struct ipc_server_reply_data {
+ enum magic magic;
+ int fd;
+ struct ipc_worker_thread_data *worker_thread_data;
+};
+
+struct ipc_worker_thread_data {
+ enum magic magic;
+ struct ipc_worker_thread_data *next_thread;
+ struct ipc_server_data *server_data;
+ pthread_t pthread_id;
+};
+
+struct ipc_accept_thread_data {
+ enum magic magic;
+ struct ipc_server_data *server_data;
+
+ struct unix_ss_socket *server_socket;
+
+ int fd_send_shutdown;
+ int fd_wait_shutdown;
+ pthread_t pthread_id;
+};
+
+/*
+ * With unix-sockets, the conceptual "ipc-server" is implemented as a single
+ * controller "accept-thread" thread and a pool of "worker-thread" threads.
+ * The former does the usual `accept()` loop and dispatches connections
+ * to an idle worker thread. The worker threads wait in an idle loop for
+ * a new connection, communicate with the client and relay data to/from
+ * the `application_cb` and then wait for another connection from the
+ * accept thread. This avoids the overhead of constantly creating and
+ * destroying threads.
+ */
+struct ipc_server_data {
+ enum magic magic;
+ ipc_server_application_cb *application_cb;
+ void *application_data;
+ struct strbuf buf_path;
+
+ struct ipc_accept_thread_data *accept_thread;
+ struct ipc_worker_thread_data *worker_thread_list;
+
+ pthread_mutex_t work_available_mutex;
+ pthread_cond_t work_available_cond;
+
+ /*
+ * Accepted but not yet processed client connections are kept
+ * in a circular buffer FIFO. The queue is empty when the
+ * positions are equal.
+ */
+ int *fifo_fds;
+ int queue_size;
+ int back_pos;
+ int front_pos;
+
+ int shutdown_requested;
+ int is_stopped;
+};
+
+/*
+ * Remove and return the oldest queued connection.
+ *
+ * Returns -1 if empty.
+ */
+static int fifo_dequeue(struct ipc_server_data *server_data)
+{
+ /* ASSERT holding mutex */
+
+ int fd;
+
+ if (server_data->back_pos == server_data->front_pos)
+ return -1;
+
+ fd = server_data->fifo_fds[server_data->front_pos];
+ server_data->fifo_fds[server_data->front_pos] = -1;
+
+ server_data->front_pos++;
+ if (server_data->front_pos == server_data->queue_size)
+ server_data->front_pos = 0;
+
+ return fd;
+}
+
+/*
+ * Push a new fd onto the back of the queue.
+ *
+ * Drop it and return -1 if queue is already full.
+ */
+static int fifo_enqueue(struct ipc_server_data *server_data, int fd)
+{
+ /* ASSERT holding mutex */
+
+ int next_back_pos;
+
+ next_back_pos = server_data->back_pos + 1;
+ if (next_back_pos == server_data->queue_size)
+ next_back_pos = 0;
+
+ if (next_back_pos == server_data->front_pos) {
+ /* Queue is full. Just drop it. */
+ close(fd);
+ return -1;
+ }
+
+ server_data->fifo_fds[server_data->back_pos] = fd;
+ server_data->back_pos = next_back_pos;
+
+ return fd;
+}
+
+/*
+ * Wait for a connection to be queued to the FIFO and return it.
+ *
+ * Returns -1 if someone has already requested a shutdown.
+ */
+static int worker_thread__wait_for_connection(
+ struct ipc_worker_thread_data *worker_thread_data)
+{
+ /* ASSERT NOT holding mutex */
+
+ struct ipc_server_data *server_data = worker_thread_data->server_data;
+ int fd = -1;
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+ for (;;) {
+ if (server_data->shutdown_requested)
+ break;
+
+ fd = fifo_dequeue(server_data);
+ if (fd >= 0)
+ break;
+
+ pthread_cond_wait(&server_data->work_available_cond,
+ &server_data->work_available_mutex);
+ }
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+
+ return fd;
+}
+
+/*
+ * Forward declare our reply callback function so that any compiler
+ * errors are reported when we actually define the function (in addition
+ * to any errors reported when we try to pass this callback function as
+ * a parameter in a function call). The former are easier to understand.
+ */
+static ipc_server_reply_cb do_io_reply_callback;
+
+/*
+ * Relay application's response message to the client process.
+ * (We do not flush at this point because we allow the caller
+ * to chunk data to the client thru us.)
+ */
+static int do_io_reply_callback(struct ipc_server_reply_data *reply_data,
+ const char *response, size_t response_len)
+{
+ if (reply_data->magic != MAGIC_SERVER_REPLY_DATA)
+ BUG("reply_cb called with wrong instance data");
+
+ return write_packetized_from_buf_no_flush(response, response_len,
+ reply_data->fd);
+}
+
+/* A randomly chosen value. */
+#define MY_WAIT_POLL_TIMEOUT_MS (10)
+
+/*
+ * If the client hangs up without sending any data on the wire, just
+ * quietly close the socket and ignore this client.
+ *
+ * This worker thread is committed to reading the IPC request data
+ * from the client at the other end of this fd. Wait here for the
+ * client to actually put something on the wire -- because if the
+ * client just does a ping (connect and hangup without sending any
+ * data), our use of the pkt-line read routines will spew an error
+ * message.
+ *
+ * Return -1 if the client hung up.
+ * Return 0 if data (possibly incomplete) is ready.
+ */
+static int worker_thread__wait_for_io_start(
+ struct ipc_worker_thread_data *worker_thread_data,
+ int fd)
+{
+ struct ipc_server_data *server_data = worker_thread_data->server_data;
+ struct pollfd pollfd[1];
+ int result;
+
+ for (;;) {
+ pollfd[0].fd = fd;
+ pollfd[0].events = POLLIN;
+
+ result = poll(pollfd, 1, MY_WAIT_POLL_TIMEOUT_MS);
+ if (result < 0) {
+ if (errno == EINTR)
+ continue;
+ goto cleanup;
+ }
+
+ if (result == 0) {
+ /* a timeout */
+
+ int in_shutdown;
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+ in_shutdown = server_data->shutdown_requested;
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+
+ /*
+ * If a shutdown is already in progress and this
+ * client has not started talking yet, just drop it.
+ */
+ if (in_shutdown)
+ goto cleanup;
+ continue;
+ }
+
+ if (pollfd[0].revents & POLLHUP)
+ goto cleanup;
+
+ if (pollfd[0].revents & POLLIN)
+ return 0;
+
+ goto cleanup;
+ }
+
+cleanup:
+ close(fd);
+ return -1;
+}
+
+/*
+ * Receive the request/command from the client and pass it to the
+ * registered request-callback. The request-callback will compose
+ * a response and call our reply-callback to send it to the client.
+ */
+static int worker_thread__do_io(
+ struct ipc_worker_thread_data *worker_thread_data,
+ int fd)
+{
+ /* ASSERT NOT holding lock */
+
+ struct strbuf buf = STRBUF_INIT;
+ struct ipc_server_reply_data reply_data;
+ int ret = 0;
+
+ reply_data.magic = MAGIC_SERVER_REPLY_DATA;
+ reply_data.worker_thread_data = worker_thread_data;
+
+ reply_data.fd = fd;
+
+ ret = read_packetized_to_strbuf(
+ reply_data.fd, &buf,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR);
+ if (ret >= 0) {
+ ret = worker_thread_data->server_data->application_cb(
+ worker_thread_data->server_data->application_data,
+ buf.buf, do_io_reply_callback, &reply_data);
+
+ packet_flush_gently(reply_data.fd);
+ }
+ else {
+ /*
+ * The client probably disconnected/shutdown before it
+ * could send a well-formed message. Ignore it.
+ */
+ }
+
+ strbuf_release(&buf);
+ close(reply_data.fd);
+
+ return ret;
+}
+
+/*
+ * Block SIGPIPE on the current thread (so that we get EPIPE from
+ * write() rather than an actual signal).
+ *
+ * Note that using sigchain_push() and _pop() to control SIGPIPE
+ * around our IO calls is not thread safe:
+ * [] It uses a global stack of handler frames.
+ * [] It uses ALLOC_GROW() to resize it.
+ * [] Finally, according to the `signal(2)` man-page:
+ * "The effects of `signal()` in a multithreaded process are unspecified."
+ */
+static void thread_block_sigpipe(sigset_t *old_set)
+{
+ sigset_t new_set;
+
+ sigemptyset(&new_set);
+ sigaddset(&new_set, SIGPIPE);
+
+ sigemptyset(old_set);
+ pthread_sigmask(SIG_BLOCK, &new_set, old_set);
+}
+
+/*
+ * Thread proc for an IPC worker thread. It handles a series of
+ * connections from clients. It pulls the next fd from the queue,
+ * processes it, and then waits for the next client.
+ *
+ * Block SIGPIPE in this worker thread for the life of the thread.
+ * This avoids stray (and sometimes delayed) SIGPIPE signals caused
+ * by client errors and/or by extremely heavy IO load.
+ *
+ * This means that the application callback will have SIGPIPE blocked.
+ * The callback should not change it.
+ */
+static void *worker_thread_proc(void *_worker_thread_data)
+{
+ struct ipc_worker_thread_data *worker_thread_data = _worker_thread_data;
+ struct ipc_server_data *server_data = worker_thread_data->server_data;
+ sigset_t old_set;
+ int fd, io;
+ int ret;
+
+ trace2_thread_start("ipc-worker");
+
+ thread_block_sigpipe(&old_set);
+
+ for (;;) {
+ fd = worker_thread__wait_for_connection(worker_thread_data);
+ if (fd == -1)
+ break; /* in shutdown */
+
+ io = worker_thread__wait_for_io_start(worker_thread_data, fd);
+ if (io == -1)
+ continue; /* client hung up without sending anything */
+
+ ret = worker_thread__do_io(worker_thread_data, fd);
+
+ if (ret == SIMPLE_IPC_QUIT) {
+ trace2_data_string("ipc-worker", NULL, "queue_stop_async",
+ "application_quit");
+ /*
+ * The application layer is telling the ipc-server
+ * layer to shutdown.
+ *
+ * We DO NOT have a response to send to the client.
+ *
+ * Queue an async stop (to stop the other threads) and
+ * allow this worker thread to exit now (no sense waiting
+ * for the thread-pool shutdown signal).
+ *
+ * Other non-idle worker threads are allowed to finish
+ * responding to their current clients.
+ */
+ ipc_server_stop_async(server_data);
+ break;
+ }
+ }
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+/* A randomly chosen value. */
+#define MY_ACCEPT_POLL_TIMEOUT_MS (60 * 1000)
+
+/*
+ * Accept a new client connection on our socket. This uses non-blocking
+ * IO so that we can also wait for shutdown requests on our socket-pair
+ * without actually spinning on a fast timeout.
+ */
+static int accept_thread__wait_for_connection(
+ struct ipc_accept_thread_data *accept_thread_data)
+{
+ struct pollfd pollfd[2];
+ int result;
+
+ for (;;) {
+ pollfd[0].fd = accept_thread_data->fd_wait_shutdown;
+ pollfd[0].events = POLLIN;
+
+ pollfd[1].fd = accept_thread_data->server_socket->fd_socket;
+ pollfd[1].events = POLLIN;
+
+ result = poll(pollfd, 2, MY_ACCEPT_POLL_TIMEOUT_MS);
+ if (result < 0) {
+ if (errno == EINTR)
+ continue;
+ return result;
+ }
+
+ if (result == 0) {
+ /* a timeout */
+
+ /*
+ * If someone deletes or force-creates a new unix
+ * domain socket at our path, all future clients
+ * will be routed elsewhere and we silently starve.
+ * If that happens, just queue a shutdown.
+ */
+ if (unix_ss_was_stolen(
+ accept_thread_data->server_socket)) {
+ trace2_data_string("ipc-accept", NULL,
+ "queue_stop_async",
+ "socket_stolen");
+ ipc_server_stop_async(
+ accept_thread_data->server_data);
+ }
+ continue;
+ }
+
+ if (pollfd[0].revents & POLLIN) {
+ /* shutdown message queued to socketpair */
+ return -1;
+ }
+
+ if (pollfd[1].revents & POLLIN) {
+ /* a connection is available on server_socket */
+
+ int client_fd =
+ accept(accept_thread_data->server_socket->fd_socket,
+ NULL, NULL);
+ if (client_fd >= 0)
+ return client_fd;
+
+ /*
+ * An error here is unlikely -- it probably
+ * indicates that the connecting process has
+ * already dropped the connection.
+ */
+ continue;
+ }
+
+ BUG("unandled poll result errno=%d r[0]=%d r[1]=%d",
+ errno, pollfd[0].revents, pollfd[1].revents);
+ }
+}
+
+/*
+ * Thread proc for the IPC server "accept thread". This waits for
+ * an incoming socket connection, appends it to the queue of available
+ * connections, and notifies a worker thread to process it.
+ *
+ * Block SIGPIPE in this thread for the life of the thread. This
+ * avoids any stray SIGPIPE signals when closing pipe fds under
+ * extremely heavy loads (such as when the fifo queue is full and we
+ * drop incoming connections).
+ */
+static void *accept_thread_proc(void *_accept_thread_data)
+{
+ struct ipc_accept_thread_data *accept_thread_data = _accept_thread_data;
+ struct ipc_server_data *server_data = accept_thread_data->server_data;
+ sigset_t old_set;
+
+ trace2_thread_start("ipc-accept");
+
+ thread_block_sigpipe(&old_set);
+
+ for (;;) {
+ int client_fd = accept_thread__wait_for_connection(
+ accept_thread_data);
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+ if (server_data->shutdown_requested) {
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+ if (client_fd >= 0)
+ close(client_fd);
+ break;
+ }
+
+ if (client_fd < 0) {
+ /* ignore transient accept() errors */
+ }
+ else {
+ fifo_enqueue(server_data, client_fd);
+ pthread_cond_broadcast(&server_data->work_available_cond);
+ }
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+ }
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+/*
+ * We can't predict the connection arrival rate relative to the worker
+ * processing rate, so we allow the "accept-thread" to queue up a
+ * generous number of connections, since we'd rather have the client
+ * not time out unnecessarily if we can avoid it. (The assumption is
+ * that this will be used for FSMonitor and that a few-second wait on
+ * a connection is better than having the client time out and do the
+ * full computation itself.)
+ *
+ * The FIFO queue size is set to a multiple of the worker pool size.
+ * This value was chosen at random.
+ */
+#define FIFO_SCALE (100)
+
+/*
+ * The backlog value for `listen(2)`. This doesn't need to be huge;
+ * it just needs to be large enough for our "accept-thread" to wake up
+ * and queue incoming connections onto the FIFO without the kernel
+ * dropping any.
+ *
+ * This value was chosen at random.
+ */
+#define LISTEN_BACKLOG (50)
+
+static int create_listener_socket(
+ const char *path,
+ const struct ipc_server_opts *ipc_opts,
+ struct unix_ss_socket **new_server_socket)
+{
+ struct unix_ss_socket *server_socket = NULL;
+ struct unix_stream_listen_opts uslg_opts = UNIX_STREAM_LISTEN_OPTS_INIT;
+ int ret;
+
+ uslg_opts.listen_backlog_size = LISTEN_BACKLOG;
+ uslg_opts.disallow_chdir = ipc_opts->uds_disallow_chdir;
+
+ ret = unix_ss_create(path, &uslg_opts, -1, &server_socket);
+ if (ret)
+ return ret;
+
+ if (set_socket_blocking_flag(server_socket->fd_socket, 1)) {
+ int saved_errno = errno;
+ unix_ss_free(server_socket);
+ errno = saved_errno;
+ return -1;
+ }
+
+ *new_server_socket = server_socket;
+
+ trace2_data_string("ipc-server", NULL, "listen-with-lock", path);
+ return 0;
+}
+
+static int setup_listener_socket(
+ const char *path,
+ const struct ipc_server_opts *ipc_opts,
+ struct unix_ss_socket **new_server_socket)
+{
+ int ret, saved_errno;
+
+ trace2_region_enter("ipc-server", "create-listener_socket", NULL);
+
+ ret = create_listener_socket(path, ipc_opts, new_server_socket);
+
+ saved_errno = errno;
+ trace2_region_leave("ipc-server", "create-listener_socket", NULL);
+ errno = saved_errno;
+
+ return ret;
+}
+
+/*
+ * Start IPC server in a pool of background threads.
+ */
+int ipc_server_run_async(struct ipc_server_data **returned_server_data,
+ const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data)
+{
+ struct unix_ss_socket *server_socket = NULL;
+ struct ipc_server_data *server_data;
+ int sv[2];
+ int k;
+ int ret;
+ int nr_threads = opts->nr_threads;
+
+ *returned_server_data = NULL;
+
+ /*
+ * Create a socketpair and set sv[1] to non-blocking. This
+ * will be used to send a shutdown message to the accept-thread
+ * and allows the accept-thread to wait on EITHER a client
+ * connection or a shutdown request without spinning.
+ */
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
+ return -1;
+
+ if (set_socket_blocking_flag(sv[1], 1)) {
+ int saved_errno = errno;
+ close(sv[0]);
+ close(sv[1]);
+ errno = saved_errno;
+ return -1;
+ }
+
+ ret = setup_listener_socket(path, opts, &server_socket);
+ if (ret) {
+ int saved_errno = errno;
+ close(sv[0]);
+ close(sv[1]);
+ errno = saved_errno;
+ return ret;
+ }
+
+ server_data = xcalloc(1, sizeof(*server_data));
+ server_data->magic = MAGIC_SERVER_DATA;
+ server_data->application_cb = application_cb;
+ server_data->application_data = application_data;
+ strbuf_init(&server_data->buf_path, 0);
+ strbuf_addstr(&server_data->buf_path, path);
+
+ if (nr_threads < 1)
+ nr_threads = 1;
+
+ pthread_mutex_init(&server_data->work_available_mutex, NULL);
+ pthread_cond_init(&server_data->work_available_cond, NULL);
+
+ server_data->queue_size = nr_threads * FIFO_SCALE;
+ CALLOC_ARRAY(server_data->fifo_fds, server_data->queue_size);
+
+ server_data->accept_thread =
+ xcalloc(1, sizeof(*server_data->accept_thread));
+ server_data->accept_thread->magic = MAGIC_ACCEPT_THREAD_DATA;
+ server_data->accept_thread->server_data = server_data;
+ server_data->accept_thread->server_socket = server_socket;
+ server_data->accept_thread->fd_send_shutdown = sv[0];
+ server_data->accept_thread->fd_wait_shutdown = sv[1];
+
+ if (pthread_create(&server_data->accept_thread->pthread_id, NULL,
+ accept_thread_proc, server_data->accept_thread))
+ die_errno(_("could not start accept_thread '%s'"), path);
+
+ for (k = 0; k < nr_threads; k++) {
+ struct ipc_worker_thread_data *wtd;
+
+ wtd = xcalloc(1, sizeof(*wtd));
+ wtd->magic = MAGIC_WORKER_THREAD_DATA;
+ wtd->server_data = server_data;
+
+ if (pthread_create(&wtd->pthread_id, NULL, worker_thread_proc,
+ wtd)) {
+ if (k == 0)
+ die(_("could not start worker[0] for '%s'"),
+ path);
+ /*
+ * Limp along with the thread pool that we have.
+ */
+ break;
+ }
+
+ wtd->next_thread = server_data->worker_thread_list;
+ server_data->worker_thread_list = wtd;
+ }
+
+ *returned_server_data = server_data;
+ return 0;
+}
+
+/*
+ * Gently tell the IPC server threads to shut down.
+ * Can be run on any thread.
+ */
+int ipc_server_stop_async(struct ipc_server_data *server_data)
+{
+ /* ASSERT NOT holding mutex */
+
+ int fd;
+
+ if (!server_data)
+ return 0;
+
+ trace2_region_enter("ipc-server", "server-stop-async", NULL);
+
+ pthread_mutex_lock(&server_data->work_available_mutex);
+
+ server_data->shutdown_requested = 1;
+
+ /*
+ * Write a byte to the shutdown socket pair to wake up the
+ * accept-thread.
+ */
+ if (write(server_data->accept_thread->fd_send_shutdown, "Q", 1) < 0)
+ error_errno("could not write to fd_send_shutdown");
+
+ /*
+ * Drain the queue of existing connections.
+ */
+ while ((fd = fifo_dequeue(server_data)) != -1)
+ close(fd);
+
+ /*
+ * Gently tell worker threads to stop processing new connections
+ * and exit. (This does not abort in-progress conversations.)
+ */
+ pthread_cond_broadcast(&server_data->work_available_cond);
+
+ pthread_mutex_unlock(&server_data->work_available_mutex);
+
+ trace2_region_leave("ipc-server", "server-stop-async", NULL);
+
+ return 0;
+}
+
+/*
+ * Wait for all IPC server threads to stop.
+ */
+int ipc_server_await(struct ipc_server_data *server_data)
+{
+ pthread_join(server_data->accept_thread->pthread_id, NULL);
+
+ if (!server_data->shutdown_requested)
+ BUG("ipc-server: accept-thread stopped for '%s'",
+ server_data->buf_path.buf);
+
+ while (server_data->worker_thread_list) {
+ struct ipc_worker_thread_data *wtd =
+ server_data->worker_thread_list;
+
+ pthread_join(wtd->pthread_id, NULL);
+
+ server_data->worker_thread_list = wtd->next_thread;
+ free(wtd);
+ }
+
+ server_data->is_stopped = 1;
+
+ return 0;
+}
+
+void ipc_server_free(struct ipc_server_data *server_data)
+{
+ struct ipc_accept_thread_data * accept_thread_data;
+
+ if (!server_data)
+ return;
+
+ if (!server_data->is_stopped)
+ BUG("cannot free ipc-server while running for '%s'",
+ server_data->buf_path.buf);
+
+ accept_thread_data = server_data->accept_thread;
+ if (accept_thread_data) {
+ unix_ss_free(accept_thread_data->server_socket);
+
+ if (accept_thread_data->fd_send_shutdown != -1)
+ close(accept_thread_data->fd_send_shutdown);
+ if (accept_thread_data->fd_wait_shutdown != -1)
+ close(accept_thread_data->fd_wait_shutdown);
+
+ free(server_data->accept_thread);
+ }
+
+ while (server_data->worker_thread_list) {
+ struct ipc_worker_thread_data *wtd =
+ server_data->worker_thread_list;
+
+ server_data->worker_thread_list = wtd->next_thread;
+ free(wtd);
+ }
+
+ pthread_cond_destroy(&server_data->work_available_cond);
+ pthread_mutex_destroy(&server_data->work_available_mutex);
+
+ strbuf_release(&server_data->buf_path);
+
+ free(server_data->fifo_fds);
+ free(server_data);
+}
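The Unix implementation above hands accepted fds from the accept-thread to the worker pool through a fixed-size FIFO guarded by one mutex and one condition variable. A minimal sketch of that producer/consumer handoff, with hypothetical names and sizes rather than the code above, looks roughly like this:

#include <pthread.h>
#include <unistd.h>

#define QUEUE_SIZE 8

static int queue[QUEUE_SIZE];
static int q_head, q_tail, q_count;
static int shutdown_requested;
static pthread_mutex_t q_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cond = PTHREAD_COND_INITIALIZER;

/* Producer side: what the accept-thread does after accept() succeeds. */
static void enqueue_fd(int fd)
{
	pthread_mutex_lock(&q_mutex);
	if (q_count == QUEUE_SIZE) {
		close(fd);			/* queue full: drop the connection */
	} else {
		queue[q_tail] = fd;
		q_tail = (q_tail + 1) % QUEUE_SIZE;
		q_count++;
		pthread_cond_broadcast(&q_cond);	/* wake an idle worker */
	}
	pthread_mutex_unlock(&q_mutex);
}

/* Consumer side: what each worker thread does while waiting for work. */
static int dequeue_fd(void)
{
	int fd = -1;

	pthread_mutex_lock(&q_mutex);
	while (!shutdown_requested && !q_count)
		pthread_cond_wait(&q_cond, &q_mutex);	/* sleeps with the mutex released */
	if (q_count) {
		fd = queue[q_head];
		q_head = (q_head + 1) % QUEUE_SIZE;
		q_count--;
	}
	pthread_mutex_unlock(&q_mutex);

	return fd;	/* -1 only when shutting down */
}

The real server differs mainly in that the queue size is nr_threads * FIFO_SCALE and that the shutdown path drains and closes any still-queued fds before broadcasting to the workers.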
diff --git a/compat/simple-ipc/ipc-win32.c b/compat/simple-ipc/ipc-win32.c
new file mode 100644
index 0000000..8f89c02
--- /dev/null
+++ b/compat/simple-ipc/ipc-win32.c
@@ -0,0 +1,751 @@
+#include "cache.h"
+#include "simple-ipc.h"
+#include "strbuf.h"
+#include "pkt-line.h"
+#include "thread-utils.h"
+
+#ifndef GIT_WINDOWS_NATIVE
+#error This file can only be compiled on Windows
+#endif
+
+static int initialize_pipe_name(const char *path, wchar_t *wpath, size_t alloc)
+{
+ int off = 0;
+ struct strbuf realpath = STRBUF_INIT;
+
+ if (!strbuf_realpath(&realpath, path, 0))
+ return -1;
+
+ off = swprintf(wpath, alloc, L"\\\\.\\pipe\\");
+ if (xutftowcs(wpath + off, realpath.buf, alloc - off) < 0)
+ return -1;
+
+ /* Handle drive prefix */
+ if (wpath[off] && wpath[off + 1] == L':') {
+ wpath[off + 1] = L'_';
+ off += 2;
+ }
+
+ for (; wpath[off]; off++)
+ if (wpath[off] == L'/')
+ wpath[off] = L'\\';
+
+ strbuf_release(&realpath);
+ return 0;
+}
+
+static enum ipc_active_state get_active_state(wchar_t *pipe_path)
+{
+ if (WaitNamedPipeW(pipe_path, NMPWAIT_USE_DEFAULT_WAIT))
+ return IPC_STATE__LISTENING;
+
+ if (GetLastError() == ERROR_SEM_TIMEOUT)
+ return IPC_STATE__NOT_LISTENING;
+
+ if (GetLastError() == ERROR_FILE_NOT_FOUND)
+ return IPC_STATE__PATH_NOT_FOUND;
+
+ return IPC_STATE__OTHER_ERROR;
+}
+
+enum ipc_active_state ipc_get_active_state(const char *path)
+{
+ wchar_t pipe_path[MAX_PATH];
+
+ if (initialize_pipe_name(path, pipe_path, ARRAY_SIZE(pipe_path)) < 0)
+ return IPC_STATE__INVALID_PATH;
+
+ return get_active_state(pipe_path);
+}
+
+#define WAIT_STEP_MS (50)
+
+static enum ipc_active_state connect_to_server(
+ const wchar_t *wpath,
+ DWORD timeout_ms,
+ const struct ipc_client_connect_options *options,
+ int *pfd)
+{
+ DWORD t_start_ms, t_waited_ms;
+ DWORD step_ms;
+ HANDLE hPipe = INVALID_HANDLE_VALUE;
+ DWORD mode = PIPE_READMODE_BYTE;
+ DWORD gle;
+
+ *pfd = -1;
+
+ for (;;) {
+ hPipe = CreateFileW(wpath, GENERIC_READ | GENERIC_WRITE,
+ 0, NULL, OPEN_EXISTING, 0, NULL);
+ if (hPipe != INVALID_HANDLE_VALUE)
+ break;
+
+ gle = GetLastError();
+
+ switch (gle) {
+ case ERROR_FILE_NOT_FOUND:
+ if (!options->wait_if_not_found)
+ return IPC_STATE__PATH_NOT_FOUND;
+ if (!timeout_ms)
+ return IPC_STATE__PATH_NOT_FOUND;
+
+ step_ms = (timeout_ms < WAIT_STEP_MS) ?
+ timeout_ms : WAIT_STEP_MS;
+ sleep_millisec(step_ms);
+
+ timeout_ms -= step_ms;
+ break; /* try again */
+
+ case ERROR_PIPE_BUSY:
+ if (!options->wait_if_busy)
+ return IPC_STATE__NOT_LISTENING;
+ if (!timeout_ms)
+ return IPC_STATE__NOT_LISTENING;
+
+ t_start_ms = (DWORD)(getnanotime() / 1000000);
+
+ if (!WaitNamedPipeW(wpath, timeout_ms)) {
+ if (GetLastError() == ERROR_SEM_TIMEOUT)
+ return IPC_STATE__NOT_LISTENING;
+
+ return IPC_STATE__OTHER_ERROR;
+ }
+
+ /*
+ * A pipe server instance became available.
+ * Race other client processes to connect to
+ * it.
+ *
+ * But first decrement our overall timeout so
+ * that we don't starve if we keep losing the
+ * race, and also guard against the special
+ * NMPWAIT_ values (0 and -1).
+ */
+ t_waited_ms = (DWORD)(getnanotime() / 1000000) - t_start_ms;
+ if (t_waited_ms < timeout_ms)
+ timeout_ms -= t_waited_ms;
+ else
+ timeout_ms = 1;
+ break; /* try again */
+
+ default:
+ return IPC_STATE__OTHER_ERROR;
+ }
+ }
+
+ if (!SetNamedPipeHandleState(hPipe, &mode, NULL, NULL)) {
+ CloseHandle(hPipe);
+ return IPC_STATE__OTHER_ERROR;
+ }
+
+ *pfd = _open_osfhandle((intptr_t)hPipe, O_RDWR|O_BINARY);
+ if (*pfd < 0) {
+ CloseHandle(hPipe);
+ return IPC_STATE__OTHER_ERROR;
+ }
+
+ /* fd now owns hPipe */
+
+ return IPC_STATE__LISTENING;
+}
+
+/*
+ * The default connection timeout for Windows clients.
+ *
+ * This is not currently part of the ipc_ API (nor the config settings)
+ * because of differences between Windows and other platforms.
+ *
+ * This value was chosen at random.
+ */
+#define WINDOWS_CONNECTION_TIMEOUT_MS (30000)
+
+enum ipc_active_state ipc_client_try_connect(
+ const char *path,
+ const struct ipc_client_connect_options *options,
+ struct ipc_client_connection **p_connection)
+{
+ wchar_t wpath[MAX_PATH];
+ enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
+ int fd = -1;
+
+ *p_connection = NULL;
+
+ trace2_region_enter("ipc-client", "try-connect", NULL);
+ trace2_data_string("ipc-client", NULL, "try-connect/path", path);
+
+ if (initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath)) < 0)
+ state = IPC_STATE__INVALID_PATH;
+ else
+ state = connect_to_server(wpath, WINDOWS_CONNECTION_TIMEOUT_MS,
+ options, &fd);
+
+ trace2_data_intmax("ipc-client", NULL, "try-connect/state",
+ (intmax_t)state);
+ trace2_region_leave("ipc-client", "try-connect", NULL);
+
+ if (state == IPC_STATE__LISTENING) {
+ (*p_connection) = xcalloc(1, sizeof(struct ipc_client_connection));
+ (*p_connection)->fd = fd;
+ }
+
+ return state;
+}
+
+void ipc_client_close_connection(struct ipc_client_connection *connection)
+{
+ if (!connection)
+ return;
+
+ if (connection->fd != -1)
+ close(connection->fd);
+
+ free(connection);
+}
+
+int ipc_client_send_command_to_connection(
+ struct ipc_client_connection *connection,
+ const char *message, struct strbuf *answer)
+{
+ int ret = 0;
+
+ strbuf_setlen(answer, 0);
+
+ trace2_region_enter("ipc-client", "send-command", NULL);
+
+ if (write_packetized_from_buf_no_flush(message, strlen(message),
+ connection->fd) < 0 ||
+ packet_flush_gently(connection->fd) < 0) {
+ ret = error(_("could not send IPC command"));
+ goto done;
+ }
+
+ FlushFileBuffers((HANDLE)_get_osfhandle(connection->fd));
+
+ if (read_packetized_to_strbuf(
+ connection->fd, answer,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR) < 0) {
+ ret = error(_("could not read IPC response"));
+ goto done;
+ }
+
+done:
+ trace2_region_leave("ipc-client", "send-command", NULL);
+ return ret;
+}
+
+int ipc_client_send_command(const char *path,
+ const struct ipc_client_connect_options *options,
+ const char *message, struct strbuf *response)
+{
+ int ret = -1;
+ enum ipc_active_state state;
+ struct ipc_client_connection *connection = NULL;
+
+ state = ipc_client_try_connect(path, options, &connection);
+
+ if (state != IPC_STATE__LISTENING)
+ return ret;
+
+ ret = ipc_client_send_command_to_connection(connection, message, response);
+
+ ipc_client_close_connection(connection);
+
+ return ret;
+}
+
+/*
+ * Duplicate the given pipe handle and wrap it in a file descriptor so
+ * that we can use pkt-line on it.
+ */
+static int dup_fd_from_pipe(const HANDLE pipe)
+{
+ HANDLE process = GetCurrentProcess();
+ HANDLE handle;
+ int fd;
+
+ if (!DuplicateHandle(process, pipe, process, &handle, 0, FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ errno = err_win_to_posix(GetLastError());
+ return -1;
+ }
+
+ fd = _open_osfhandle((intptr_t)handle, O_RDWR|O_BINARY);
+ if (fd < 0) {
+ errno = err_win_to_posix(GetLastError());
+ CloseHandle(handle);
+ return -1;
+ }
+
+ /*
+ * `handle` is now owned by `fd` and will be automatically closed
+ * when the descriptor is closed.
+ */
+
+ return fd;
+}
+
+/*
+ * Magic numbers used to annotate callback instance data.
+ * These are used to help guard against accidentally passing the
+ * wrong instance data across multiple levels of callbacks (which
+ * is easy to do if there are `void*` arguments).
+ */
+enum magic {
+ MAGIC_SERVER_REPLY_DATA,
+ MAGIC_SERVER_THREAD_DATA,
+ MAGIC_SERVER_DATA,
+};
+
+struct ipc_server_reply_data {
+ enum magic magic;
+ int fd;
+ struct ipc_server_thread_data *server_thread_data;
+};
+
+struct ipc_server_thread_data {
+ enum magic magic;
+ struct ipc_server_thread_data *next_thread;
+ struct ipc_server_data *server_data;
+ pthread_t pthread_id;
+ HANDLE hPipe;
+};
+
+/*
+ * On Windows, the conceptual "ipc-server" is implemented as a pool of
+ * n identical/peer "server-thread" threads. That is, there is no
+ * hierarchy of threads, and therefore no controller thread managing
+ * the pool. Each thread has an independent handle to the named pipe,
+ * receives incoming connections, processes the client, and re-uses
+ * the pipe for the next client connection.
+ *
+ * Therefore, the "ipc-server" only needs to maintain a list of the
+ * spawned threads for eventual "join" purposes.
+ *
+ * A single "stop-event" is visible to all of the server threads to
+ * tell them to shut down (when idle).
+ */
+struct ipc_server_data {
+ enum magic magic;
+ ipc_server_application_cb *application_cb;
+ void *application_data;
+ struct strbuf buf_path;
+ wchar_t wpath[MAX_PATH];
+
+ HANDLE hEventStopRequested;
+ struct ipc_server_thread_data *thread_list;
+ int is_stopped;
+};
+
+enum connect_result {
+ CR_CONNECTED = 0,
+ CR_CONNECT_PENDING,
+ CR_CONNECT_ERROR,
+ CR_WAIT_ERROR,
+ CR_SHUTDOWN,
+};
+
+static enum connect_result queue_overlapped_connect(
+ struct ipc_server_thread_data *server_thread_data,
+ OVERLAPPED *lpo)
+{
+ if (ConnectNamedPipe(server_thread_data->hPipe, lpo))
+ goto failed;
+
+ switch (GetLastError()) {
+ case ERROR_IO_PENDING:
+ return CR_CONNECT_PENDING;
+
+ case ERROR_PIPE_CONNECTED:
+ SetEvent(lpo->hEvent);
+ return CR_CONNECTED;
+
+ default:
+ break;
+ }
+
+failed:
+ error(_("ConnectNamedPipe failed for '%s' (%lu)"),
+ server_thread_data->server_data->buf_path.buf,
+ GetLastError());
+ return CR_CONNECT_ERROR;
+}
+
+/*
+ * Use Windows Overlapped IO to wait for a connection or for our event
+ * to be signalled.
+ */
+static enum connect_result wait_for_connection(
+ struct ipc_server_thread_data *server_thread_data,
+ OVERLAPPED *lpo)
+{
+ enum connect_result r;
+ HANDLE waitHandles[2];
+ DWORD dwWaitResult;
+
+ r = queue_overlapped_connect(server_thread_data, lpo);
+ if (r != CR_CONNECT_PENDING)
+ return r;
+
+ waitHandles[0] = server_thread_data->server_data->hEventStopRequested;
+ waitHandles[1] = lpo->hEvent;
+
+ dwWaitResult = WaitForMultipleObjects(2, waitHandles, FALSE, INFINITE);
+ switch (dwWaitResult) {
+ case WAIT_OBJECT_0 + 0:
+ return CR_SHUTDOWN;
+
+ case WAIT_OBJECT_0 + 1:
+ ResetEvent(lpo->hEvent);
+ return CR_CONNECTED;
+
+ default:
+ return CR_WAIT_ERROR;
+ }
+}
+
+/*
+ * Forward declare our reply callback function so that any compiler
+ * errors are reported when we actually define the function (in addition
+ * to any errors reported when we try to pass this callback function as
+ * a parameter in a function call). The former are easier to understand.
+ */
+static ipc_server_reply_cb do_io_reply_callback;
+
+/*
+ * Relay application's response message to the client process.
+ * (We do not flush at this point because we allow the caller
+ * to chunk data to the client through us.)
+ */
+static int do_io_reply_callback(struct ipc_server_reply_data *reply_data,
+ const char *response, size_t response_len)
+{
+ if (reply_data->magic != MAGIC_SERVER_REPLY_DATA)
+ BUG("reply_cb called with wrong instance data");
+
+ return write_packetized_from_buf_no_flush(response, response_len,
+ reply_data->fd);
+}
+
+/*
+ * Receive the request/command from the client and pass it to the
+ * registered request-callback. The request-callback will compose
+ * a response and call our reply-callback to send it to the client.
+ *
+ * Simple-IPC only contains one round trip, so we flush and close
+ * here after the response.
+ */
+static int do_io(struct ipc_server_thread_data *server_thread_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct ipc_server_reply_data reply_data;
+ int ret = 0;
+
+ reply_data.magic = MAGIC_SERVER_REPLY_DATA;
+ reply_data.server_thread_data = server_thread_data;
+
+ reply_data.fd = dup_fd_from_pipe(server_thread_data->hPipe);
+ if (reply_data.fd < 0)
+ return error(_("could not create fd from pipe for '%s'"),
+ server_thread_data->server_data->buf_path.buf);
+
+ ret = read_packetized_to_strbuf(
+ reply_data.fd, &buf,
+ PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR);
+ if (ret >= 0) {
+ ret = server_thread_data->server_data->application_cb(
+ server_thread_data->server_data->application_data,
+ buf.buf, do_io_reply_callback, &reply_data);
+
+ packet_flush_gently(reply_data.fd);
+
+ FlushFileBuffers((HANDLE)_get_osfhandle((reply_data.fd)));
+ }
+ else {
+ /*
+ * The client probably disconnected/shutdown before it
+ * could send a well-formed message. Ignore it.
+ */
+ }
+
+ strbuf_release(&buf);
+ close(reply_data.fd);
+
+ return ret;
+}
+
+/*
+ * Handle the IPC request and response with this connected client, and
+ * reset the pipe to prepare for the next client.
+ */
+static int use_connection(struct ipc_server_thread_data *server_thread_data)
+{
+ int ret;
+
+ ret = do_io(server_thread_data);
+
+ FlushFileBuffers(server_thread_data->hPipe);
+ DisconnectNamedPipe(server_thread_data->hPipe);
+
+ return ret;
+}
+
+/*
+ * Thread proc for an IPC server worker thread. It handles a series of
+ * connections from clients. It cleans and reuses the hPipe between
+ * clients.
+ */
+static void *server_thread_proc(void *_server_thread_data)
+{
+ struct ipc_server_thread_data *server_thread_data = _server_thread_data;
+ HANDLE hEventConnected = INVALID_HANDLE_VALUE;
+ OVERLAPPED oConnect;
+ enum connect_result cr;
+ int ret;
+
+ assert(server_thread_data->hPipe != INVALID_HANDLE_VALUE);
+
+ trace2_thread_start("ipc-server");
+ trace2_data_string("ipc-server", NULL, "pipe",
+ server_thread_data->server_data->buf_path.buf);
+
+ hEventConnected = CreateEventW(NULL, TRUE, FALSE, NULL);
+
+ memset(&oConnect, 0, sizeof(oConnect));
+ oConnect.hEvent = hEventConnected;
+
+ for (;;) {
+ cr = wait_for_connection(server_thread_data, &oConnect);
+
+ switch (cr) {
+ case CR_SHUTDOWN:
+ goto finished;
+
+ case CR_CONNECTED:
+ ret = use_connection(server_thread_data);
+ if (ret == SIMPLE_IPC_QUIT) {
+ ipc_server_stop_async(
+ server_thread_data->server_data);
+ goto finished;
+ }
+ if (ret > 0) {
+ /*
+ * Ignore (transient) IO errors with this
+ * client and reset for the next client.
+ */
+ }
+ break;
+
+ case CR_CONNECT_PENDING:
+ /* By construction, this should not happen. */
+ BUG("ipc-server[%s]: unexpeced CR_CONNECT_PENDING",
+ server_thread_data->server_data->buf_path.buf);
+
+ case CR_CONNECT_ERROR:
+ case CR_WAIT_ERROR:
+ /*
+ * Ignore these theoretical errors.
+ */
+ DisconnectNamedPipe(server_thread_data->hPipe);
+ break;
+
+ default:
+ BUG("unandled case after wait_for_connection");
+ }
+ }
+
+finished:
+ CloseHandle(server_thread_data->hPipe);
+ CloseHandle(hEventConnected);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+static HANDLE create_new_pipe(wchar_t *wpath, int is_first)
+{
+ HANDLE hPipe;
+ DWORD dwOpenMode, dwPipeMode;
+ LPSECURITY_ATTRIBUTES lpsa = NULL;
+
+ dwOpenMode = PIPE_ACCESS_INBOUND | PIPE_ACCESS_OUTBOUND |
+ FILE_FLAG_OVERLAPPED;
+
+ dwPipeMode = PIPE_TYPE_MESSAGE | PIPE_READMODE_BYTE | PIPE_WAIT |
+ PIPE_REJECT_REMOTE_CLIENTS;
+
+ if (is_first) {
+ dwOpenMode |= FILE_FLAG_FIRST_PIPE_INSTANCE;
+
+ /*
+ * On Windows, the first server pipe instance gets to
+ * set the ACL / Security Attributes on the named
+ * pipe; subsequent instances inherit and cannot
+ * change them.
+ *
+ * TODO Should we allow the application layer to
+ * specify security attributes, such as `LocalService`
+ * or `LocalSystem`, when we create the named pipe?
+ * This question is probably not important when the
+ * daemon is started by a foreground user process and
+ * only needs to talk to the current user, but may be
+ * if the daemon is run via the Control Panel as a
+ * System Service.
+ */
+ }
+
+ hPipe = CreateNamedPipeW(wpath, dwOpenMode, dwPipeMode,
+ PIPE_UNLIMITED_INSTANCES, 1024, 1024, 0, lpsa);
+
+ return hPipe;
+}
+
+int ipc_server_run_async(struct ipc_server_data **returned_server_data,
+ const char *path, const struct ipc_server_opts *opts,
+ ipc_server_application_cb *application_cb,
+ void *application_data)
+{
+ struct ipc_server_data *server_data;
+ wchar_t wpath[MAX_PATH];
+ HANDLE hPipeFirst = INVALID_HANDLE_VALUE;
+ int k;
+ int ret = 0;
+ int nr_threads = opts->nr_threads;
+
+ *returned_server_data = NULL;
+
+ ret = initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath));
+ if (ret < 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ hPipeFirst = create_new_pipe(wpath, 1);
+ if (hPipeFirst == INVALID_HANDLE_VALUE) {
+ errno = EADDRINUSE;
+ return -2;
+ }
+
+ server_data = xcalloc(1, sizeof(*server_data));
+ server_data->magic = MAGIC_SERVER_DATA;
+ server_data->application_cb = application_cb;
+ server_data->application_data = application_data;
+ server_data->hEventStopRequested = CreateEvent(NULL, TRUE, FALSE, NULL);
+ strbuf_init(&server_data->buf_path, 0);
+ strbuf_addstr(&server_data->buf_path, path);
+ wcscpy(server_data->wpath, wpath);
+
+ if (nr_threads < 1)
+ nr_threads = 1;
+
+ for (k = 0; k < nr_threads; k++) {
+ struct ipc_server_thread_data *std;
+
+ std = xcalloc(1, sizeof(*std));
+ std->magic = MAGIC_SERVER_THREAD_DATA;
+ std->server_data = server_data;
+ std->hPipe = INVALID_HANDLE_VALUE;
+
+ std->hPipe = (k == 0)
+ ? hPipeFirst
+ : create_new_pipe(server_data->wpath, 0);
+
+ if (std->hPipe == INVALID_HANDLE_VALUE) {
+ /*
+ * If we've reached a pipe instance limit for
+ * this path, just use fewer threads.
+ */
+ free(std);
+ break;
+ }
+
+ if (pthread_create(&std->pthread_id, NULL,
+ server_thread_proc, std)) {
+ /*
+ * Likewise, if we're out of threads, just use
+ * fewer threads than requested.
+ *
+ * However, we just give up if we can't even get
+ * one thread. This should not happen.
+ */
+ if (k == 0)
+ die(_("could not start thread[0] for '%s'"),
+ path);
+
+ CloseHandle(std->hPipe);
+ free(std);
+ break;
+ }
+
+ std->next_thread = server_data->thread_list;
+ server_data->thread_list = std;
+ }
+
+ *returned_server_data = server_data;
+ return 0;
+}
+
+int ipc_server_stop_async(struct ipc_server_data *server_data)
+{
+ if (!server_data)
+ return 0;
+
+ /*
+ * Gently tell all of the ipc_server threads to shut down.
+ * This will be seen the next time they are idle (and waiting
+ * for a connection).
+ *
+ * We DO NOT attempt to force them to drop an active connection.
+ */
+ SetEvent(server_data->hEventStopRequested);
+ return 0;
+}
+
+int ipc_server_await(struct ipc_server_data *server_data)
+{
+ DWORD dwWaitResult;
+
+ if (!server_data)
+ return 0;
+
+ dwWaitResult = WaitForSingleObject(server_data->hEventStopRequested, INFINITE);
+ if (dwWaitResult != WAIT_OBJECT_0)
+ return error(_("wait for hEvent failed for '%s'"),
+ server_data->buf_path.buf);
+
+ while (server_data->thread_list) {
+ struct ipc_server_thread_data *std = server_data->thread_list;
+
+ pthread_join(std->pthread_id, NULL);
+
+ server_data->thread_list = std->next_thread;
+ free(std);
+ }
+
+ server_data->is_stopped = 1;
+
+ return 0;
+}
+
+void ipc_server_free(struct ipc_server_data *server_data)
+{
+ if (!server_data)
+ return;
+
+ if (!server_data->is_stopped)
+ BUG("cannot free ipc-server while running for '%s'",
+ server_data->buf_path.buf);
+
+ strbuf_release(&server_data->buf_path);
+
+ if (server_data->hEventStopRequested != INVALID_HANDLE_VALUE)
+ CloseHandle(server_data->hEventStopRequested);
+
+ while (server_data->thread_list) {
+ struct ipc_server_thread_data *std = server_data->thread_list;
+
+ server_data->thread_list = std->next_thread;
+ free(std);
+ }
+
+ free(server_data);
+}
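Each Windows server thread waits simultaneously on its own overlapped connect event and on the shared stop event. A condensed sketch of that wait, assuming an overlapped pipe handle and a manual-reset event (a hypothetical helper, not the function above):

#include <windows.h>

enum wait_outcome { GOT_CLIENT, GOT_STOP, GOT_ERROR };

/*
 * Queue an overlapped ConnectNamedPipe() and block until either a client
 * connects or hStop is signalled. hPipe must have been created with
 * FILE_FLAG_OVERLAPPED and o->hEvent must be a manual-reset event.
 */
static enum wait_outcome wait_client_or_stop(HANDLE hPipe, HANDLE hStop,
					     OVERLAPPED *o)
{
	HANDLE handles[2];
	DWORD gle, w;

	if (ConnectNamedPipe(hPipe, o))
		return GOT_ERROR;	/* unexpected for an overlapped pipe */

	gle = GetLastError();
	if (gle == ERROR_PIPE_CONNECTED)
		return GOT_CLIENT;	/* client connected before we queued the wait */
	if (gle != ERROR_IO_PENDING)
		return GOT_ERROR;

	handles[0] = hStop;	/* shared, signalled once by the stop request */
	handles[1] = o->hEvent;	/* per-thread, signalled when the connect completes */

	w = WaitForMultipleObjects(2, handles, FALSE, INFINITE);
	if (w == WAIT_OBJECT_0)
		return GOT_STOP;
	if (w == WAIT_OBJECT_0 + 1) {
		ResetEvent(o->hEvent);
		return GOT_CLIENT;
	}
	return GOT_ERROR;
}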
diff --git a/config.c b/config.c
index f90b633..870d953 100644
--- a/config.c
+++ b/config.c
@@ -1180,20 +1180,6 @@ static void die_bad_number(const char *name, const char *value)
}
}
-NORETURN
-static void die_bad_bool(const char *name, const char *value)
-{
- if (!strcmp(name, "GIT_TEST_GETTEXT_POISON"))
- /*
- * We explicitly *don't* use _() here since it would
- * cause an infinite loop with _() needing to call
- * use_gettext_poison().
- */
- die("bad boolean config value '%s' for '%s'", value, name);
- else
- die(_("bad boolean config value '%s' for '%s'"), value, name);
-}
-
int git_config_int(const char *name, const char *value)
{
int ret;
@@ -1268,7 +1254,7 @@ int git_config_bool(const char *name, const char *value)
{
int v = git_parse_maybe_bool(value);
if (v < 0)
- die_bad_bool(name, value);
+ die(_("bad boolean config value '%s' for '%s'"), value, name);
return v;
}
@@ -2269,7 +2255,7 @@ static void repo_read_config(struct repository *repo)
opts.git_dir = repo->gitdir;
if (!repo->config)
- repo->config = xcalloc(1, sizeof(struct config_set));
+ CALLOC_ARRAY(repo->config, 1);
else
git_configset_clear(repo->config);
diff --git a/config.mak.uname b/config.mak.uname
index d204c20..cb443b4 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -424,6 +424,7 @@ ifeq ($(uname_S),Windows)
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
+ USE_WIN32_IPC = YesPlease
USE_WIN32_MMAP = YesPlease
MMAP_PREVENTS_DELETE = UnfortunatelyYes
# USE_NED_ALLOCATOR = YesPlease
@@ -600,6 +601,7 @@ ifneq (,$(findstring MINGW,$(uname_S)))
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
+ USE_WIN32_IPC = YesPlease
USE_WIN32_MMAP = YesPlease
MMAP_PREVENTS_DELETE = UnfortunatelyYes
USE_NED_ALLOCATOR = YesPlease
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index ac3dbc0..75ed198 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -58,6 +58,10 @@ if(WIN32)
# In the vcpkg edition, we need this to be able to link to libcurl
set(CURL_NO_CURL_CMAKE ON)
+
+ # Copy the necessary vcpkg DLLs (like iconv) to the install dir
+ set(X_VCPKG_APPLOCAL_DEPS_INSTALL ON)
+ set(CMAKE_TOOLCHAIN_FILE ${VCPKG_DIR}/scripts/buildsystems/vcpkg.cmake CACHE STRING "Vcpkg toolchain file")
endif()
find_program(SH_EXE sh PATHS "C:/Program Files/Git/bin")
@@ -243,7 +247,13 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
add_compile_definitions(PROCFS_EXECUTABLE_PATH="/proc/self/exe" HAVE_DEV_TTY )
- list(APPEND compat_SOURCES unix-socket.c)
+ list(APPEND compat_SOURCES unix-socket.c unix-stream-server.c)
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
+ list(APPEND compat_SOURCES compat/simple-ipc/ipc-shared.c compat/simple-ipc/ipc-win32.c)
+else()
+ list(APPEND compat_SOURCES compat/simple-ipc/ipc-shared.c compat/simple-ipc/ipc-unix-socket.c)
endif()
set(EXE_EXTENSION ${CMAKE_EXECUTABLE_SUFFIX})
@@ -685,13 +695,17 @@ endif()
parse_makefile_for_executables(git_builtin_extra "BUILT_INS")
+option(SKIP_DASHED_BUILT_INS "Skip hardlinking the dashed versions of the built-ins")
+
#Creating hardlinks
+if(NOT SKIP_DASHED_BUILT_INS)
foreach(s ${git_SOURCES} ${git_builtin_extra})
string(REPLACE "${CMAKE_SOURCE_DIR}/builtin/" "" s ${s})
string(REPLACE ".c" "" s ${s})
file(APPEND ${CMAKE_BINARY_DIR}/CreateLinks.cmake "file(CREATE_LINK git${EXE_EXTENSION} git-${s}${EXE_EXTENSION})\n")
list(APPEND git_links ${CMAKE_BINARY_DIR}/git-${s}${EXE_EXTENSION})
endforeach()
+endif()
if(CURL_FOUND)
set(remote_exes
@@ -807,15 +821,19 @@ list(TRANSFORM git_shell_scripts PREPEND "${CMAKE_BINARY_DIR}/")
list(TRANSFORM git_perl_scripts PREPEND "${CMAKE_BINARY_DIR}/")
#install
-install(TARGETS git git-shell
+foreach(program ${PROGRAMS_BUILT})
+if(program STREQUAL "git" OR program STREQUAL "git-shell")
+install(TARGETS ${program}
RUNTIME DESTINATION bin)
+else()
+install(TARGETS ${program}
+ RUNTIME DESTINATION libexec/git-core)
+endif()
+endforeach()
+
install(PROGRAMS ${CMAKE_BINARY_DIR}/git-cvsserver
DESTINATION bin)
-list(REMOVE_ITEM PROGRAMS_BUILT git git-shell)
-install(TARGETS ${PROGRAMS_BUILT}
- RUNTIME DESTINATION libexec/git-core)
-
set(bin_links
git-receive-pack git-upload-archive git-upload-pack)
@@ -828,12 +846,12 @@ install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git-shell${EXE_EXTENS
foreach(b ${git_links})
string(REPLACE "${CMAKE_BINARY_DIR}" "" b ${b})
- install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b}${EXE_EXTENSION})")
+ install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b})")
endforeach()
foreach(b ${git_http_links})
string(REPLACE "${CMAKE_BINARY_DIR}" "" b ${b})
- install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/libexec/git-core/git-remote-http${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b}${EXE_EXTENSION})")
+ install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/libexec/git-core/git-remote-http${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b})")
endforeach()
install(PROGRAMS ${git_shell_scripts} ${git_perl_scripts} ${CMAKE_BINARY_DIR}/git-p4
diff --git a/contrib/coccinelle/array.cocci b/contrib/coccinelle/array.cocci
index 46b8d2e..9a4f00c 100644
--- a/contrib/coccinelle/array.cocci
+++ b/contrib/coccinelle/array.cocci
@@ -88,3 +88,11 @@ expression n;
@@
- ptr = xmalloc((n) * sizeof(T));
+ ALLOC_ARRAY(ptr, n);
+
+@@
+type T;
+T *ptr;
+expression n != 1;
+@@
+- ptr = xcalloc(n, \( sizeof(*ptr) \| sizeof(T) \) )
++ CALLOC_ARRAY(ptr, n)
diff --git a/contrib/coccinelle/xcalloc.cocci b/contrib/coccinelle/xcalloc.cocci
new file mode 100644
index 0000000..c291011
--- /dev/null
+++ b/contrib/coccinelle/xcalloc.cocci
@@ -0,0 +1,10 @@
+@@
+type T;
+T *ptr;
+expression n;
+@@
+ xcalloc(
++ n,
+ \( sizeof(T) \| sizeof(*ptr) \)
+- , n
+ )
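Together the two semantic patches normalize allocation sites: array.cocci turns an xcalloc() whose element size is derived from the destination into CALLOC_ARRAY(), and the new xcalloc.cocci puts swapped arguments back into calloc(3)'s (nmemb, size) order. A hypothetical before/after illustration (the caller and nr_sockets are made up):

struct pollfd *pfd;

/* array.cocci: the (count, element-size) form becomes the CALLOC_ARRAY() macro. */
pfd = xcalloc(nr_sockets, sizeof(*pfd));	/* before */
CALLOC_ARRAY(pfd, nr_sockets);			/* after  */

/* xcalloc.cocci: swapped arguments are reordered so the count comes first. */
pfd = xcalloc(sizeof(*pfd), nr_sockets);	/* before */
pfd = xcalloc(nr_sockets, sizeof(*pfd));	/* after  */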
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index 7dc6cd8..dfa735e 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -77,7 +77,7 @@ __git_find_repo_path ()
test -d "$__git_dir" &&
__git_repo_path="$__git_dir"
elif [ -n "${GIT_DIR-}" ]; then
- test -d "${GIT_DIR-}" &&
+ test -d "$GIT_DIR" &&
__git_repo_path="$GIT_DIR"
elif [ -d .git ]; then
__git_repo_path=.git
@@ -427,7 +427,7 @@ __gitcomp_builtin ()
if [ -z "$options" ]; then
local completion_helper
- if [ "$GIT_COMPLETION_SHOW_ALL" = "1" ]; then
+ if [ "${GIT_COMPLETION_SHOW_ALL-}" = "1" ]; then
completion_helper="--git-completion-helper-all"
else
completion_helper="--git-completion-helper"
@@ -744,7 +744,7 @@ __git_refs ()
track=""
;;
*)
- for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD; do
+ for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD CHERRY_PICK_HEAD; do
case "$i" in
$match*)
if [ -e "$dir/$i" ]; then
@@ -1910,7 +1910,7 @@ _git_help ()
return
;;
esac
- if test -n "$GIT_TESTING_ALL_COMMAND_LIST"
+ if test -n "${GIT_TESTING_ALL_COMMAND_LIST-}"
then
__gitcomp "$GIT_TESTING_ALL_COMMAND_LIST $(__git --list-cmds=alias,list-guide) gitk"
else
@@ -3053,7 +3053,7 @@ _git_stash ()
__gitcomp "--name-status --oneline --patch-with-stat"
;;
show,--*)
- __gitcomp "$__git_diff_common_options"
+ __gitcomp "--include-untracked --only-untracked $__git_diff_common_options"
;;
branch,--*)
;;
diff --git a/convert.c b/convert.c
index 20b19ab..fd9c84b 100644
--- a/convert.c
+++ b/convert.c
@@ -24,17 +24,6 @@
#define CONVERT_STAT_BITS_TXT_CRLF 0x2
#define CONVERT_STAT_BITS_BIN 0x4
-enum crlf_action {
- CRLF_UNDEFINED,
- CRLF_BINARY,
- CRLF_TEXT,
- CRLF_TEXT_INPUT,
- CRLF_TEXT_CRLF,
- CRLF_AUTO,
- CRLF_AUTO_INPUT,
- CRLF_AUTO_CRLF
-};
-
struct text_stat {
/* NUL, CR, LF and CRLF counts */
unsigned nul, lonecr, lonelf, crlf;
@@ -172,7 +161,7 @@ static int text_eol_is_crlf(void)
return 0;
}
-static enum eol output_eol(enum crlf_action crlf_action)
+static enum eol output_eol(enum convert_crlf_action crlf_action)
{
switch (crlf_action) {
case CRLF_BINARY:
@@ -246,7 +235,7 @@ static int has_crlf_in_index(struct index_state *istate, const char *path)
}
static int will_convert_lf_to_crlf(struct text_stat *stats,
- enum crlf_action crlf_action)
+ enum convert_crlf_action crlf_action)
{
if (output_eol(crlf_action) != EOL_CRLF)
return 0;
@@ -499,7 +488,7 @@ static int encode_to_worktree(const char *path, const char *src, size_t src_len,
static int crlf_to_git(struct index_state *istate,
const char *path, const char *src, size_t len,
struct strbuf *buf,
- enum crlf_action crlf_action, int conv_flags)
+ enum convert_crlf_action crlf_action, int conv_flags)
{
struct text_stat stats;
char *dst;
@@ -585,8 +574,8 @@ static int crlf_to_git(struct index_state *istate,
return 1;
}
-static int crlf_to_worktree(const char *src, size_t len,
- struct strbuf *buf, enum crlf_action crlf_action)
+static int crlf_to_worktree(const char *src, size_t len, struct strbuf *buf,
+ enum convert_crlf_action crlf_action)
{
char *to_free = NULL;
struct text_stat stats;
@@ -884,9 +873,13 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
goto done;
if (fd >= 0)
- err = write_packetized_from_fd(fd, process->in);
+ err = write_packetized_from_fd_no_flush(fd, process->in);
else
- err = write_packetized_from_buf(src, len, process->in);
+ err = write_packetized_from_buf_no_flush(src, len, process->in);
+ if (err)
+ goto done;
+
+ err = packet_flush_gently(process->in);
if (err)
goto done;
@@ -903,7 +896,8 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
if (err)
goto done;
- err = read_packetized_to_strbuf(process->out, &nbuf) < 0;
+ err = read_packetized_to_strbuf(process->out, &nbuf,
+ PACKET_READ_GENTLE_ON_EOF) < 0;
if (err)
goto done;
@@ -1028,7 +1022,7 @@ static int read_convert_config(const char *var, const char *value, void *cb)
if (!strncmp(drv->name, name, namelen) && !drv->name[namelen])
break;
if (!drv) {
- drv = xcalloc(1, sizeof(struct convert_driver));
+ CALLOC_ARRAY(drv, 1);
drv->name = xmemdupz(name, namelen);
*user_convert_tail = drv;
user_convert_tail = &(drv->next);
@@ -1247,7 +1241,7 @@ static const char *git_path_check_encoding(struct attr_check_item *check)
return value;
}
-static enum crlf_action git_path_check_crlf(struct attr_check_item *check)
+static enum convert_crlf_action git_path_check_crlf(struct attr_check_item *check)
{
const char *value = check->value;
@@ -1297,18 +1291,10 @@ static int git_path_check_ident(struct attr_check_item *check)
return !!ATTR_TRUE(value);
}
-struct conv_attrs {
- struct convert_driver *drv;
- enum crlf_action attr_action; /* What attr says */
- enum crlf_action crlf_action; /* When no attr is set, use core.autocrlf */
- int ident;
- const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
-};
-
static struct attr_check *check;
-static void convert_attrs(struct index_state *istate,
- struct conv_attrs *ca, const char *path)
+void convert_attrs(struct index_state *istate,
+ struct conv_attrs *ca, const char *path)
{
struct attr_check_item *ccheck = NULL;
@@ -1456,7 +1442,6 @@ void convert_to_git_filter_fd(struct index_state *istate,
convert_attrs(istate, &ca, path);
assert(ca.drv);
- assert(ca.drv->clean || ca.drv->process);
if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL, NULL))
die(_("%s: clean filter '%s' failed"), path, ca.drv->name);
@@ -1466,19 +1451,16 @@ void convert_to_git_filter_fd(struct index_state *istate,
ident_to_git(dst->buf, dst->len, dst, ca.ident);
}
-static int convert_to_working_tree_internal(struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- int normalizing,
- const struct checkout_metadata *meta,
- struct delayed_checkout *dco)
+static int convert_to_working_tree_ca_internal(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ int normalizing,
+ const struct checkout_metadata *meta,
+ struct delayed_checkout *dco)
{
int ret = 0, ret_filter = 0;
- struct conv_attrs ca;
- convert_attrs(istate, &ca, path);
-
- ret |= ident_to_worktree(src, len, dst, ca.ident);
+ ret |= ident_to_worktree(src, len, dst, ca->ident);
if (ret) {
src = dst->buf;
len = dst->len;
@@ -1488,49 +1470,56 @@ static int convert_to_working_tree_internal(struct index_state *istate,
* is a smudge or process filter (even if the process filter doesn't
* support smudge). The filters might expect CRLFs.
*/
- if ((ca.drv && (ca.drv->smudge || ca.drv->process)) || !normalizing) {
- ret |= crlf_to_worktree(src, len, dst, ca.crlf_action);
+ if ((ca->drv && (ca->drv->smudge || ca->drv->process)) || !normalizing) {
+ ret |= crlf_to_worktree(src, len, dst, ca->crlf_action);
if (ret) {
src = dst->buf;
len = dst->len;
}
}
- ret |= encode_to_worktree(path, src, len, dst, ca.working_tree_encoding);
+ ret |= encode_to_worktree(path, src, len, dst, ca->working_tree_encoding);
if (ret) {
src = dst->buf;
len = dst->len;
}
ret_filter = apply_filter(
- path, src, len, -1, dst, ca.drv, CAP_SMUDGE, meta, dco);
- if (!ret_filter && ca.drv && ca.drv->required)
- die(_("%s: smudge filter %s failed"), path, ca.drv->name);
+ path, src, len, -1, dst, ca->drv, CAP_SMUDGE, meta, dco);
+ if (!ret_filter && ca->drv && ca->drv->required)
+ die(_("%s: smudge filter %s failed"), path, ca->drv->name);
return ret | ret_filter;
}
-int async_convert_to_working_tree(struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta,
- void *dco)
+int async_convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta,
+ void *dco)
{
- return convert_to_working_tree_internal(istate, path, src, len, dst, 0, meta, dco);
+ return convert_to_working_tree_ca_internal(ca, path, src, len, dst, 0,
+ meta, dco);
}
-int convert_to_working_tree(struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta)
+int convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta)
{
- return convert_to_working_tree_internal(istate, path, src, len, dst, 0, meta, NULL);
+ return convert_to_working_tree_ca_internal(ca, path, src, len, dst, 0,
+ meta, NULL);
}
int renormalize_buffer(struct index_state *istate, const char *path,
const char *src, size_t len, struct strbuf *dst)
{
- int ret = convert_to_working_tree_internal(istate, path, src, len, dst, 1, NULL, NULL);
+ struct conv_attrs ca;
+ int ret;
+
+ convert_attrs(istate, &ca, path);
+ ret = convert_to_working_tree_ca_internal(&ca, path, src, len, dst, 1,
+ NULL, NULL);
if (ret) {
src = dst->buf;
len = dst->len;
@@ -1957,34 +1946,25 @@ static struct stream_filter *ident_filter(const struct object_id *oid)
}
/*
- * Return an appropriately constructed filter for the path, or NULL if
+ * Return an appropriately constructed filter for the given ca, or NULL if
* the contents cannot be filtered without reading the whole thing
* in-core.
*
* Note that you would be crazy to set CRLF, smudge/clean or ident to a
* large binary blob you would want us not to slurp into the memory!
*/
-struct stream_filter *get_stream_filter(struct index_state *istate,
- const char *path,
- const struct object_id *oid)
+struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca,
+ const struct object_id *oid)
{
- struct conv_attrs ca;
struct stream_filter *filter = NULL;
- convert_attrs(istate, &ca, path);
- if (ca.drv && (ca.drv->process || ca.drv->smudge || ca.drv->clean))
+ if (classify_conv_attrs(ca) != CA_CLASS_STREAMABLE)
return NULL;
- if (ca.working_tree_encoding)
- return NULL;
-
- if (ca.crlf_action == CRLF_AUTO || ca.crlf_action == CRLF_AUTO_CRLF)
- return NULL;
-
- if (ca.ident)
+ if (ca->ident)
filter = ident_filter(oid);
- if (output_eol(ca.crlf_action) == EOL_CRLF)
+ if (output_eol(ca->crlf_action) == EOL_CRLF)
filter = cascade_filter(filter, lf_to_crlf_filter());
else
filter = cascade_filter(filter, &null_filter_singleton);
@@ -1992,6 +1972,15 @@ struct stream_filter *get_stream_filter(struct index_state *istate,
return filter;
}
+struct stream_filter *get_stream_filter(struct index_state *istate,
+ const char *path,
+ const struct object_id *oid)
+{
+ struct conv_attrs ca;
+ convert_attrs(istate, &ca, path);
+ return get_stream_filter_ca(&ca, oid);
+}
+
void free_stream_filter(struct stream_filter *filter)
{
filter->vtbl->free(filter);
@@ -2025,3 +2014,21 @@ void clone_checkout_metadata(struct checkout_metadata *dst,
if (blob)
oidcpy(&dst->blob, blob);
}
+
+enum conv_attrs_classification classify_conv_attrs(const struct conv_attrs *ca)
+{
+ if (ca->drv) {
+ if (ca->drv->process)
+ return CA_CLASS_INCORE_PROCESS;
+ if (ca->drv->smudge || ca->drv->clean)
+ return CA_CLASS_INCORE_FILTER;
+ }
+
+ if (ca->working_tree_encoding)
+ return CA_CLASS_INCORE;
+
+ if (ca->crlf_action == CRLF_AUTO || ca->crlf_action == CRLF_AUTO_CRLF)
+ return CA_CLASS_INCORE;
+
+ return CA_CLASS_STREAMABLE;
+}
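The point of this convert.c refactor is to separate attribute lookup from conversion: convert_attrs() is now public, and the conversion entry points gain *_ca() variants that take a precomputed struct conv_attrs. A hedged sketch of a caller that does the lookup once and reuses it for both the streaming decision and the in-core fallback (stream_blob_with_filter() and the surrounding parameters are hypothetical stand-ins):

static int smudge_blob(struct index_state *istate, const char *path,
		       const struct object_id *oid,
		       const char *src, size_t len,
		       struct strbuf *worktree,
		       const struct checkout_metadata *meta)
{
	struct conv_attrs ca;
	struct stream_filter *filter;

	convert_attrs(istate, &ca, path);	/* one attribute lookup */

	filter = get_stream_filter_ca(&ca, oid);
	if (filter) {
		int ret = stream_blob_with_filter(oid, filter);	/* hypothetical */
		free_stream_filter(filter);
		return ret;
	}

	/* Not streamable: reuse the same attrs for the in-core smudge. */
	return convert_to_working_tree_ca(&ca, path, src, len, worktree, meta);
}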
diff --git a/convert.h b/convert.h
index 0f7c8a1..5ee1c32 100644
--- a/convert.h
+++ b/convert.h
@@ -63,6 +63,30 @@ struct checkout_metadata {
struct object_id blob;
};
+enum convert_crlf_action {
+ CRLF_UNDEFINED,
+ CRLF_BINARY,
+ CRLF_TEXT,
+ CRLF_TEXT_INPUT,
+ CRLF_TEXT_CRLF,
+ CRLF_AUTO,
+ CRLF_AUTO_INPUT,
+ CRLF_AUTO_CRLF
+};
+
+struct convert_driver;
+
+struct conv_attrs {
+ struct convert_driver *drv;
+ enum convert_crlf_action attr_action; /* What attr says */
+ enum convert_crlf_action crlf_action; /* When no attr is set, use core.autocrlf */
+ int ident;
+ const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
+};
+
+void convert_attrs(struct index_state *istate,
+ struct conv_attrs *ca, const char *path);
+
extern enum eol core_eol;
extern char *check_roundtrip_encoding;
const char *get_cached_convert_stats_ascii(struct index_state *istate,
@@ -75,15 +99,34 @@ const char *get_convert_attr_ascii(struct index_state *istate,
int convert_to_git(struct index_state *istate,
const char *path, const char *src, size_t len,
struct strbuf *dst, int conv_flags);
-int convert_to_working_tree(struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta);
-int async_convert_to_working_tree(struct index_state *istate,
- const char *path, const char *src,
- size_t len, struct strbuf *dst,
- const struct checkout_metadata *meta,
- void *dco);
+int convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta);
+int async_convert_to_working_tree_ca(const struct conv_attrs *ca,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta,
+ void *dco);
+static inline int convert_to_working_tree(struct index_state *istate,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta)
+{
+ struct conv_attrs ca;
+ convert_attrs(istate, &ca, path);
+ return convert_to_working_tree_ca(&ca, path, src, len, dst, meta);
+}
+static inline int async_convert_to_working_tree(struct index_state *istate,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst,
+ const struct checkout_metadata *meta,
+ void *dco)
+{
+ struct conv_attrs ca;
+ convert_attrs(istate, &ca, path);
+ return async_convert_to_working_tree_ca(&ca, path, src, len, dst, meta, dco);
+}
int async_query_available_blobs(const char *cmd,
struct string_list *available_paths);
int renormalize_buffer(struct index_state *istate,
@@ -136,6 +179,8 @@ struct stream_filter; /* opaque */
struct stream_filter *get_stream_filter(struct index_state *istate,
const char *path,
const struct object_id *);
+struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca,
+ const struct object_id *oid);
void free_stream_filter(struct stream_filter *);
int is_null_stream_filter(struct stream_filter *);
@@ -155,4 +200,37 @@ int stream_filter(struct stream_filter *,
const char *input, size_t *isize_p,
char *output, size_t *osize_p);
+enum conv_attrs_classification {
+ /*
+ * The blob must be loaded into a buffer before it can be
+ * smudged. All smudging is done in-proc.
+ */
+ CA_CLASS_INCORE,
+
+ /*
+ * The blob must be loaded into a buffer, but uses a
+ * single-file driver filter, such as rot13.
+ */
+ CA_CLASS_INCORE_FILTER,
+
+ /*
+ * The blob must be loaded into a buffer, but uses a
+ * long-running driver process, such as LFS. This might or
+ * might not use delayed operations. (The important thing is
+ * that there is a single subordinate long-running process
+ * handling all associated blobs and, in the case of delayed
+ * operations, possibly holding per-blob state.)
+ */
+ CA_CLASS_INCORE_PROCESS,
+
+ /*
+ * The blob can be streamed and smudged without needing to
+ * completely read it into a buffer.
+ */
+ CA_CLASS_STREAMABLE,
+};
+
+enum conv_attrs_classification classify_conv_attrs(
+ const struct conv_attrs *ca);
+
#endif /* CONVERT_H */
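classify_conv_attrs() lets a caller pick a checkout strategy from the attributes alone. A small sketch of such a dispatch, assuming a hypothetical caller and plan enum (only convert_attrs(), classify_conv_attrs() and the CA_CLASS_* values come from the header above):

enum smudge_plan { SMUDGE_STREAM, SMUDGE_INCORE, SMUDGE_VIA_PROCESS };

static enum smudge_plan choose_smudge_plan(struct index_state *istate,
					   const char *path)
{
	struct conv_attrs ca;

	convert_attrs(istate, &ca, path);

	switch (classify_conv_attrs(&ca)) {
	case CA_CLASS_STREAMABLE:
		return SMUDGE_STREAM;		/* no need to load the whole blob */
	case CA_CLASS_INCORE:
	case CA_CLASS_INCORE_FILTER:
		return SMUDGE_INCORE;		/* load into a buffer, smudge in-proc */
	case CA_CLASS_INCORE_PROCESS:
	default:
		return SMUDGE_VIA_PROCESS;	/* hand off to the long-running filter */
	}
}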
diff --git a/csum-file.c b/csum-file.c
index 0f35fa5..7510950 100644
--- a/csum-file.c
+++ b/csum-file.c
@@ -89,32 +89,35 @@ int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int fl
void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
{
while (count) {
- unsigned offset = f->offset;
- unsigned left = sizeof(f->buffer) - offset;
+ unsigned left = sizeof(f->buffer) - f->offset;
unsigned nr = count > left ? left : count;
- const void *data;
if (f->do_crc)
f->crc32 = crc32(f->crc32, buf, nr);
if (nr == sizeof(f->buffer)) {
- /* process full buffer directly without copy */
- data = buf;
+ /*
+ * Flush a full batch worth of data directly
+ * from the input, skipping the memcpy() to
+ * the hashfile's buffer. In this block,
+ * f->offset is necessarily zero.
+ */
+ the_hash_algo->update_fn(&f->ctx, buf, nr);
+ flush(f, buf, nr);
} else {
- memcpy(f->buffer + offset, buf, nr);
- data = f->buffer;
+ /*
+ * Copy to the hashfile's buffer, flushing only
+ * if it became full.
+ */
+ memcpy(f->buffer + f->offset, buf, nr);
+ f->offset += nr;
+ left -= nr;
+ if (!left)
+ hashflush(f);
}
count -= nr;
- offset += nr;
buf = (char *) buf + nr;
- left -= nr;
- if (!left) {
- the_hash_algo->update_fn(&f->ctx, data, offset);
- flush(f, data, offset);
- offset = 0;
- }
- f->offset = offset;
}
}
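The rewritten hashwrite() makes the chunking explicit: a chunk that exactly fills the internal buffer is hashed and flushed straight from the caller's memory, while anything smaller is copied and flushed only once the buffer fills. As a worked example, assuming the 8K buffer that struct hashfile has traditionally used, a 20000-byte write into an empty buffer proceeds as 8192 bytes hashed directly from the input, another 8192 hashed directly, and the remaining 3616 copied into the buffer to await a later flush.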
diff --git a/daemon.c b/daemon.c
index 2ab7ea8..5c4cbad 100644
--- a/daemon.c
+++ b/daemon.c
@@ -566,14 +566,14 @@ static void parse_host_and_port(char *hostport, char **host,
/*
* Sanitize a string from the client so that it's OK to be inserted into a
- * filesystem path. Specifically, we disallow slashes, runs of "..", and
- * trailing and leading dots, which means that the client cannot escape
- * our base path via ".." traversal.
+ * filesystem path. Specifically, we disallow directory separators, runs
+ * of "..", and trailing and leading dots, which means that the client
+ * cannot escape our base path via ".." traversal.
*/
static void sanitize_client(struct strbuf *out, const char *in)
{
for (; *in; in++) {
- if (*in == '/')
+ if (is_dir_sep(*in))
continue;
if (*in == '.' && (!out->len || out->buf[out->len - 1] == '.'))
continue;
@@ -840,7 +840,7 @@ static void add_child(struct child_process *cld, struct sockaddr *addr, socklen_
{
struct child *newborn, **cradle;
- newborn = xcalloc(1, sizeof(*newborn));
+ CALLOC_ARRAY(newborn, 1);
live_children++;
memcpy(&newborn->cld, cld, sizeof(*cld));
memcpy(&newborn->address, addr, addrlen);
@@ -1148,7 +1148,7 @@ static int service_loop(struct socketlist *socklist)
struct pollfd *pfd;
int i;
- pfd = xcalloc(socklist->nr, sizeof(struct pollfd));
+ CALLOC_ARRAY(pfd, socklist->nr);
for (i = 0; i < socklist->nr; i++) {
pfd[i].fd = socklist->list[i];
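
For the sanitize_client() change above, a few illustrative before/after strings (derived from the loop shown there, not taken from this patch's tests) show what switching to is_dir_sep() buys on platforms where '\' is also a directory separator:

	/*
	 *   "host.example.com" -> "host.example.com"  (interior dots kept)
	 *   "../../etc/passwd" -> "etcpasswd"         (separators and leading ".." runs dropped)
	 *   "..\..\evil"       -> "evil"              (backslashes now dropped too, wherever
	 *                                              is_dir_sep() treats '\' as a separator)
	 */
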
diff --git a/decorate.c b/decorate.c
index a605b1b..2036d15 100644
--- a/decorate.c
+++ b/decorate.c
@@ -39,7 +39,7 @@ static void grow_decoration(struct decoration *n)
struct decoration_entry *old_entries = n->entries;
n->size = (old_size + 1000) * 3 / 2;
- n->entries = xcalloc(n->size, sizeof(struct decoration_entry));
+ CALLOC_ARRAY(n->entries, n->size);
n->nr = 0;
for (i = 0; i < old_size; i++) {
diff --git a/diff-lib.c b/diff-lib.c
index b73cc18..e5a58c9 100644
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -28,9 +28,10 @@
* exists for ce that is a submodule -- it is a submodule that is not
* checked out). Return negative for an error.
*/
-static int check_removed(const struct cache_entry *ce, struct stat *st)
+static int check_removed(const struct index_state *istate, const struct cache_entry *ce, struct stat *st)
{
- if (lstat(ce->name, st) < 0) {
+ assert(is_fsmonitor_refreshed(istate));
+ if (!(ce->ce_flags & CE_FSMONITOR_VALID) && lstat(ce->name, st) < 0) {
if (!is_missing_file_error(errno))
return -1;
return 1;
@@ -136,7 +137,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
memset(&(dpath->parent[0]), 0,
sizeof(struct combine_diff_parent)*5);
- changed = check_removed(ce, &st);
+ changed = check_removed(istate, ce, &st);
if (!changed)
wt_mode = ce_mode_from_stat(ce, st.st_mode);
else {
@@ -216,7 +217,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
} else {
struct stat st;
- changed = check_removed(ce, &st);
+ changed = check_removed(istate, ce, &st);
if (changed) {
if (changed < 0) {
perror(ce->name);
@@ -278,7 +279,8 @@ static void diff_index_show_file(struct rev_info *revs,
oid, oid_valid, ce->name, dirty_submodule);
}
-static int get_stat_data(const struct cache_entry *ce,
+static int get_stat_data(const struct index_state *istate,
+ const struct cache_entry *ce,
const struct object_id **oidp,
unsigned int *modep,
int cached, int match_missing,
@@ -290,7 +292,7 @@ static int get_stat_data(const struct cache_entry *ce,
if (!cached && !ce_uptodate(ce)) {
int changed;
struct stat st;
- changed = check_removed(ce, &st);
+ changed = check_removed(istate, ce, &st);
if (changed < 0)
return -1;
else if (changed) {
@@ -321,12 +323,13 @@ static void show_new_file(struct rev_info *revs,
const struct object_id *oid;
unsigned int mode;
unsigned dirty_submodule = 0;
+ struct index_state *istate = revs->diffopt.repo->index;
/*
* New file in the index: it might actually be different in
* the working tree.
*/
- if (get_stat_data(new_file, &oid, &mode, cached, match_missing,
+ if (get_stat_data(istate, new_file, &oid, &mode, cached, match_missing,
&dirty_submodule, &revs->diffopt) < 0)
return;
@@ -342,8 +345,9 @@ static int show_modified(struct rev_info *revs,
unsigned int mode, oldmode;
const struct object_id *oid;
unsigned dirty_submodule = 0;
+ struct index_state *istate = revs->diffopt.repo->index;
- if (get_stat_data(new_entry, &oid, &mode, cached, match_missing,
+ if (get_stat_data(istate, new_entry, &oid, &mode, cached, match_missing,
&dirty_submodule, &revs->diffopt) < 0) {
if (report_missing)
diff_index_show_file(revs, "-", old_entry,
@@ -574,6 +578,7 @@ int run_diff_index(struct rev_info *revs, unsigned int option)
struct object_id oid;
const char *name;
char merge_base_hex[GIT_MAX_HEXSZ + 1];
+ struct index_state *istate = revs->diffopt.repo->index;
if (revs->pending.nr != 1)
BUG("run_diff_index must be passed exactly one tree");
@@ -581,6 +586,8 @@ int run_diff_index(struct rev_info *revs, unsigned int option)
trace_performance_enter();
ent = revs->pending.objects;
+ refresh_fsmonitor(istate);
+
if (merge_base) {
diff_get_merge_base(revs, &oid);
name = oid_to_hex_r(merge_base_hex, &oid);
diff --git a/diff.c b/diff.c
index 6956f5e..4acccd9 100644
--- a/diff.c
+++ b/diff.c
@@ -2233,14 +2233,12 @@ static void init_diff_words_data(struct emit_callback *ecbdata,
struct diff_options *o = xmalloc(sizeof(struct diff_options));
memcpy(o, orig_opts, sizeof(struct diff_options));
- ecbdata->diff_words =
- xcalloc(1, sizeof(struct diff_words_data));
+ CALLOC_ARRAY(ecbdata->diff_words, 1);
ecbdata->diff_words->type = o->word_diff;
ecbdata->diff_words->opt = o;
if (orig_opts->emitted_symbols)
- o->emitted_symbols =
- xcalloc(1, sizeof(struct emitted_diff_symbols));
+ CALLOC_ARRAY(o->emitted_symbols, 1);
if (!o->word_regex)
o->word_regex = userdiff_word_regex(one, o->repo->index);
@@ -2509,7 +2507,7 @@ static struct diffstat_file *diffstat_add(struct diffstat_t *diffstat,
const char *name_b)
{
struct diffstat_file *x;
- x = xcalloc(1, sizeof(*x));
+ CALLOC_ARRAY(x, 1);
ALLOC_GROW(diffstat->files, diffstat->nr + 1, diffstat->alloc);
diffstat->files[diffstat->nr++] = x;
if (name_b) {
@@ -4918,7 +4916,7 @@ static int diff_opt_find_object(const struct option *option,
return error(_("unable to resolve '%s'"), arg);
if (!opt->objfind)
- opt->objfind = xcalloc(1, sizeof(*opt->objfind));
+ CALLOC_ARRAY(opt->objfind, 1);
opt->pickaxe_opts |= DIFF_PICKAXE_KIND_OBJFIND;
opt->flags.recursive = 1;
diff --git a/diffcore-rename.c b/diffcore-rename.c
index 4155818..963ca58 100644
--- a/diffcore-rename.c
+++ b/diffcore-rename.c
@@ -367,6 +367,361 @@ static int find_exact_renames(struct diff_options *options)
return renames;
}
+struct dir_rename_info {
+ struct strintmap idx_map;
+ struct strmap dir_rename_guess;
+ struct strmap *dir_rename_count;
+ struct strintmap *relevant_source_dirs;
+ unsigned setup;
+};
+
+static char *get_dirname(const char *filename)
+{
+ char *slash = strrchr(filename, '/');
+ return slash ? xstrndup(filename, slash - filename) : xstrdup("");
+}
+
+static void dirname_munge(char *filename)
+{
+ char *slash = strrchr(filename, '/');
+ if (!slash)
+ slash = filename;
+ *slash = '\0';
+}
+
+static const char *get_highest_rename_path(struct strintmap *counts)
+{
+ int highest_count = 0;
+ const char *highest_destination_dir = NULL;
+ struct hashmap_iter iter;
+ struct strmap_entry *entry;
+
+ strintmap_for_each_entry(counts, &iter, entry) {
+ const char *destination_dir = entry->key;
+ intptr_t count = (intptr_t)entry->value;
+ if (count > highest_count) {
+ highest_count = count;
+ highest_destination_dir = destination_dir;
+ }
+ }
+ return highest_destination_dir;
+}
+
+static char *UNKNOWN_DIR = "/"; /* placeholder -- short, illegal directory */
+
+static int dir_rename_already_determinable(struct strintmap *counts)
+{
+ struct hashmap_iter iter;
+ struct strmap_entry *entry;
+ int first = 0, second = 0, unknown = 0;
+ strintmap_for_each_entry(counts, &iter, entry) {
+ const char *destination_dir = entry->key;
+ intptr_t count = (intptr_t)entry->value;
+ if (!strcmp(destination_dir, UNKNOWN_DIR)) {
+ unknown = count;
+ } else if (count >= first) {
+ second = first;
+ first = count;
+ } else if (count >= second) {
+ second = count;
+ }
+ }
+ return first > second + unknown;
+}
+
+static void increment_count(struct dir_rename_info *info,
+ char *old_dir,
+ char *new_dir)
+{
+ struct strintmap *counts;
+ struct strmap_entry *e;
+
+ /* Get the {new_dirs -> counts} mapping using old_dir */
+ e = strmap_get_entry(info->dir_rename_count, old_dir);
+ if (e) {
+ counts = e->value;
+ } else {
+ counts = xmalloc(sizeof(*counts));
+ strintmap_init_with_options(counts, 0, NULL, 1);
+ strmap_put(info->dir_rename_count, old_dir, counts);
+ }
+
+ /* Increment the count for new_dir */
+ strintmap_incr(counts, new_dir, 1);
+}
+
+static void update_dir_rename_counts(struct dir_rename_info *info,
+ struct strintmap *dirs_removed,
+ const char *oldname,
+ const char *newname)
+{
+ char *old_dir = xstrdup(oldname);
+ char *new_dir = xstrdup(newname);
+ char new_dir_first_char = new_dir[0];
+ int first_time_in_loop = 1;
+
+ if (!info->setup)
+ /*
+ * info->setup is 0 here in two cases: (1) all auxiliary
+ * vars (like dirs_removed) were NULL so
+ * initialize_dir_rename_info() returned early, or (2)
+	 * either break detection or copy detection is active so
+ * that we never called initialize_dir_rename_info(). In
+ * the former case, we don't have enough info to know if
+ * directories were renamed (because dirs_removed lets us
+ * know about a necessary prerequisite, namely if they were
+ * removed), and in the latter, we don't care about
+ * directory renames or find_basename_matches.
+ *
+ * This matters because both basename and inexact matching
+ * will also call update_dir_rename_counts(). In either of
+	 * the above two cases, info->dir_rename_count will not
+ * have been properly initialized which prevents us from
+ * updating it, but in these two cases we don't care about
+ * dir_rename_counts anyway, so we can just exit early.
+ */
+ return;
+
+ while (1) {
+ int drd_flag = NOT_RELEVANT;
+
+ /* Get old_dir, skip if its directory isn't relevant. */
+ dirname_munge(old_dir);
+ if (info->relevant_source_dirs &&
+ !strintmap_contains(info->relevant_source_dirs, old_dir))
+ break;
+
+ /* Get new_dir */
+ dirname_munge(new_dir);
+
+ /*
+ * When renaming
+ * "a/b/c/d/e/foo.c" -> "a/b/some/thing/else/e/foo.c"
+ * then this suggests that both
+ * a/b/c/d/e/ => a/b/some/thing/else/e/
+ * a/b/c/d/ => a/b/some/thing/else/
+ * so we want to increment counters for both. We do NOT,
+ * however, also want to suggest that there was the following
+ * rename:
+ * a/b/c/ => a/b/some/thing/
+ * so we need to quit at that point.
+ *
+	 * Note that when first_time_in_loop is true, we only strip off the
+ * basename, and we don't care if that's different.
+ */
+ if (!first_time_in_loop) {
+ char *old_sub_dir = strchr(old_dir, '\0')+1;
+ char *new_sub_dir = strchr(new_dir, '\0')+1;
+ if (!*new_dir) {
+ /*
+ * Special case when renaming to root directory,
+ * i.e. when new_dir == "". In this case, we had
+ * something like
+ * a/b/subdir => subdir
+ * and so dirname_munge() sets things up so that
+ * old_dir = "a/b\0subdir\0"
+ * new_dir = "\0ubdir\0"
+ * We didn't have a '/' to overwrite a '\0' onto
+ * in new_dir, so we have to compare differently.
+ */
+ if (new_dir_first_char != old_sub_dir[0] ||
+ strcmp(old_sub_dir+1, new_sub_dir))
+ break;
+ } else {
+ if (strcmp(old_sub_dir, new_sub_dir))
+ break;
+ }
+ }
+
+ /*
+ * Above we suggested that we'd keep recording renames for
+ * all ancestor directories where the trailing directories
+ * matched, i.e. for
+ * "a/b/c/d/e/foo.c" -> "a/b/some/thing/else/e/foo.c"
+ * we'd increment rename counts for each of
+ * a/b/c/d/e/ => a/b/some/thing/else/e/
+ * a/b/c/d/ => a/b/some/thing/else/
+ * However, we only need the rename counts for directories
+ * in dirs_removed whose value is RELEVANT_FOR_SELF.
+	 * That said, we add one special case of also recording it for
+ * first_time_in_loop because find_basename_matches() can
+ * use that as a hint to find a good pairing.
+ */
+ if (dirs_removed)
+ drd_flag = strintmap_get(dirs_removed, old_dir);
+ if (drd_flag == RELEVANT_FOR_SELF || first_time_in_loop)
+ increment_count(info, old_dir, new_dir);
+
+ first_time_in_loop = 0;
+ if (drd_flag == NOT_RELEVANT)
+ break;
+ /* If we hit toplevel directory ("") for old or new dir, quit */
+ if (!*old_dir || !*new_dir)
+ break;
+ }
+
+ /* Free resources we don't need anymore */
+ free(old_dir);
+ free(new_dir);
+}
+
+static void initialize_dir_rename_info(struct dir_rename_info *info,
+ struct strintmap *relevant_sources,
+ struct strintmap *dirs_removed,
+ struct strmap *dir_rename_count)
+{
+ struct hashmap_iter iter;
+ struct strmap_entry *entry;
+ int i;
+
+ if (!dirs_removed && !relevant_sources) {
+ info->setup = 0;
+ return;
+ }
+ info->setup = 1;
+
+ info->dir_rename_count = dir_rename_count;
+ if (!info->dir_rename_count) {
+ info->dir_rename_count = xmalloc(sizeof(*dir_rename_count));
+ strmap_init(info->dir_rename_count);
+ }
+ strintmap_init_with_options(&info->idx_map, -1, NULL, 0);
+ strmap_init_with_options(&info->dir_rename_guess, NULL, 0);
+
+ /* Setup info->relevant_source_dirs */
+ info->relevant_source_dirs = NULL;
+ if (dirs_removed || !relevant_sources) {
+ info->relevant_source_dirs = dirs_removed; /* might be NULL */
+ } else {
+ info->relevant_source_dirs = xmalloc(sizeof(struct strintmap));
+ strintmap_init(info->relevant_source_dirs, 0 /* unused */);
+ strintmap_for_each_entry(relevant_sources, &iter, entry) {
+ char *dirname = get_dirname(entry->key);
+ if (!dirs_removed ||
+ strintmap_contains(dirs_removed, dirname))
+ strintmap_set(info->relevant_source_dirs,
+ dirname, 0 /* value irrelevant */);
+ free(dirname);
+ }
+ }
+
+ /*
+	 * Loop setting up both info->idx_map and info->dir_rename_count.
+ */
+ for (i = 0; i < rename_dst_nr; ++i) {
+ /*
+ * For non-renamed files, make idx_map contain mapping of
+ * filename -> index (index within rename_dst, that is)
+ */
+ if (!rename_dst[i].is_rename) {
+ char *filename = rename_dst[i].p->two->path;
+ strintmap_set(&info->idx_map, filename, i);
+ continue;
+ }
+
+ /*
+ * For everything else (i.e. renamed files), make
+ * dir_rename_count contain a map of a map:
+ * old_directory -> {new_directory -> count}
+ * In other words, for every pair look at the directories for
+ * the old filename and the new filename and count how many
+ * times that pairing occurs.
+ */
+ update_dir_rename_counts(info, dirs_removed,
+			die(_("source repository is shallow, refusing to clone."));
+ rename_dst[i].p->two->path);
+ }
+
+ /*
+ * Now we collapse
+ * dir_rename_count: old_directory -> {new_directory -> count}
+ * down to
+ * dir_rename_guess: old_directory -> best_new_directory
+ * where best_new_directory is the one with the highest count.
+ */
+ strmap_for_each_entry(info->dir_rename_count, &iter, entry) {
+ /* entry->key is source_dir */
+ struct strintmap *counts = entry->value;
+ char *best_newdir;
+
+ best_newdir = xstrdup(get_highest_rename_path(counts));
+ strmap_put(&info->dir_rename_guess, entry->key,
+ best_newdir);
+ }
+}
+
+void partial_clear_dir_rename_count(struct strmap *dir_rename_count)
+{
+ struct hashmap_iter iter;
+ struct strmap_entry *entry;
+
+ strmap_for_each_entry(dir_rename_count, &iter, entry) {
+ struct strintmap *counts = entry->value;
+ strintmap_clear(counts);
+ }
+ strmap_partial_clear(dir_rename_count, 1);
+}
+
+static void cleanup_dir_rename_info(struct dir_rename_info *info,
+ struct strintmap *dirs_removed,
+ int keep_dir_rename_count)
+{
+ struct hashmap_iter iter;
+ struct strmap_entry *entry;
+ struct string_list to_remove = STRING_LIST_INIT_NODUP;
+ int i;
+
+ if (!info->setup)
+ return;
+
+ /* idx_map */
+ strintmap_clear(&info->idx_map);
+
+ /* dir_rename_guess */
+ strmap_clear(&info->dir_rename_guess, 1);
+
+ /* relevant_source_dirs */
+ if (info->relevant_source_dirs &&
+ info->relevant_source_dirs != dirs_removed) {
+ strintmap_clear(info->relevant_source_dirs);
+ FREE_AND_NULL(info->relevant_source_dirs);
+ }
+
+ /* dir_rename_count */
+ if (!keep_dir_rename_count) {
+ partial_clear_dir_rename_count(info->dir_rename_count);
+ strmap_clear(info->dir_rename_count, 1);
+ FREE_AND_NULL(info->dir_rename_count);
+ return;
+ }
+
+ /*
+ * Although dir_rename_count was passed in
+ * diffcore_rename_extended() and we want to keep it around and
+ * return it to that caller, we first want to remove any counts in
+ * the maps associated with UNKNOWN_DIR entries and any data
+ * associated with directories that weren't renamed.
+ */
+ strmap_for_each_entry(info->dir_rename_count, &iter, entry) {
+ const char *source_dir = entry->key;
+ struct strintmap *counts = entry->value;
+
+ if (!strintmap_get(dirs_removed, source_dir)) {
+ string_list_append(&to_remove, source_dir);
+ strintmap_clear(counts);
+ continue;
+ }
+
+ if (strintmap_contains(counts, UNKNOWN_DIR))
+ strintmap_remove(counts, UNKNOWN_DIR);
+ }
+ for (i = 0; i < to_remove.nr; ++i)
+ strmap_remove(info->dir_rename_count,
+ to_remove.items[i].string, 1);
+ string_list_clear(&to_remove, 0);
+}
+
static const char *get_basename(const char *filename)
{
/*
@@ -379,8 +734,88 @@ static const char *get_basename(const char *filename)
return base ? base + 1 : filename;
}
+static int idx_possible_rename(char *filename, struct dir_rename_info *info)
+{
+ /*
+ * Our comparison of files with the same basename (see
+ * find_basename_matches() below), is only helpful when after exact
+ * rename detection we have exactly one file with a given basename
+ * among the rename sources and also only exactly one file with
+ * that basename among the rename destinations. When we have
+ * multiple files with the same basename in either set, we do not
+ * know which to compare against. However, there are some
+ * filenames that occur in large numbers (particularly
+ * build-related filenames such as 'Makefile', '.gitignore', or
+ * 'build.gradle' that potentially exist within every single
+ * subdirectory), and for performance we want to be able to quickly
+ * find renames for these files too.
+ *
+	 * The reason basename comparisons are a useful heuristic is that it
+	 * is common for people to move files across directories while keeping
+	 * their filename the same. If we had a way of determining, or even
+	 * making a good educated guess about, which directory these non-unique
+	 * basename files had moved to, we could check it.
+ * Luckily...
+ *
+ * When an entire directory is in fact renamed, we have two factors
+ * helping us out:
+ * (a) the original directory disappeared giving us a hint
+ * about when we can apply an extra heuristic.
+	 *   (b) we often have several files within that directory and
+ * subdirectories that are renamed without changes
+ * So, rules for a heuristic:
+	 *   (0) If the basename matches are non-unique (the condition under
+ * which this function is called) AND
+ * (1) the directory in which the file was found has disappeared
+ * (i.e. dirs_removed is non-NULL and has a relevant entry) THEN
+ * (2) use exact renames of files within the directory to determine
+ * where the directory is likely to have been renamed to. IF
+ * there is at least one exact rename from within that
+ * directory, we can proceed.
+ * (3) If there are multiple places the directory could have been
+ * renamed to based on exact renames, ignore all but one of them.
+ * Just use the destination with the most renames going to it.
+ * (4) Check if applying that directory rename to the original file
+ * would result in a destination filename that is in the
+ * potential rename set. If so, return the index of the
+ * destination file (the index within rename_dst).
+ * (5) Compare the original file and returned destination for
+ * similarity, and if they are sufficiently similar, record the
+ * rename.
+ *
+ * This function, idx_possible_rename(), is only responsible for (4).
+ * The conditions/steps in (1)-(3) are handled via setting up
+ * dir_rename_count and dir_rename_guess in
+ * initialize_dir_rename_info(). Steps (0) and (5) are handled by
+ * the caller of this function.
+ */
+ char *old_dir, *new_dir;
+ struct strbuf new_path = STRBUF_INIT;
+ int idx;
+
+ if (!info->setup)
+ return -1;
+
+ old_dir = get_dirname(filename);
+ new_dir = strmap_get(&info->dir_rename_guess, old_dir);
+ free(old_dir);
+ if (!new_dir)
+ return -1;
+
+ strbuf_addstr(&new_path, new_dir);
+ strbuf_addch(&new_path, '/');
+ strbuf_addstr(&new_path, get_basename(filename));
+
+ idx = strintmap_get(&info->idx_map, new_path.buf);
+ strbuf_release(&new_path);
+ return idx;
+}
+
static int find_basename_matches(struct diff_options *options,
- int minimum_score)
+ int minimum_score,
+ struct dir_rename_info *info,
+ struct strintmap *relevant_sources,
+ struct strintmap *dirs_removed)
{
/*
* When I checked in early 2020, over 76% of file renames in linux
@@ -415,8 +850,6 @@ static int find_basename_matches(struct diff_options *options,
int i, renames = 0;
struct strintmap sources;
struct strintmap dests;
- struct hashmap_iter iter;
- struct strmap_entry *entry;
/*
	 * The prefetching stuff wants to know if it can skip prefetching
@@ -466,17 +899,44 @@ static int find_basename_matches(struct diff_options *options,
}
/* Now look for basename matchups and do similarity estimation */
- strintmap_for_each_entry(&sources, &iter, entry) {
- const char *base = entry->key;
- intptr_t src_index = (intptr_t)entry->value;
+ for (i = 0; i < rename_src_nr; ++i) {
+ char *filename = rename_src[i].p->one->path;
+ const char *base = NULL;
+ intptr_t src_index;
intptr_t dst_index;
- if (src_index == -1)
+
+ /* Skip irrelevant sources */
+ if (relevant_sources &&
+ !strintmap_contains(relevant_sources, filename))
continue;
- if (0 <= (dst_index = strintmap_get(&dests, base))) {
+ /*
+ * If the basename is unique among remaining sources, then
+ * src_index will equal 'i' and we can attempt to match it
+ * to a unique basename in the destinations. Otherwise,
+ * use directory rename heuristics, if possible.
+ */
+ base = get_basename(filename);
+ src_index = strintmap_get(&sources, base);
+ assert(src_index == -1 || src_index == i);
+
+ if (strintmap_contains(&dests, base)) {
struct diff_filespec *one, *two;
int score;
+ /* Find a matching destination, if possible */
+ dst_index = strintmap_get(&dests, base);
+ if (src_index == -1 || dst_index == -1) {
+ src_index = i;
+ dst_index = idx_possible_rename(filename, info);
+ }
+ if (dst_index == -1)
+ continue;
+
+ /* Ignore this dest if already used in a rename */
+ if (rename_dst[dst_index].is_rename)
+ continue; /* already used previously */
+
/* Estimate the similarity */
one = rename_src[src_index].p->one;
two = rename_dst[dst_index].p->two;
@@ -488,6 +948,8 @@ static int find_basename_matches(struct diff_options *options,
continue;
record_rename_pair(dst_index, src_index, score);
renames++;
+ update_dir_rename_counts(info, dirs_removed,
+ one->path, two->path);
/*
* Found a rename so don't need text anymore; if we
@@ -571,7 +1033,12 @@ static int too_many_rename_candidates(int num_destinations, int num_sources,
return 1;
}
-static int find_renames(struct diff_score *mx, int dst_cnt, int minimum_score, int copies)
+static int find_renames(struct diff_score *mx,
+ int dst_cnt,
+ int minimum_score,
+ int copies,
+ struct dir_rename_info *info,
+ struct strintmap *dirs_removed)
{
int count = 0, i;
@@ -588,15 +1055,19 @@ static int find_renames(struct diff_score *mx, int dst_cnt, int minimum_score, i
continue;
record_rename_pair(mx[i].dst, mx[i].src, mx[i].score);
count++;
+ update_dir_rename_counts(info, dirs_removed,
+ rename_src[mx[i].src].p->one->path,
+ rename_dst[mx[i].dst].p->two->path);
}
return count;
}
-static void remove_unneeded_paths_from_src(int detecting_copies)
+static void remove_unneeded_paths_from_src(int detecting_copies,
+ struct strintmap *interesting)
{
int i, new_num_src;
- if (detecting_copies)
+ if (detecting_copies && !interesting)
return; /* nothing to remove */
if (break_idx)
return; /* culling incompatible with break detection */
@@ -623,12 +1094,18 @@ static void remove_unneeded_paths_from_src(int detecting_copies)
* from rename_src here.
*/
for (i = 0, new_num_src = 0; i < rename_src_nr; i++) {
+ struct diff_filespec *one = rename_src[i].p->one;
+
/*
* renames are stored in rename_dst, so if a rename has
* already been detected using this source, we can just
* remove the source knowing rename_dst has its info.
*/
- if (rename_src[i].p->one->rename_used)
+ if (!detecting_copies && one->rename_used)
+ continue;
+
+ /* If we don't care about the source path, skip it */
+ if (interesting && !strintmap_contains(interesting, one->path))
continue;
if (new_num_src < i)
@@ -640,7 +1117,137 @@ static void remove_unneeded_paths_from_src(int detecting_copies)
rename_src_nr = new_num_src;
}
-void diffcore_rename(struct diff_options *options)
+static void handle_early_known_dir_renames(struct dir_rename_info *info,
+ struct strintmap *relevant_sources,
+ struct strintmap *dirs_removed)
+{
+ /*
+ * Directory renames are determined via an aggregate of all renames
+	 * under them, using a "majority wins" rule. The fact that
+	 * "majority wins", though, means we don't need all the renames
+	 * under the given directory; we only need enough to ensure we have
+ * a majority.
+ */
+
+ int i, new_num_src;
+ struct hashmap_iter iter;
+ struct strmap_entry *entry;
+
+ if (!dirs_removed || !relevant_sources)
+ return; /* nothing to cull */
+ if (break_idx)
+		return; /* culling incompatible with break detection */
+
+ /*
+ * Supplement dir_rename_count with number of potential renames,
+ * marking all potential rename sources as mapping to UNKNOWN_DIR.
+ */
+ for (i = 0; i < rename_src_nr; i++) {
+ char *old_dir;
+ struct diff_filespec *one = rename_src[i].p->one;
+
+ /*
+ * sources that are part of a rename will have already been
+ * removed by a prior call to remove_unneeded_paths_from_src()
+ */
+ assert(!one->rename_used);
+
+ old_dir = get_dirname(one->path);
+ while (*old_dir != '\0' &&
+ NOT_RELEVANT != strintmap_get(dirs_removed, old_dir)) {
+ char *freeme = old_dir;
+
+ increment_count(info, old_dir, UNKNOWN_DIR);
+ old_dir = get_dirname(old_dir);
+
+ /* Free resources we don't need anymore */
+ free(freeme);
+ }
+ /*
+		 * Each get_dirname() call above returned a freshly allocated
+		 * string; the loop freed the previous one on each iteration,
+		 * but the final old_dir (or the initial one, if the loop ran
+		 * zero times) still needs to be freed here.
+ */
+ free(old_dir);
+ }
+
+ /*
+ * For any directory which we need a potential rename detected for
+ * (i.e. those marked as RELEVANT_FOR_SELF in dirs_removed), check
+ * whether we have enough renames to satisfy the "majority rules"
+ * requirement such that detecting any more renames of files under
+ * it won't change the result. For any such directory, mark that
+ * we no longer need to detect a rename for it. However, since we
+ * might need to still detect renames for an ancestor of that
+ * directory, use RELEVANT_FOR_ANCESTOR.
+ */
+ strmap_for_each_entry(info->dir_rename_count, &iter, entry) {
+ /* entry->key is source_dir */
+ struct strintmap *counts = entry->value;
+
+ if (strintmap_get(dirs_removed, entry->key) ==
+ RELEVANT_FOR_SELF &&
+ dir_rename_already_determinable(counts)) {
+ strintmap_set(dirs_removed, entry->key,
+ RELEVANT_FOR_ANCESTOR);
+ }
+ }
+
+ for (i = 0, new_num_src = 0; i < rename_src_nr; i++) {
+ struct diff_filespec *one = rename_src[i].p->one;
+ int val;
+
+ val = strintmap_get(relevant_sources, one->path);
+
+ /*
+ * sources that were not found in relevant_sources should
+ * have already been removed by a prior call to
+ * remove_unneeded_paths_from_src()
+ */
+ assert(val != -1);
+
+ if (val == RELEVANT_LOCATION) {
+ int removable = 1;
+ char *dir = get_dirname(one->path);
+ while (1) {
+ char *freeme = dir;
+ int res = strintmap_get(dirs_removed, dir);
+
+ /* Quit if not found or irrelevant */
+ if (res == NOT_RELEVANT)
+ break;
+ /* If RELEVANT_FOR_SELF, can't remove */
+ if (res == RELEVANT_FOR_SELF) {
+ removable = 0;
+ break;
+ }
+ /* Else continue searching upwards */
+ assert(res == RELEVANT_FOR_ANCESTOR);
+ dir = get_dirname(dir);
+ free(freeme);
+ }
+ free(dir);
+ if (removable) {
+ strintmap_set(relevant_sources, one->path,
+ RELEVANT_NO_MORE);
+ continue;
+ }
+ }
+
+ if (new_num_src < i)
+ memcpy(&rename_src[new_num_src], &rename_src[i],
+ sizeof(struct diff_rename_src));
+ new_num_src++;
+ }
+
+ rename_src_nr = new_num_src;
+}
+
+void diffcore_rename_extended(struct diff_options *options,
+ struct strintmap *relevant_sources,
+ struct strintmap *dirs_removed,
+ struct strmap *dir_rename_count)
{
int detect_rename = options->detect_rename;
int minimum_score = options->rename_score;
@@ -651,9 +1258,16 @@ void diffcore_rename(struct diff_options *options)
int num_destinations, dst_cnt;
int num_sources, want_copies;
struct progress *progress = NULL;
+ struct dir_rename_info info;
trace2_region_enter("diff", "setup", options->repo);
+ info.setup = 0;
+ assert(!dir_rename_count || strmap_empty(dir_rename_count));
want_copies = (detect_rename == DIFF_DETECT_COPY);
+ if (dirs_removed && (break_idx || want_copies))
+ BUG("dirs_removed incompatible with break/copy detection");
+ if (break_idx && relevant_sources)
+ BUG("break detection incompatible with source specification");
if (!minimum_score)
minimum_score = DEFAULT_RENAME_SCORE;
@@ -721,9 +1335,10 @@ void diffcore_rename(struct diff_options *options)
/*
* Cull sources:
* - remove ones corresponding to exact renames
+ * - remove ones not found in relevant_sources
*/
trace2_region_enter("diff", "cull after exact", options->repo);
- remove_unneeded_paths_from_src(want_copies);
+ remove_unneeded_paths_from_src(want_copies, relevant_sources);
trace2_region_leave("diff", "cull after exact", options->repo);
} else {
/* Determine minimum score to match basenames */
@@ -742,21 +1357,38 @@ void diffcore_rename(struct diff_options *options)
* - remove ones involved in renames (found via exact match)
*/
trace2_region_enter("diff", "cull after exact", options->repo);
- remove_unneeded_paths_from_src(want_copies);
+ remove_unneeded_paths_from_src(want_copies, NULL);
trace2_region_leave("diff", "cull after exact", options->repo);
+ /* Preparation for basename-driven matching. */
+ trace2_region_enter("diff", "dir rename setup", options->repo);
+ initialize_dir_rename_info(&info, relevant_sources,
+ dirs_removed, dir_rename_count);
+ trace2_region_leave("diff", "dir rename setup", options->repo);
+
/* Utilize file basenames to quickly find renames. */
trace2_region_enter("diff", "basename matches", options->repo);
rename_count += find_basename_matches(options,
- min_basename_score);
+ min_basename_score,
+ &info,
+ relevant_sources,
+ dirs_removed);
trace2_region_leave("diff", "basename matches", options->repo);
/*
* Cull sources, again:
* - remove ones involved in renames (found via basenames)
+ * - remove ones not found in relevant_sources
+ * and
+ * - remove ones in relevant_sources which are needed only
+	 *     for directory renames IF no ancestor directory
+ * actually needs to know any more individual path
+ * renames under them
*/
trace2_region_enter("diff", "cull basename", options->repo);
- remove_unneeded_paths_from_src(want_copies);
+ remove_unneeded_paths_from_src(want_copies, relevant_sources);
+ handle_early_known_dir_renames(&info, relevant_sources,
+ dirs_removed);
trace2_region_leave("diff", "cull basename", options->repo);
}
@@ -787,8 +1419,7 @@ void diffcore_rename(struct diff_options *options)
(uint64_t)num_destinations * (uint64_t)num_sources);
}
- mx = xcalloc(st_mult(NUM_CANDIDATE_PER_DST, num_destinations),
- sizeof(*mx));
+ CALLOC_ARRAY(mx, st_mult(NUM_CANDIDATE_PER_DST, num_destinations));
for (dst_cnt = i = 0; i < rename_dst_nr; i++) {
struct diff_filespec *two = rename_dst[i].p->two;
struct diff_score *m;
@@ -834,9 +1465,11 @@ void diffcore_rename(struct diff_options *options)
/* cost matrix sorted by most to least similar pair */
STABLE_QSORT(mx, dst_cnt * NUM_CANDIDATE_PER_DST, score_compare);
- rename_count += find_renames(mx, dst_cnt, minimum_score, 0);
+ rename_count += find_renames(mx, dst_cnt, minimum_score, 0,
+ &info, dirs_removed);
if (want_copies)
- rename_count += find_renames(mx, dst_cnt, minimum_score, 1);
+ rename_count += find_renames(mx, dst_cnt, minimum_score, 1,
+ &info, dirs_removed);
free(mx);
trace2_region_leave("diff", "inexact renames", options->repo);
@@ -912,6 +1545,7 @@ void diffcore_rename(struct diff_options *options)
if (rename_dst[i].filespec_to_free)
free_filespec(rename_dst[i].filespec_to_free);
+ cleanup_dir_rename_info(&info, dirs_removed, dir_rename_count != NULL);
FREE_AND_NULL(rename_dst);
rename_dst_nr = rename_dst_alloc = 0;
FREE_AND_NULL(rename_src);
@@ -923,3 +1557,8 @@ void diffcore_rename(struct diff_options *options)
trace2_region_leave("diff", "write back to queue", options->repo);
return;
}
+
+void diffcore_rename(struct diff_options *options)
+{
+ diffcore_rename_extended(options, NULL, NULL, NULL);
+}
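
To make the "majority wins" early exit concrete, here is a small worked sketch (hypothetical counts, for illustration only) of what dir_rename_already_determinable() and handle_early_known_dir_renames() above compute for one removed source directory:

	/*
	 * Suppose exact/basename renames out of the removed directory
	 * "src" have so far landed as follows, with UNKNOWN_DIR counting
	 * sources that are not yet paired:
	 *
	 *     "lib"       -> 5      (first  = 5)
	 *     "tools"     -> 1      (second = 1)
	 *     UNKNOWN_DIR -> 2      (unknown = 2)
	 *
	 * Even if both unknown sources later turn out to map to "tools",
	 * that directory reaches only 1 + 2 = 3 < 5, so the winner cannot
	 * change; dir_rename_already_determinable() returns true
	 * (5 > 1 + 2) and "src" can be downgraded from RELEVANT_FOR_SELF
	 * to RELEVANT_FOR_ANCESTOR without detecting any further renames
	 * underneath it.
	 */
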
diff --git a/diffcore.h b/diffcore.h
index c1592bc..f5c6de4 100644
--- a/diffcore.h
+++ b/diffcore.h
@@ -8,6 +8,8 @@
struct diff_options;
struct repository;
+struct strintmap;
+struct strmap;
struct userdiff_driver;
/* This header file is internal between diff.c and its diff transformers
@@ -159,8 +161,27 @@ struct diff_filepair *diff_queue(struct diff_queue_struct *,
struct diff_filespec *);
void diff_q(struct diff_queue_struct *, struct diff_filepair *);
+/* dir_rename_relevance: the reason we want rename information for a dir */
+enum dir_rename_relevance {
+ NOT_RELEVANT = 0,
+ RELEVANT_FOR_ANCESTOR = 1,
+ RELEVANT_FOR_SELF = 2
+};
+/* file_rename_relevance: the reason(s) we want rename information for a file */
+enum file_rename_relevance {
+ RELEVANT_NO_MORE = 0, /* i.e. NOT relevant */
+ RELEVANT_CONTENT = 1,
+ RELEVANT_LOCATION = 2
+};
+
+void partial_clear_dir_rename_count(struct strmap *dir_rename_count);
+
void diffcore_break(struct repository *, int);
void diffcore_rename(struct diff_options *);
+void diffcore_rename_extended(struct diff_options *options,
+ struct strintmap *relevant_sources,
+ struct strintmap *dirs_removed,
+ struct strmap *dir_rename_count);
void diffcore_merge_broken(void);
void diffcore_pickaxe(struct diff_options *);
void diffcore_order(const char *orderfile);
diff --git a/dir.c b/dir.c
index 166238e..d81b5df 100644
--- a/dir.c
+++ b/dir.c
@@ -1035,6 +1035,9 @@ static int add_patterns_from_buffer(char *buf, size_t size,
const char *base, int baselen,
struct pattern_list *pl);
+/* Flags for add_patterns() */
+#define PATTERN_NOFOLLOW (1<<0)
+
/*
* Given a file with name "fname", read it (either from disk, or from
* an index if 'istate' is non-null), parse it and store the
@@ -1046,7 +1049,7 @@ static int add_patterns_from_buffer(char *buf, size_t size,
*/
static int add_patterns(const char *fname, const char *base, int baselen,
struct pattern_list *pl, struct index_state *istate,
- struct oid_stat *oid_stat)
+ unsigned flags, struct oid_stat *oid_stat)
{
struct stat st;
int r;
@@ -1054,7 +1057,11 @@ static int add_patterns(const char *fname, const char *base, int baselen,
size_t size = 0;
char *buf;
- fd = open(fname, O_RDONLY);
+ if (flags & PATTERN_NOFOLLOW)
+ fd = open_nofollow(fname, O_RDONLY);
+ else
+ fd = open(fname, O_RDONLY);
+
if (fd < 0 || fstat(fd, &st) < 0) {
if (fd < 0)
warn_on_fopen_errors(fname);
@@ -1143,9 +1150,10 @@ static int add_patterns_from_buffer(char *buf, size_t size,
int add_patterns_from_file_to_list(const char *fname, const char *base,
int baselen, struct pattern_list *pl,
- struct index_state *istate)
+ struct index_state *istate,
+ unsigned flags)
{
- return add_patterns(fname, base, baselen, pl, istate, NULL);
+ return add_patterns(fname, base, baselen, pl, istate, flags, NULL);
}
int add_patterns_from_blob_to_list(
@@ -1194,7 +1202,7 @@ static void add_patterns_from_file_1(struct dir_struct *dir, const char *fname,
if (!dir->untracked)
dir->unmanaged_exclude_files++;
pl = add_pattern_list(dir, EXC_FILE, fname);
- if (add_patterns(fname, "", 0, pl, NULL, oid_stat) < 0)
+ if (add_patterns(fname, "", 0, pl, NULL, 0, oid_stat) < 0)
die(_("cannot use %s as an exclude file"), fname);
}
@@ -1488,7 +1496,7 @@ static void prep_exclude(struct dir_struct *dir,
const char *cp;
struct oid_stat oid_stat;
- stk = xcalloc(1, sizeof(*stk));
+ CALLOC_ARRAY(stk, 1);
if (current < 0) {
cp = base;
current = 0;
@@ -1558,6 +1566,7 @@ static void prep_exclude(struct dir_struct *dir,
strbuf_addstr(&sb, dir->exclude_per_dir);
pl->src = strbuf_detach(&sb, NULL);
add_patterns(pl->src, pl->src, stk->baselen, pl, istate,
+ PATTERN_NOFOLLOW,
untracked ? &oid_stat : NULL);
}
/*
@@ -3006,7 +3015,7 @@ int get_sparse_checkout_patterns(struct pattern_list *pl)
char *sparse_filename = get_sparse_checkout_filename();
pl->use_cone_patterns = core_sparse_checkout_cone;
- res = add_patterns_from_file_to_list(sparse_filename, "", 0, pl, NULL);
+ res = add_patterns_from_file_to_list(sparse_filename, "", 0, pl, NULL, 0);
free(sparse_filename);
return res;
@@ -3162,7 +3171,7 @@ void write_untracked_extension(struct strbuf *out, struct untracked_cache *untra
int varint_len;
const unsigned hashsz = the_hash_algo->rawsz;
- ouc = xcalloc(1, sizeof(*ouc));
+ CALLOC_ARRAY(ouc, 1);
stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
ouc->dir_flags = htonl(untracked->dir_flags);
@@ -3373,7 +3382,7 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
if (next + exclude_per_dir_offset + 1 > end)
return NULL;
- uc = xcalloc(1, sizeof(*uc));
+ CALLOC_ARRAY(uc, 1);
strbuf_init(&uc->ident, ident_len);
strbuf_add(&uc->ident, ident, ident_len);
load_oid_stat(&uc->ss_info_exclude,
diff --git a/dir.h b/dir.h
index 51cb0e2..35963f9 100644
--- a/dir.h
+++ b/dir.h
@@ -420,7 +420,8 @@ int hashmap_contains_parent(struct hashmap *map,
struct pattern_list *add_pattern_list(struct dir_struct *dir,
int group_type, const char *src);
int add_patterns_from_file_to_list(const char *fname, const char *base, int baselen,
- struct pattern_list *pl, struct index_state *istate);
+ struct pattern_list *pl, struct index_state *istate,
+ unsigned flags);
void add_patterns_from_file(struct dir_struct *, const char *fname);
int add_patterns_from_blob_to_list(struct object_id *oid,
const char *base, int baselen,
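
A small caller-side sketch of the widened add_patterns_from_file_to_list() signature (illustrative only; the path below is made up). Out-of-tree callers such as get_sparse_checkout_patterns() above simply pass 0 for the new flags argument, while dir.c itself passes PATTERN_NOFOLLOW for in-tree per-directory exclude files:

	/*
	 * Illustrative sketch: read an exclude file that is not inside
	 * the worktree, so following symlinks is acceptable and flags is 0.
	 */
	struct pattern_list pl = { 0 };

	if (add_patterns_from_file_to_list("/path/to/extra-excludes", "", 0,
					   &pl, NULL, 0) < 0)
		warning("could not read extra exclude file");
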
diff --git a/entry.c b/entry.c
index 891e4ba..9fdc843 100644
--- a/entry.c
+++ b/entry.c
@@ -6,6 +6,7 @@
#include "submodule.h"
#include "progress.h"
#include "fsmonitor.h"
+#include "entry.h"
static void create_directories(const char *path, int path_len,
const struct checkout *state)
@@ -83,7 +84,7 @@ static int create_file(const char *path, unsigned int mode)
return open(path, O_WRONLY | O_CREAT | O_EXCL, mode);
}
-static void *read_blob_entry(const struct cache_entry *ce, unsigned long *size)
+void *read_blob_entry(const struct cache_entry *ce, unsigned long *size)
{
enum object_type type;
void *blob_data = read_object_file(&ce->oid, &type, size);
@@ -108,7 +109,7 @@ static int open_output_fd(char *path, const struct cache_entry *ce, int to_tempf
}
}
-static int fstat_output(int fd, const struct checkout *state, struct stat *st)
+int fstat_checkout_output(int fd, const struct checkout *state, struct stat *st)
{
/* use fstat() only when path == ce->name */
if (fstat_is_reliable() &&
@@ -131,7 +132,7 @@ static int streaming_write_entry(const struct cache_entry *ce, char *path,
return -1;
result |= stream_blob_to_fd(fd, &ce->oid, filter, 1);
- *fstat_done = fstat_output(fd, state, statbuf);
+ *fstat_done = fstat_checkout_output(fd, state, statbuf);
result |= close(fd);
if (result)
@@ -250,8 +251,21 @@ int finish_delayed_checkout(struct checkout *state, int *nr_checkouts)
return errs;
}
-static int write_entry(struct cache_entry *ce,
- char *path, const struct checkout *state, int to_tempfile)
+void update_ce_after_write(const struct checkout *state, struct cache_entry *ce,
+ struct stat *st)
+{
+ if (state->refresh_cache) {
+ assert(state->istate);
+ fill_stat_cache_info(state->istate, ce, st);
+ ce->ce_flags |= CE_UPDATE_IN_BASE;
+ mark_fsmonitor_invalid(state->istate, ce);
+ state->istate->cache_changed |= CE_ENTRY_CHANGED;
+ }
+}
+
+/* Note: ca is used (and required) iff the entry refers to a regular file. */
+static int write_entry(struct cache_entry *ce, char *path, struct conv_attrs *ca,
+ const struct checkout *state, int to_tempfile)
{
unsigned int ce_mode_s_ifmt = ce->ce_mode & S_IFMT;
struct delayed_checkout *dco = state->delayed_checkout;
@@ -268,8 +282,7 @@ static int write_entry(struct cache_entry *ce,
clone_checkout_metadata(&meta, &state->meta, &ce->oid);
if (ce_mode_s_ifmt == S_IFREG) {
- struct stream_filter *filter = get_stream_filter(state->istate, ce->name,
- &ce->oid);
+ struct stream_filter *filter = get_stream_filter_ca(ca, &ce->oid);
if (filter &&
!streaming_write_entry(ce, path, filter,
state, to_tempfile,
@@ -316,14 +329,17 @@ static int write_entry(struct cache_entry *ce,
* Convert from git internal format to working tree format
*/
if (dco && dco->state != CE_NO_DELAY) {
- ret = async_convert_to_working_tree(state->istate, ce->name, new_blob,
- size, &buf, &meta, dco);
+ ret = async_convert_to_working_tree_ca(ca, ce->name,
+ new_blob, size,
+ &buf, &meta, dco);
if (ret && string_list_has_string(&dco->paths, ce->name)) {
free(new_blob);
goto delayed;
}
- } else
- ret = convert_to_working_tree(state->istate, ce->name, new_blob, size, &buf, &meta);
+ } else {
+ ret = convert_to_working_tree_ca(ca, ce->name, new_blob,
+ size, &buf, &meta);
+ }
if (ret) {
free(new_blob);
@@ -345,7 +361,7 @@ static int write_entry(struct cache_entry *ce,
wrote = write_in_full(fd, new_blob, size);
if (!to_tempfile)
- fstat_done = fstat_output(fd, state, &st);
+ fstat_done = fstat_checkout_output(fd, state, &st);
close(fd);
free(new_blob);
if (wrote < 0)
@@ -370,15 +386,10 @@ static int write_entry(struct cache_entry *ce,
finish:
if (state->refresh_cache) {
- assert(state->istate);
- if (!fstat_done)
- if (lstat(ce->name, &st) < 0)
- return error_errno("unable to stat just-written file %s",
- ce->name);
- fill_stat_cache_info(state->istate, ce, &st);
- ce->ce_flags |= CE_UPDATE_IN_BASE;
- mark_fsmonitor_invalid(state->istate, ce);
- state->istate->cache_changed |= CE_ENTRY_CHANGED;
+ if (!fstat_done && lstat(ce->name, &st) < 0)
+ return error_errno("unable to stat just-written file %s",
+ ce->name);
+		update_ce_after_write(state, ce, &st);
}
delayed:
return 0;
@@ -431,19 +442,13 @@ static void mark_colliding_entries(const struct checkout *state,
}
}
-/*
- * Write the contents from ce out to the working tree.
- *
- * When topath[] is not NULL, instead of writing to the working tree
- * file named by ce, a temporary file is created by this function and
- * its name is returned in topath[], which must be able to hold at
- * least TEMPORARY_FILENAME_LENGTH bytes long.
- */
-int checkout_entry(struct cache_entry *ce, const struct checkout *state,
- char *topath, int *nr_checkouts)
+int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca,
+ const struct checkout *state, char *topath,
+ int *nr_checkouts)
{
static struct strbuf path = STRBUF_INIT;
struct stat st;
+ struct conv_attrs ca_buf;
if (ce->ce_flags & CE_WT_REMOVE) {
if (topath)
@@ -456,8 +461,13 @@ int checkout_entry(struct cache_entry *ce, const struct checkout *state,
return 0;
}
- if (topath)
- return write_entry(ce, topath, state, 1);
+ if (topath) {
+ if (S_ISREG(ce->ce_mode) && !ca) {
+ convert_attrs(state->istate, &ca_buf, ce->name);
+ ca = &ca_buf;
+ }
+ return write_entry(ce, topath, ca, state, 1);
+ }
strbuf_reset(&path);
strbuf_add(&path, state->base_dir, state->base_dir_len);
@@ -519,9 +529,16 @@ int checkout_entry(struct cache_entry *ce, const struct checkout *state,
return 0;
create_directories(path.buf, path.len, state);
+
if (nr_checkouts)
(*nr_checkouts)++;
- return write_entry(ce, path.buf, state, 0);
+
+ if (S_ISREG(ce->ce_mode) && !ca) {
+ convert_attrs(state->istate, &ca_buf, ce->name);
+ ca = &ca_buf;
+ }
+
+ return write_entry(ce, path.buf, ca, state, 0);
}
void unlink_entry(const struct cache_entry *ce)
@@ -532,7 +549,7 @@ void unlink_entry(const struct cache_entry *ce)
submodule_move_head(ce->name, "HEAD", NULL,
SUBMODULE_MOVE_HEAD_FORCE);
}
- if (!check_leading_path(ce->name, ce_namelen(ce)))
+ if (check_leading_path(ce->name, ce_namelen(ce), 1) >= 0)
return;
if (remove_or_warn(ce->ce_mode, ce->name))
return;
diff --git a/entry.h b/entry.h
new file mode 100644
index 0000000..b8c0e17
--- /dev/null
+++ b/entry.h
@@ -0,0 +1,59 @@
+#ifndef ENTRY_H
+#define ENTRY_H
+
+#include "cache.h"
+#include "convert.h"
+
+struct checkout {
+ struct index_state *istate;
+ const char *base_dir;
+ int base_dir_len;
+ struct delayed_checkout *delayed_checkout;
+ struct checkout_metadata meta;
+ unsigned force:1,
+ quiet:1,
+ not_new:1,
+ clone:1,
+ refresh_cache:1;
+};
+#define CHECKOUT_INIT { NULL, "" }
+
+#define TEMPORARY_FILENAME_LENGTH 25
+/*
+ * Write the contents from ce out to the working tree.
+ *
+ * When topath[] is not NULL, instead of writing to the working tree
+ * file named by ce, a temporary file is created by this function and
+ * its name is returned in topath[], which must be able to hold at
+ * least TEMPORARY_FILENAME_LENGTH bytes long.
+ *
+ * With checkout_entry_ca(), callers can optionally pass a preloaded
+ * conv_attrs struct (to avoid reloading it), when ce refers to a
+ * regular file. If ca is NULL, the attributes will be loaded
+ * internally when (and if) needed.
+ */
+int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca,
+ const struct checkout *state, char *topath,
+ int *nr_checkouts);
+static inline int checkout_entry(struct cache_entry *ce,
+ const struct checkout *state, char *topath,
+ int *nr_checkouts)
+{
+ return checkout_entry_ca(ce, NULL, state, topath, nr_checkouts);
+}
+
+void enable_delayed_checkout(struct checkout *state);
+int finish_delayed_checkout(struct checkout *state, int *nr_checkouts);
+
+/*
+ * Unlink the last component and schedule the leading directories for
+ * removal, such that empty directories get removed.
+ */
+void unlink_entry(const struct cache_entry *ce);
+
+void *read_blob_entry(const struct cache_entry *ce, unsigned long *size);
+int fstat_checkout_output(int fd, const struct checkout *state, struct stat *st);
+void update_ce_after_write(const struct checkout *state, struct cache_entry *ce,
+ struct stat *st);
+
+#endif /* ENTRY_H */
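
A short sketch of the intended use of the _ca variant (illustrative; checkout_with_known_attrs() is not part of the patch): a caller that has already resolved the conversion attributes, e.g. to classify them first, can hand them to checkout_entry_ca() so write_entry() does not have to call convert_attrs() a second time.

	static int checkout_with_known_attrs(struct cache_entry *ce,
					     const struct checkout *state)
	{
		struct conv_attrs ca;

		convert_attrs(state->istate, &ca, ce->name);
		/* ... e.g. inspect classify_conv_attrs(&ca) here ... */
		return checkout_entry_ca(ce, &ca, state, NULL, NULL);
	}
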
diff --git a/ewah/bitmap.c b/ewah/bitmap.c
index 0d31cdc..38a47c4 100644
--- a/ewah/bitmap.c
+++ b/ewah/bitmap.c
@@ -25,7 +25,7 @@
struct bitmap *bitmap_word_alloc(size_t word_alloc)
{
struct bitmap *bitmap = xmalloc(sizeof(struct bitmap));
- bitmap->words = xcalloc(word_alloc, sizeof(eword_t));
+ CALLOC_ARRAY(bitmap->words, word_alloc);
bitmap->word_alloc = word_alloc;
return bitmap;
}
diff --git a/fetch-pack.c b/fetch-pack.c
index 0cb59ac..2318ebe 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -38,6 +38,7 @@ static int server_supports_filtering;
static int advertise_sid;
static struct shallow_lock shallow_lock;
static const char *alternate_shallow_file;
+static struct fsck_options fsck_options = FSCK_OPTIONS_MISSING_GITMODULES;
static struct strbuf fsck_msg_types = STRBUF_INIT;
static struct string_list uri_protocols = STRING_LIST_INIT_DUP;
@@ -846,7 +847,7 @@ static int get_pack(struct fetch_pack_args *args,
else
demux.out = xd[0];
- if (!args->keep_pack && unpack_limit) {
+ if (!args->keep_pack && unpack_limit && !index_pack_args) {
if (read_pack_header(demux.out, &header))
die(_("protocol error: bad pack header"));
@@ -879,7 +880,7 @@ static int get_pack(struct fetch_pack_args *args,
strvec_push(&cmd.args, "-v");
if (args->use_thin_pack)
strvec_push(&cmd.args, "--fix-thin");
- if (do_keep && (args->lock_pack || unpack_limit)) {
+ if ((do_keep || index_pack_args) && (args->lock_pack || unpack_limit)) {
char hostname[HOST_NAME_MAX + 1];
if (xgethostname(hostname, sizeof(hostname)))
xsnprintf(hostname, sizeof(hostname), "localhost");
@@ -987,22 +988,6 @@ static int cmp_ref_by_name(const void *a_, const void *b_)
return strcmp(a->name, b->name);
}
-static void fsck_gitmodules_oids(struct oidset *gitmodules_oids)
-{
- struct oidset_iter iter;
- const struct object_id *oid;
- struct fsck_options fo = FSCK_OPTIONS_STRICT;
-
- if (!oidset_size(gitmodules_oids))
- return;
-
- oidset_iter_init(gitmodules_oids, &iter);
- while ((oid = oidset_iter_next(&iter)))
- register_found_gitmodules(oid);
- if (fsck_finish(&fo))
- die("fsck failed");
-}
-
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
int fd[2],
const struct ref *orig_ref,
@@ -1017,7 +1002,6 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
int agent_len;
struct fetch_negotiator negotiator_alloc;
struct fetch_negotiator *negotiator;
- struct oidset gitmodules_oids = OIDSET_INIT;
negotiator = &negotiator_alloc;
fetch_negotiator_init(r, negotiator);
@@ -1129,14 +1113,17 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
if (args->deepen)
setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
NULL);
- else if (si->nr_ours || si->nr_theirs)
+ else if (si->nr_ours || si->nr_theirs) {
+ if (args->reject_shallow_remote)
+ die(_("source repository is shallow, reject to clone."));
alternate_shallow_file = setup_temporary_shallow(si->shallow);
- else
+ } else
alternate_shallow_file = NULL;
if (get_pack(args, fd, pack_lockfiles, NULL, sought, nr_sought,
- &gitmodules_oids))
+ &fsck_options.gitmodules_found))
die(_("git fetch-pack: fetch failed."));
- fsck_gitmodules_oids(&gitmodules_oids);
+ if (fsck_finish(&fsck_options))
+ die("fsck failed");
all_done:
if (negotiator)
@@ -1264,7 +1251,7 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
if (hash_algo_by_ptr(the_hash_algo) != hash_algo)
die(_("mismatched algorithms: client %s; server %s"),
the_hash_algo->name, hash_name);
- packet_write_fmt(fd_out, "object-format=%s", the_hash_algo->name);
+ packet_buf_write(&req_buf, "object-format=%s", the_hash_algo->name);
} else if (hash_algo_by_ptr(the_hash_algo) != GIT_HASH_SHA1) {
die(_("the server does not support algorithm '%s'"),
the_hash_algo->name);
@@ -1498,10 +1485,12 @@ static void receive_shallow_info(struct fetch_pack_args *args,
* rejected (unless --update-shallow is set); do the same.
*/
prepare_shallow_info(si, shallows);
- if (si->nr_ours || si->nr_theirs)
+ if (si->nr_ours || si->nr_theirs) {
+ if (args->reject_shallow_remote)
+			die(_("source repository is shallow, refusing to clone."));
alternate_shallow_file =
setup_temporary_shallow(si->shallow);
- else
+ } else
alternate_shallow_file = NULL;
} else {
alternate_shallow_file = NULL;
@@ -1587,7 +1576,6 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
struct string_list packfile_uris = STRING_LIST_INIT_DUP;
int i;
struct strvec index_pack_args = STRVEC_INIT;
- struct oidset gitmodules_oids = OIDSET_INIT;
negotiator = &negotiator_alloc;
fetch_negotiator_init(r, negotiator);
@@ -1678,7 +1666,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
process_section_header(&reader, "packfile", 0);
if (get_pack(args, fd, pack_lockfiles,
packfile_uris.nr ? &index_pack_args : NULL,
- sought, nr_sought, &gitmodules_oids))
+ sought, nr_sought, &fsck_options.gitmodules_found))
die(_("git fetch-pack: fetch failed."));
do_check_stateless_delimiter(args, &reader);
@@ -1721,7 +1709,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
packname[the_hash_algo->hexsz] = '\0';
- parse_gitmodules_oids(cmd.out, &gitmodules_oids);
+ parse_gitmodules_oids(cmd.out, &fsck_options.gitmodules_found);
close(cmd.out);
@@ -1742,7 +1730,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
string_list_clear(&packfile_uris, 0);
strvec_clear(&index_pack_args);
- fsck_gitmodules_oids(&gitmodules_oids);
+ if (fsck_finish(&fsck_options))
+ die("fsck failed");
if (negotiator)
negotiator->release(negotiator);
@@ -1916,7 +1905,7 @@ static void update_shallow(struct fetch_pack_args *args,
* remote is also shallow, check what ref is safe to update
* without updating .git/shallow
*/
- status = xcalloc(nr_sought, sizeof(*status));
+ CALLOC_ARRAY(status, nr_sought);
assign_shallow_commits_to_refs(si, NULL, status);
if (si->nr_ours || si->nr_theirs) {
for (i = 0; i < nr_sought; i++)
diff --git a/fetch-pack.h b/fetch-pack.h
index 736a3da..f114d72 100644
--- a/fetch-pack.h
+++ b/fetch-pack.h
@@ -39,6 +39,7 @@ struct fetch_pack_args {
unsigned self_contained_and_connected:1;
unsigned cloning:1;
unsigned update_shallow:1;
+ unsigned reject_shallow_remote:1;
unsigned deepen:1;
/*
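
The new reject_shallow_remote bit is consumed by do_fetch_pack() and do_fetch_pack_v2() above; a hypothetical caller opting in would look roughly like this (sketch only; how the transport layer actually wires it up is outside these hunks):

	struct fetch_pack_args args;

	memset(&args, 0, sizeof(args));
	args.reject_shallow_remote = 1;	/* die() instead of fetching from a shallow remote */
	/* ... fill in the remaining args and call fetch_pack() as usual ... */
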
diff --git a/fmt-merge-msg.c b/fmt-merge-msg.c
index 1e51492..0f66818 100644
--- a/fmt-merge-msg.c
+++ b/fmt-merge-msg.c
@@ -130,7 +130,7 @@ static int handle_line(char *line, struct merge_parents *merge_parents)
if (!find_merge_parent(merge_parents, &oid, NULL))
return 0; /* subsumed by other parents */
- origin_data = xcalloc(1, sizeof(struct origin_data));
+ CALLOC_ARRAY(origin_data, 1);
oidcpy(&origin_data->oid, &oid);
if (line[len - 1] == '\n')
diff --git a/fsck.c b/fsck.c
index e3030f3..f5ed6a2 100644
--- a/fsck.c
+++ b/fsck.c
@@ -19,90 +19,19 @@
#include "credential.h"
#include "help.h"
-static struct oidset gitmodules_found = OIDSET_INIT;
-static struct oidset gitmodules_done = OIDSET_INIT;
-
-#define FSCK_FATAL -1
-#define FSCK_INFO -2
-
-#define FOREACH_MSG_ID(FUNC) \
- /* fatal errors */ \
- FUNC(NUL_IN_HEADER, FATAL) \
- FUNC(UNTERMINATED_HEADER, FATAL) \
- /* errors */ \
- FUNC(BAD_DATE, ERROR) \
- FUNC(BAD_DATE_OVERFLOW, ERROR) \
- FUNC(BAD_EMAIL, ERROR) \
- FUNC(BAD_NAME, ERROR) \
- FUNC(BAD_OBJECT_SHA1, ERROR) \
- FUNC(BAD_PARENT_SHA1, ERROR) \
- FUNC(BAD_TAG_OBJECT, ERROR) \
- FUNC(BAD_TIMEZONE, ERROR) \
- FUNC(BAD_TREE, ERROR) \
- FUNC(BAD_TREE_SHA1, ERROR) \
- FUNC(BAD_TYPE, ERROR) \
- FUNC(DUPLICATE_ENTRIES, ERROR) \
- FUNC(MISSING_AUTHOR, ERROR) \
- FUNC(MISSING_COMMITTER, ERROR) \
- FUNC(MISSING_EMAIL, ERROR) \
- FUNC(MISSING_NAME_BEFORE_EMAIL, ERROR) \
- FUNC(MISSING_OBJECT, ERROR) \
- FUNC(MISSING_SPACE_BEFORE_DATE, ERROR) \
- FUNC(MISSING_SPACE_BEFORE_EMAIL, ERROR) \
- FUNC(MISSING_TAG, ERROR) \
- FUNC(MISSING_TAG_ENTRY, ERROR) \
- FUNC(MISSING_TREE, ERROR) \
- FUNC(MISSING_TREE_OBJECT, ERROR) \
- FUNC(MISSING_TYPE, ERROR) \
- FUNC(MISSING_TYPE_ENTRY, ERROR) \
- FUNC(MULTIPLE_AUTHORS, ERROR) \
- FUNC(TREE_NOT_SORTED, ERROR) \
- FUNC(UNKNOWN_TYPE, ERROR) \
- FUNC(ZERO_PADDED_DATE, ERROR) \
- FUNC(GITMODULES_MISSING, ERROR) \
- FUNC(GITMODULES_BLOB, ERROR) \
- FUNC(GITMODULES_LARGE, ERROR) \
- FUNC(GITMODULES_NAME, ERROR) \
- FUNC(GITMODULES_SYMLINK, ERROR) \
- FUNC(GITMODULES_URL, ERROR) \
- FUNC(GITMODULES_PATH, ERROR) \
- FUNC(GITMODULES_UPDATE, ERROR) \
- /* warnings */ \
- FUNC(BAD_FILEMODE, WARN) \
- FUNC(EMPTY_NAME, WARN) \
- FUNC(FULL_PATHNAME, WARN) \
- FUNC(HAS_DOT, WARN) \
- FUNC(HAS_DOTDOT, WARN) \
- FUNC(HAS_DOTGIT, WARN) \
- FUNC(NULL_SHA1, WARN) \
- FUNC(ZERO_PADDED_FILEMODE, WARN) \
- FUNC(NUL_IN_COMMIT, WARN) \
- /* infos (reported as warnings, but ignored by default) */ \
- FUNC(GITMODULES_PARSE, INFO) \
- FUNC(BAD_TAG_NAME, INFO) \
- FUNC(MISSING_TAGGER_ENTRY, INFO) \
- /* ignored (elevated when requested) */ \
- FUNC(EXTRA_HEADER_ENTRY, IGNORE)
-
-#define MSG_ID(id, msg_type) FSCK_MSG_##id,
-enum fsck_msg_id {
- FOREACH_MSG_ID(MSG_ID)
- FSCK_MSG_MAX
-};
-#undef MSG_ID
-
#define STR(x) #x
#define MSG_ID(id, msg_type) { STR(id), NULL, NULL, FSCK_##msg_type },
static struct {
const char *id_string;
const char *downcased;
const char *camelcased;
- int msg_type;
+ enum fsck_msg_type msg_type;
} msg_id_info[FSCK_MSG_MAX + 1] = {
- FOREACH_MSG_ID(MSG_ID)
+ FOREACH_FSCK_MSG_ID(MSG_ID)
{ NULL, NULL, NULL, -1 }
};
#undef MSG_ID
+#undef STR
static void prepare_msg_ids(void)
{
@@ -164,25 +93,23 @@ void list_config_fsck_msg_ids(struct string_list *list, const char *prefix)
list_config_item(list, prefix, msg_id_info[i].camelcased);
}
-static int fsck_msg_type(enum fsck_msg_id msg_id,
+static enum fsck_msg_type fsck_msg_type(enum fsck_msg_id msg_id,
struct fsck_options *options)
{
- int msg_type;
-
assert(msg_id >= 0 && msg_id < FSCK_MSG_MAX);
- if (options->msg_type)
- msg_type = options->msg_type[msg_id];
- else {
- msg_type = msg_id_info[msg_id].msg_type;
+ if (!options->msg_type) {
+ enum fsck_msg_type msg_type = msg_id_info[msg_id].msg_type;
+
if (options->strict && msg_type == FSCK_WARN)
msg_type = FSCK_ERROR;
+ return msg_type;
}
- return msg_type;
+ return options->msg_type[msg_id];
}
-static int parse_msg_type(const char *str)
+static enum fsck_msg_type parse_msg_type(const char *str)
{
if (!strcmp(str, "error"))
return FSCK_ERROR;
@@ -202,28 +129,35 @@ int is_valid_msg_type(const char *msg_id, const char *msg_type)
return 1;
}
-void fsck_set_msg_type(struct fsck_options *options,
- const char *msg_id, const char *msg_type)
+void fsck_set_msg_type_from_ids(struct fsck_options *options,
+ enum fsck_msg_id msg_id,
+ enum fsck_msg_type msg_type)
{
- int id = parse_msg_id(msg_id), type;
-
- if (id < 0)
- die("Unhandled message id: %s", msg_id);
- type = parse_msg_type(msg_type);
-
- if (type != FSCK_ERROR && msg_id_info[id].msg_type == FSCK_FATAL)
- die("Cannot demote %s to %s", msg_id, msg_type);
-
if (!options->msg_type) {
int i;
- int *msg_type;
- ALLOC_ARRAY(msg_type, FSCK_MSG_MAX);
+ enum fsck_msg_type *severity;
+ ALLOC_ARRAY(severity, FSCK_MSG_MAX);
for (i = 0; i < FSCK_MSG_MAX; i++)
- msg_type[i] = fsck_msg_type(i, options);
- options->msg_type = msg_type;
+ severity[i] = fsck_msg_type(i, options);
+ options->msg_type = severity;
}
- options->msg_type[id] = type;
+ options->msg_type[msg_id] = msg_type;
+}
+
+void fsck_set_msg_type(struct fsck_options *options,
+ const char *msg_id_str, const char *msg_type_str)
+{
+ int msg_id = parse_msg_id(msg_id_str);
+ enum fsck_msg_type msg_type = parse_msg_type(msg_type_str);
+
+ if (msg_id < 0)
+ die("Unhandled message id: %s", msg_id_str);
+
+ if (msg_type != FSCK_ERROR && msg_id_info[msg_id].msg_type == FSCK_FATAL)
+ die("Cannot demote %s to %s", msg_id_str, msg_type_str);
+
+ fsck_set_msg_type_from_ids(options, msg_id, msg_type);
}
void fsck_set_msg_types(struct fsck_options *options, const char *values)
@@ -264,24 +198,6 @@ void fsck_set_msg_types(struct fsck_options *options, const char *values)
free(to_free);
}
-static void append_msg_id(struct strbuf *sb, const char *msg_id)
-{
- for (;;) {
- char c = *(msg_id)++;
-
- if (!c)
- break;
- if (c != '_')
- strbuf_addch(sb, tolower(c));
- else {
- assert(*msg_id);
- strbuf_addch(sb, *(msg_id)++);
- }
- }
-
- strbuf_addstr(sb, ": ");
-}
-
static int object_on_skiplist(struct fsck_options *opts,
const struct object_id *oid)
{
@@ -291,11 +207,12 @@ static int object_on_skiplist(struct fsck_options *opts,
__attribute__((format (printf, 5, 6)))
static int report(struct fsck_options *options,
const struct object_id *oid, enum object_type object_type,
- enum fsck_msg_id id, const char *fmt, ...)
+ enum fsck_msg_id msg_id, const char *fmt, ...)
{
va_list ap;
struct strbuf sb = STRBUF_INIT;
- int msg_type = fsck_msg_type(id, options), result;
+ enum fsck_msg_type msg_type = fsck_msg_type(msg_id, options);
+ int result;
if (msg_type == FSCK_IGNORE)
return 0;
@@ -308,12 +225,13 @@ static int report(struct fsck_options *options,
else if (msg_type == FSCK_INFO)
msg_type = FSCK_WARN;
- append_msg_id(&sb, msg_id_info[id].id_string);
+ prepare_msg_ids();
+ strbuf_addf(&sb, "%s: ", msg_id_info[msg_id].camelcased);
va_start(ap, fmt);
strbuf_vaddf(&sb, fmt, ap);
result = options->error_func(options, oid, object_type,
- msg_type, sb.buf);
+ msg_type, msg_id, sb.buf);
strbuf_release(&sb);
va_end(ap);
@@ -685,7 +603,7 @@ static int fsck_tree(const struct object_id *oid,
if (is_hfs_dotgitmodules(name) || is_ntfs_dotgitmodules(name)) {
if (!S_ISLNK(mode))
- oidset_insert(&gitmodules_found, oid);
+ oidset_insert(&options->gitmodules_found, oid);
else
retval += report(options,
oid, OBJ_TREE,
@@ -699,7 +617,7 @@ static int fsck_tree(const struct object_id *oid,
has_dotgit |= is_ntfs_dotgit(backslash);
if (is_ntfs_dotgitmodules(backslash)) {
if (!S_ISLNK(mode))
- oidset_insert(&gitmodules_found, oid);
+ oidset_insert(&options->gitmodules_found, oid);
else
retval += report(options, oid, OBJ_TREE,
FSCK_MSG_GITMODULES_SYMLINK,
@@ -1211,9 +1129,9 @@ static int fsck_blob(const struct object_id *oid, const char *buf,
struct fsck_gitmodules_data data;
struct config_options config_opts = { 0 };
- if (!oidset_contains(&gitmodules_found, oid))
+ if (!oidset_contains(&options->gitmodules_found, oid))
return 0;
- oidset_insert(&gitmodules_done, oid);
+ oidset_insert(&options->gitmodules_done, oid);
if (object_on_skiplist(options, oid))
return 0;
@@ -1266,7 +1184,9 @@ int fsck_object(struct object *obj, void *data, unsigned long size,
int fsck_error_function(struct fsck_options *o,
const struct object_id *oid,
enum object_type object_type,
- int msg_type, const char *message)
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
{
if (msg_type == FSCK_WARN) {
warning("object %s: %s", fsck_describe_object(o, oid), message);
@@ -1276,24 +1196,19 @@ int fsck_error_function(struct fsck_options *o,
return 1;
}
-void register_found_gitmodules(const struct object_id *oid)
-{
- oidset_insert(&gitmodules_found, oid);
-}
-
int fsck_finish(struct fsck_options *options)
{
int ret = 0;
struct oidset_iter iter;
const struct object_id *oid;
- oidset_iter_init(&gitmodules_found, &iter);
+ oidset_iter_init(&options->gitmodules_found, &iter);
while ((oid = oidset_iter_next(&iter))) {
enum object_type type;
unsigned long size;
char *buf;
- if (oidset_contains(&gitmodules_done, oid))
+ if (oidset_contains(&options->gitmodules_done, oid))
continue;
buf = read_object_file(oid, &type, &size);
@@ -1318,14 +1233,14 @@ int fsck_finish(struct fsck_options *options)
}
- oidset_clear(&gitmodules_found);
- oidset_clear(&gitmodules_done);
+ oidset_clear(&options->gitmodules_found);
+ oidset_clear(&options->gitmodules_done);
return ret;
}
-int fsck_config_internal(const char *var, const char *value, void *cb,
- struct fsck_options *options)
+int git_fsck_config(const char *var, const char *value, void *cb)
{
+ struct fsck_options *options = cb;
if (strcmp(var, "fsck.skiplist") == 0) {
const char *path;
struct strbuf sb = STRBUF_INIT;
@@ -1346,3 +1261,21 @@ int fsck_config_internal(const char *var, const char *value, void *cb,
return git_default_config(var, value, cb);
}
+
+/*
+ * Custom error callbacks that are used in more than one place.
+ */
+
+int fsck_error_cb_print_missing_gitmodules(struct fsck_options *o,
+ const struct object_id *oid,
+ enum object_type object_type,
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message)
+{
+ if (msg_id == FSCK_MSG_GITMODULES_MISSING) {
+ puts(oid_to_hex(oid));
+ return 0;
+ }
+ return fsck_error_function(o, oid, object_type, msg_type, msg_id, message);
+}
diff --git a/fsck.h b/fsck.h
index 733378f..7202c3c 100644
--- a/fsck.h
+++ b/fsck.h
@@ -3,15 +3,90 @@
#include "oidset.h"
-#define FSCK_ERROR 1
-#define FSCK_WARN 2
-#define FSCK_IGNORE 3
+enum fsck_msg_type {
+ /* for internal use only */
+ FSCK_IGNORE,
+ FSCK_INFO,
+ FSCK_FATAL,
+ /* "public", fed to e.g. error_func callbacks */
+ FSCK_ERROR,
+ FSCK_WARN,
+};
+
+#define FOREACH_FSCK_MSG_ID(FUNC) \
+ /* fatal errors */ \
+ FUNC(NUL_IN_HEADER, FATAL) \
+ FUNC(UNTERMINATED_HEADER, FATAL) \
+ /* errors */ \
+ FUNC(BAD_DATE, ERROR) \
+ FUNC(BAD_DATE_OVERFLOW, ERROR) \
+ FUNC(BAD_EMAIL, ERROR) \
+ FUNC(BAD_NAME, ERROR) \
+ FUNC(BAD_OBJECT_SHA1, ERROR) \
+ FUNC(BAD_PARENT_SHA1, ERROR) \
+ FUNC(BAD_TAG_OBJECT, ERROR) \
+ FUNC(BAD_TIMEZONE, ERROR) \
+ FUNC(BAD_TREE, ERROR) \
+ FUNC(BAD_TREE_SHA1, ERROR) \
+ FUNC(BAD_TYPE, ERROR) \
+ FUNC(DUPLICATE_ENTRIES, ERROR) \
+ FUNC(MISSING_AUTHOR, ERROR) \
+ FUNC(MISSING_COMMITTER, ERROR) \
+ FUNC(MISSING_EMAIL, ERROR) \
+ FUNC(MISSING_NAME_BEFORE_EMAIL, ERROR) \
+ FUNC(MISSING_OBJECT, ERROR) \
+ FUNC(MISSING_SPACE_BEFORE_DATE, ERROR) \
+ FUNC(MISSING_SPACE_BEFORE_EMAIL, ERROR) \
+ FUNC(MISSING_TAG, ERROR) \
+ FUNC(MISSING_TAG_ENTRY, ERROR) \
+ FUNC(MISSING_TREE, ERROR) \
+ FUNC(MISSING_TREE_OBJECT, ERROR) \
+ FUNC(MISSING_TYPE, ERROR) \
+ FUNC(MISSING_TYPE_ENTRY, ERROR) \
+ FUNC(MULTIPLE_AUTHORS, ERROR) \
+ FUNC(TREE_NOT_SORTED, ERROR) \
+ FUNC(UNKNOWN_TYPE, ERROR) \
+ FUNC(ZERO_PADDED_DATE, ERROR) \
+ FUNC(GITMODULES_MISSING, ERROR) \
+ FUNC(GITMODULES_BLOB, ERROR) \
+ FUNC(GITMODULES_LARGE, ERROR) \
+ FUNC(GITMODULES_NAME, ERROR) \
+ FUNC(GITMODULES_SYMLINK, ERROR) \
+ FUNC(GITMODULES_URL, ERROR) \
+ FUNC(GITMODULES_PATH, ERROR) \
+ FUNC(GITMODULES_UPDATE, ERROR) \
+ /* warnings */ \
+ FUNC(BAD_FILEMODE, WARN) \
+ FUNC(EMPTY_NAME, WARN) \
+ FUNC(FULL_PATHNAME, WARN) \
+ FUNC(HAS_DOT, WARN) \
+ FUNC(HAS_DOTDOT, WARN) \
+ FUNC(HAS_DOTGIT, WARN) \
+ FUNC(NULL_SHA1, WARN) \
+ FUNC(ZERO_PADDED_FILEMODE, WARN) \
+ FUNC(NUL_IN_COMMIT, WARN) \
+ /* infos (reported as warnings, but ignored by default) */ \
+ FUNC(GITMODULES_PARSE, INFO) \
+ FUNC(BAD_TAG_NAME, INFO) \
+ FUNC(MISSING_TAGGER_ENTRY, INFO) \
+ /* ignored (elevated when requested) */ \
+ FUNC(EXTRA_HEADER_ENTRY, IGNORE)
+
+#define MSG_ID(id, msg_type) FSCK_MSG_##id,
+enum fsck_msg_id {
+ FOREACH_FSCK_MSG_ID(MSG_ID)
+ FSCK_MSG_MAX
+};
+#undef MSG_ID
struct fsck_options;
struct object;
+void fsck_set_msg_type_from_ids(struct fsck_options *options,
+ enum fsck_msg_id msg_id,
+ enum fsck_msg_type msg_type);
void fsck_set_msg_type(struct fsck_options *options,
- const char *msg_id, const char *msg_type);
+ const char *msg_id, const char *msg_type);
void fsck_set_msg_types(struct fsck_options *options, const char *values);
int is_valid_msg_type(const char *msg_id, const char *msg_type);
@@ -23,28 +98,55 @@ int is_valid_msg_type(const char *msg_id, const char *msg_type);
* <0 error signaled and abort
* >0 error signaled and do not abort
*/
-typedef int (*fsck_walk_func)(struct object *obj, int type, void *data, struct fsck_options *options);
+typedef int (*fsck_walk_func)(struct object *obj, enum object_type object_type,
+ void *data, struct fsck_options *options);
/* callback for fsck_object, type is FSCK_ERROR or FSCK_WARN */
typedef int (*fsck_error)(struct fsck_options *o,
const struct object_id *oid, enum object_type object_type,
- int msg_type, const char *message);
+ enum fsck_msg_type msg_type, enum fsck_msg_id msg_id,
+ const char *message);
int fsck_error_function(struct fsck_options *o,
const struct object_id *oid, enum object_type object_type,
- int msg_type, const char *message);
+ enum fsck_msg_type msg_type, enum fsck_msg_id msg_id,
+ const char *message);
+int fsck_error_cb_print_missing_gitmodules(struct fsck_options *o,
+ const struct object_id *oid,
+ enum object_type object_type,
+ enum fsck_msg_type msg_type,
+ enum fsck_msg_id msg_id,
+ const char *message);
struct fsck_options {
fsck_walk_func walk;
fsck_error error_func;
unsigned strict:1;
- int *msg_type;
+ enum fsck_msg_type *msg_type;
struct oidset skiplist;
+ struct oidset gitmodules_found;
+ struct oidset gitmodules_done;
kh_oid_map_t *object_names;
};
-#define FSCK_OPTIONS_DEFAULT { NULL, fsck_error_function, 0, NULL, OIDSET_INIT }
-#define FSCK_OPTIONS_STRICT { NULL, fsck_error_function, 1, NULL, OIDSET_INIT }
+#define FSCK_OPTIONS_DEFAULT { \
+ .skiplist = OIDSET_INIT, \
+ .gitmodules_found = OIDSET_INIT, \
+ .gitmodules_done = OIDSET_INIT, \
+ .error_func = fsck_error_function \
+}
+#define FSCK_OPTIONS_STRICT { \
+ .strict = 1, \
+ .gitmodules_found = OIDSET_INIT, \
+ .gitmodules_done = OIDSET_INIT, \
+ .error_func = fsck_error_function, \
+}
+#define FSCK_OPTIONS_MISSING_GITMODULES { \
+ .strict = 1, \
+ .gitmodules_found = OIDSET_INIT, \
+ .gitmodules_done = OIDSET_INIT, \
+ .error_func = fsck_error_cb_print_missing_gitmodules, \
+}
/* descend in all linked child objects
* the return value is:
@@ -62,8 +164,6 @@ int fsck_walk(struct object *obj, void *data, struct fsck_options *options);
int fsck_object(struct object *obj, void *data, unsigned long size,
struct fsck_options *options);
-void register_found_gitmodules(const struct object_id *oid);
-
/*
* fsck a tag, and pass info about it back to the caller. This is
* exposed fsck_object() internals for git-mktag(1).
@@ -109,7 +209,6 @@ const char *fsck_describe_object(struct fsck_options *options,
* git_config() callback for use by fsck-y tools that want to support
* fsck.<msg> fsck.skipList etc.
*/
-int fsck_config_internal(const char *var, const char *value, void *cb,
- struct fsck_options *options);
+int git_fsck_config(const char *var, const char *value, void *cb);
#endif
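
For readers of the reworked fsck API above, a minimal hypothetical caller-side sketch follows. The function names quiet_gitmodules_parse() and setup_fsck(), and the particular message ids chosen, are invented for illustration; every type, macro and declaration used comes from the fsck.h hunk shown here.

    static int quiet_gitmodules_parse(struct fsck_options *o,
                                      const struct object_id *oid,
                                      enum object_type object_type,
                                      enum fsck_msg_type msg_type,
                                      enum fsck_msg_id msg_id,
                                      const char *message)
    {
            /* swallow one specific message id, defer everything else */
            if (msg_id == FSCK_MSG_GITMODULES_PARSE)
                    return 0;
            return fsck_error_function(o, oid, object_type, msg_type,
                                       msg_id, message);
    }

    static void setup_fsck(struct fsck_options *fo)
    {
            struct fsck_options defaults = FSCK_OPTIONS_DEFAULT;

            *fo = defaults;
            fo->error_func = quiet_gitmodules_parse;
            /* the new enum-based setter needs no string parsing */
            fsck_set_msg_type_from_ids(fo, FSCK_MSG_NUL_IN_COMMIT, FSCK_IGNORE);
    }
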
diff --git a/fsmonitor.c b/fsmonitor.c
index 23f8a0c..ab9bfc6 100644
--- a/fsmonitor.c
+++ b/fsmonitor.c
@@ -185,10 +185,10 @@ static int query_fsmonitor(int version, const char *last_update, struct strbuf *
int fsmonitor_is_trivial_response(const struct strbuf *query_result)
{
static char trivial_response[3] = { '\0', '/', '\0' };
- int is_trivial = !memcmp(trivial_response,
- &query_result->buf[query_result->len - 3], 3);
- return is_trivial;
+ return query_result->len >= 3 &&
+ !memcmp(trivial_response,
+ &query_result->buf[query_result->len - 3], 3);
}
static void fsmonitor_refresh_callback(struct index_state *istate, char *name)
diff --git a/fsmonitor.h b/fsmonitor.h
index 7f1794b..f20d726 100644
--- a/fsmonitor.h
+++ b/fsmonitor.h
@@ -50,6 +50,17 @@ void refresh_fsmonitor(struct index_state *istate);
int fsmonitor_is_trivial_response(const struct strbuf *query_result);
/*
+ * Check if refresh_fsmonitor has been called at least once.
+ * refresh_fsmonitor is idempotent. Returns true if fsmonitor is
+ * not enabled (since the state will be "fresh" w/ CE_FSMONITOR_VALID unset).
+ * This version is useful for assertions.
+ */
+static inline int is_fsmonitor_refreshed(const struct index_state *istate)
+{
+ return !core_fsmonitor || istate->fsmonitor_has_run_once;
+}
+
+/*
* Set the given cache entries CE_FSMONITOR_VALID bit. This should be
* called any time the cache entry has been updated to reflect the
* current state of the file on disk.
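
A hypothetical assertion site, sketched only to make the helper's intent concrete (mark_clean_example() is invented; struct index_state, struct cache_entry and CE_FSMONITOR_VALID are the existing Git types, assuming the usual cache.h include):

    static void mark_clean_example(struct index_state *istate,
                                   struct cache_entry *ce)
    {
            /* only trust CE_FSMONITOR_VALID after refresh_fsmonitor() has run */
            assert(is_fsmonitor_refreshed(istate));
            ce->ce_flags |= CE_FSMONITOR_VALID;
    }
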
diff --git a/git-compat-util.h b/git-compat-util.h
index 551cc9f..a508dbe 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -256,6 +256,11 @@ static inline const char *precompose_argv_prefix(int argc, const char **argv, co
{
return prefix;
}
+static inline const char *precompose_string_if_needed(const char *in)
+{
+ return in;
+}
+
#define probe_utf8_pathname_composition()
#endif
@@ -349,6 +354,11 @@ static inline int noop_core_config(const char *var, const char *value, void *cb)
#define platform_core_config noop_core_config
#endif
+int lstat_cache_aware_rmdir(const char *path);
+#if !defined(__MINGW32__) && !defined(_MSC_VER)
+#define rmdir lstat_cache_aware_rmdir
+#endif
+
#ifndef has_dos_drive_prefix
static inline int git_has_dos_drive_prefix(const char *path)
{
@@ -893,7 +903,7 @@ int xstrncmpz(const char *s, const char *t, size_t len);
#define FREE_AND_NULL(p) do { free(p); (p) = NULL; } while (0)
#define ALLOC_ARRAY(x, alloc) (x) = xmalloc(st_mult(sizeof(*(x)), (alloc)))
-#define CALLOC_ARRAY(x, alloc) (x) = xcalloc((alloc), sizeof(*(x)));
+#define CALLOC_ARRAY(x, alloc) (x) = xcalloc((alloc), sizeof(*(x)))
#define REALLOC_ARRAY(x, alloc) (x) = xrealloc((x), st_mult(sizeof(*(x)), (alloc)))
#define COPY_ARRAY(dst, src, n) copy_array((dst), (src), (n), sizeof(*(dst)) + \
@@ -1237,6 +1247,13 @@ int access_or_die(const char *path, int mode, unsigned flag);
/* Warn on an inaccessible file if errno indicates this is an error */
int warn_on_fopen_errors(const char *path);
+/*
+ * Open with O_NOFOLLOW, or equivalent. Note that the fallback equivalent
+ * may be racy. Do not use this as protection against an attacker who can
+ * simultaneously create paths.
+ */
+int open_nofollow(const char *path, int flags);
+
#if !defined(USE_PARENS_AROUND_GETTEXT_N) && defined(__GNUC__)
#define USE_PARENS_AROUND_GETTEXT_N 1
#endif
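
The one-character CALLOC_ARRAY() change above is easy to miss: with the old trailing semicolon, CALLOC_ARRAY(x, n); expanded to an assignment followed by an empty statement, which (among other things) breaks an unbraced if/else. A small hypothetical sketch of code that the cleaned-up macro now accepts (struct slot_table and init_slots() are invented for illustration):

    struct slot_table { int *slots; };

    static void init_slots(struct slot_table *t, size_t n)
    {
            if (n)
                    CALLOC_ARRAY(t->slots, n); /* expands to one statement, so the else parses */
            else
                    t->slots = NULL;
    }
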
diff --git a/git-filter-branch.sh b/git-filter-branch.sh
index fea7964..cb89372 100755
--- a/git-filter-branch.sh
+++ b/git-filter-branch.sh
@@ -492,14 +492,12 @@ then
sha1=$(git rev-parse "$ref"^0)
test -f "$workdir"/../map/$sha1 && continue
ancestor=$(git rev-list --simplify-merges -1 "$ref" "$@")
- test "$ancestor" && echo $(map $ancestor) >> "$workdir"/../map/$sha1
+ test "$ancestor" && echo $(map $ancestor) >"$workdir"/../map/$sha1
done < "$tempdir"/heads
fi
# Finally update the refs
-_x40='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]'
-_x40="$_x40$_x40$_x40$_x40$_x40$_x40$_x40$_x40"
echo
while read ref
do
@@ -519,7 +517,7 @@ do
git update-ref -m "filter-branch: delete" -d "$ref" $sha1 ||
die "Could not delete $ref"
;;
- $_x40)
+ *)
echo "Ref '$ref' was rewritten"
if ! git update-ref -m "filter-branch: rewrite" \
"$ref" $rewritten $sha1 2>/dev/null; then
@@ -533,16 +531,6 @@ do
fi
fi
;;
- *)
- # NEEDSWORK: possibly add -Werror, making this an error
- warn "WARNING: '$ref' was rewritten into multiple commits:"
- warn "$rewritten"
- warn "WARNING: Ref '$ref' points to the first one now."
- rewritten=$(echo "$rewritten" | head -n 1)
- git update-ref -m "filter-branch: rewrite to first" \
- "$ref" $rewritten $sha1 ||
- die "Could not rewrite $ref"
- ;;
esac
git update-ref -m "filter-branch: backup" "$orig_namespace$ref" $sha1 ||
exit
diff --git a/git-mergetool.sh b/git-mergetool.sh
index 911470a..f751d9c 100755
--- a/git-mergetool.sh
+++ b/git-mergetool.sh
@@ -358,13 +358,8 @@ merge_file () {
enabled=false
fi
else
- # The user does not have a preference. Ask the tool.
- if hide_resolved_enabled
- then
- enabled=true
- else
- enabled=false
- fi
+ # The user does not have a preference. Default to disabled.
+ enabled=false
fi
if test "$enabled" = true
diff --git a/git-send-email.perl b/git-send-email.perl
index 1f425c0..175da07 100755
--- a/git-send-email.perl
+++ b/git-send-email.perl
@@ -212,22 +212,31 @@ my $dump_aliases = 0;
my $multiedit;
my $editor;
+sub system_or_msg {
+ my ($args, $msg) = @_;
+ system(@$args);
+ my $signalled = $? & 127;
+ my $exit_code = $? >> 8;
+ return unless $signalled or $exit_code;
+
+ return sprintf(__("fatal: command '%s' died with exit code %d"),
+ $args->[0], $exit_code);
+}
+
+sub system_or_die {
+ my $msg = system_or_msg(@_);
+ die $msg if $msg;
+}
+
sub do_edit {
if (!defined($editor)) {
$editor = Git::command_oneline('var', 'GIT_EDITOR');
}
+ my $die_msg = __("the editor exited uncleanly, aborting everything");
if (defined($multiedit) && !$multiedit) {
- map {
- system('sh', '-c', $editor.' "$@"', $editor, $_);
- if (($? & 127) || ($? >> 8)) {
- die(__("the editor exited uncleanly, aborting everything"));
- }
- } @_;
+ system_or_die(['sh', '-c', $editor.' "$@"', $editor, $_], $die_msg) for @_;
} else {
- system('sh', '-c', $editor.' "$@"', $editor, @_);
- if (($? & 127) || ($? >> 8)) {
- die(__("the editor exited uncleanly, aborting everything"));
- }
+ system_or_die(['sh', '-c', $editor.' "$@"', $editor, @_], $die_msg);
}
}
@@ -698,9 +707,7 @@ if (@rev_list_opts) {
if ($validate) {
foreach my $f (@files) {
unless (-p $f) {
- my $error = validate_patch($f, $target_xfer_encoding);
- $error and die sprintf(__("fatal: %s: %s\nwarning: no patches were sent\n"),
- $f, $error);
+ validate_patch($f, $target_xfer_encoding);
}
}
}
@@ -1942,7 +1949,7 @@ sub validate_patch {
my ($fn, $xfer_encoding) = @_;
if ($repo) {
- my $validate_hook = catfile(catdir($repo->repo_path(), 'hooks'),
+ my $validate_hook = catfile($repo->hooks_path(),
'sendemail-validate');
my $hook_error;
if (-x $validate_hook) {
@@ -1952,11 +1959,14 @@ sub validate_patch {
chdir($repo->wc_path() or $repo->repo_path())
or die("chdir: $!");
local $ENV{"GIT_DIR"} = $repo->repo_path();
- $hook_error = "rejected by sendemail-validate hook"
- if system($validate_hook, $target);
+ $hook_error = system_or_msg([$validate_hook, $target]);
chdir($cwd_save) or die("chdir: $!");
}
- return $hook_error if $hook_error;
+ if ($hook_error) {
+ die sprintf(__("fatal: %s: rejected by sendemail-validate hook\n" .
+ "%s\n" .
+ "warning: no patches were sent\n"), $fn, $hook_error);
+ }
}
# Any long lines will be automatically fixed if we use a suitable transfer
@@ -1966,7 +1976,8 @@ sub validate_patch {
or die sprintf(__("unable to open %s: %s\n"), $fn, $!);
while (my $line = <$fh>) {
if (length($line) > 998) {
- return sprintf(__("%s: patch contains a line longer than 998 characters"), $.);
+ die sprintf(__("fatal: %s:%d is longer than 998 characters\n" .
+ "warning: no patches were sent\n"), $fn, $.);
}
}
}
diff --git a/git.c b/git.c
index 9bc077a..b53e665 100644
--- a/git.c
+++ b/git.c
@@ -423,7 +423,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
int nongit_ok;
prefix = setup_git_directory_gently(&nongit_ok);
}
- prefix = precompose_argv_prefix(argc, argv, prefix);
+ precompose_argv_prefix(argc, argv, NULL);
if (use_pager == -1 && p->option & (RUN_SETUP | RUN_SETUP_GENTLY) &&
!(p->option & DELAY_PAGER_CONFIG))
use_pager = check_pager_config(p->cmd);
diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl
index 0959a78..e09e024 100755
--- a/gitweb/gitweb.perl
+++ b/gitweb/gitweb.perl
@@ -569,6 +569,15 @@ our %feature = (
'sub' => \&feature_extra_branch_refs,
'override' => 0,
'default' => []},
+
+ # Redact e-mail addresses.
+
+ # To enable system wide have in $GITWEB_CONFIG
+ # $feature{'email-privacy'}{'default'} = [1];
+ 'email-privacy' => {
+ 'sub' => sub { feature_bool('email-privacy', @_) },
+ 'override' => 1,
+ 'default' => [0]},
);
sub gitweb_get_feature {
@@ -3449,6 +3458,13 @@ sub parse_date {
return %date;
}
+sub hide_mailaddrs_if_private {
+ my $line = shift;
+ return $line unless gitweb_check_feature('email-privacy');
+ $line =~ s/<[^@>]+@[^>]+>/<redacted>/g;
+ return $line;
+}
+
sub parse_tag {
my $tag_id = shift;
my %tag;
@@ -3465,7 +3481,7 @@ sub parse_tag {
} elsif ($line =~ m/^tag (.+)$/) {
$tag{'name'} = $1;
} elsif ($line =~ m/^tagger (.*) ([0-9]+) (.*)$/) {
- $tag{'author'} = $1;
+ $tag{'author'} = hide_mailaddrs_if_private($1);
$tag{'author_epoch'} = $2;
$tag{'author_tz'} = $3;
if ($tag{'author'} =~ m/^([^<]+) <([^>]*)>/) {
@@ -3513,7 +3529,7 @@ sub parse_commit_text {
} elsif ((!defined $withparents) && ($line =~ m/^parent ($oid_regex)$/)) {
push @parents, $1;
} elsif ($line =~ m/^author (.*) ([0-9]+) (.*)$/) {
- $co{'author'} = to_utf8($1);
+ $co{'author'} = hide_mailaddrs_if_private(to_utf8($1));
$co{'author_epoch'} = $2;
$co{'author_tz'} = $3;
if ($co{'author'} =~ m/^([^<]+) <([^>]*)>/) {
@@ -3523,7 +3539,7 @@ sub parse_commit_text {
$co{'author_name'} = $co{'author'};
}
} elsif ($line =~ m/^committer (.*) ([0-9]+) (.*)$/) {
- $co{'committer'} = to_utf8($1);
+ $co{'committer'} = hide_mailaddrs_if_private(to_utf8($1));
$co{'committer_epoch'} = $2;
$co{'committer_tz'} = $3;
if ($co{'committer'} =~ m/^([^<]+) <([^>]*)>/) {
@@ -3568,9 +3584,10 @@ sub parse_commit_text {
if (! defined $co{'title'} || $co{'title'} eq "") {
$co{'title'} = $co{'title_short'} = '(no commit message)';
}
- # remove added spaces
+ # remove added spaces, redact e-mail addresses if applicable.
foreach my $line (@commit_lines) {
$line =~ s/^ //;
+ $line = hide_mailaddrs_if_private($line);
}
$co{'comment'} = \@commit_lines;
@@ -7489,7 +7506,8 @@ sub git_log_generic {
-accesskey => "n", -title => "Alt-n"}, "next");
}
my $patch_max = gitweb_get_feature('patches');
- if ($patch_max && !defined $file_name) {
+ if ($patch_max && !defined $file_name &&
+ !gitweb_check_feature('email-privacy')) {
if ($patch_max < 0 || @commitlist <= $patch_max) {
$paging_nav .= " &sdot; " .
$cgi->a({-href => href(action=>"patches", -replay=>1)},
@@ -7550,7 +7568,8 @@ sub git_commit {
} @$parents ) .
')';
}
- if (gitweb_check_feature('patches') && @$parents <= 1) {
+ if (gitweb_check_feature('patches') && @$parents <= 1 &&
+ !gitweb_check_feature('email-privacy')) {
$formats_nav .= " | " .
$cgi->a({-href => href(action=>"patch", -replay=>1)},
"patch");
@@ -7863,7 +7882,8 @@ sub git_commitdiff {
$formats_nav =
$cgi->a({-href => href(action=>"commitdiff_plain", -replay=>1)},
"raw");
- if ($patch_max && @{$co{'parents'}} <= 1) {
+ if ($patch_max && @{$co{'parents'}} <= 1 &&
+ !gitweb_check_feature('email-privacy')) {
$formats_nav .= " | " .
$cgi->a({-href => href(action=>"patch", -replay=>1)},
"patch");
diff --git a/grep.c b/grep.c
index aabfaaa..c5c348b 100644
--- a/grep.c
+++ b/grep.c
@@ -40,20 +40,6 @@ static struct grep_opt grep_defaults = {
.output = std_output,
};
-#ifdef USE_LIBPCRE2
-static pcre2_general_context *pcre2_global_context;
-
-static void *pcre2_malloc(PCRE2_SIZE size, MAYBE_UNUSED void *memory_data)
-{
- return malloc(size);
-}
-
-static void pcre2_free(void *pointer, MAYBE_UNUSED void *memory_data)
-{
- free(pointer);
-}
-#endif
-
static const char *color_grep_slots[] = {
[GREP_COLOR_CONTEXT] = "context",
[GREP_COLOR_FILENAME] = "filename",
@@ -152,20 +138,9 @@ int grep_config(const char *var, const char *value, void *cb)
* Initialize one instance of grep_opt and copy the
* default values from the template we read the configuration
* information in an earlier call to git_config(grep_config).
- *
- * If using PCRE, make sure that the library is configured
- * to use the same allocator as Git (e.g. nedmalloc on Windows).
- *
- * Any allocated memory needs to be released in grep_destroy().
*/
void grep_init(struct grep_opt *opt, struct repository *repo, const char *prefix)
{
-#if defined(USE_LIBPCRE2)
- if (!pcre2_global_context)
- pcre2_global_context = pcre2_general_context_create(
- pcre2_malloc, pcre2_free, NULL);
-#endif
-
*opt = grep_defaults;
opt->repo = repo;
@@ -175,13 +150,6 @@ void grep_init(struct grep_opt *opt, struct repository *repo, const char *prefix
opt->header_tail = &opt->header_list;
}
-void grep_destroy(void)
-{
-#ifdef USE_LIBPCRE2
- pcre2_general_context_free(pcre2_global_context);
-#endif
-}
-
static void grep_set_pattern_type_option(enum grep_pattern_type pattern_type, struct grep_opt *opt)
{
/*
@@ -363,6 +331,28 @@ static int is_fixed(const char *s, size_t len)
}
#ifdef USE_LIBPCRE2
+#define GREP_PCRE2_DEBUG_MALLOC 0
+
+static void *pcre2_malloc(PCRE2_SIZE size, MAYBE_UNUSED void *memory_data)
+{
+ void *pointer = malloc(size);
+#if GREP_PCRE2_DEBUG_MALLOC
+ static int count = 1;
+ fprintf(stderr, "PCRE2:%p -> #%02d: alloc(%lu)\n", pointer, count++, size);
+#endif
+ return pointer;
+}
+
+static void pcre2_free(void *pointer, MAYBE_UNUSED void *memory_data)
+{
+#if GREP_PCRE2_DEBUG_MALLOC
+ static int count = 1;
+ if (pointer)
+ fprintf(stderr, "PCRE2:%p -> #%02d: free()\n", pointer, count++);
+#endif
+ free(pointer);
+}
+
static void compile_pcre2_pattern(struct grep_pat *p, const struct grep_opt *opt)
{
int error;
@@ -373,17 +363,20 @@ static void compile_pcre2_pattern(struct grep_pat *p, const struct grep_opt *opt
int patinforet;
size_t jitsizearg;
- assert(opt->pcre2);
-
- p->pcre2_compile_context = NULL;
+ /*
+ * Call pcre2_general_context_create() before calling any
+ * other pcre2_*(). It sets up our malloc()/free() functions
+ * with which everything else is allocated.
+ */
+ p->pcre2_general_context = pcre2_general_context_create(
+ pcre2_malloc, pcre2_free, NULL);
+ if (!p->pcre2_general_context)
+ die("Couldn't allocate PCRE2 general context");
- /* pcre2_global_context is initialized in append_grep_pattern */
if (opt->ignore_case) {
if (!opt->ignore_locale && has_non_ascii(p->pattern)) {
- if (!pcre2_global_context)
- BUG("pcre2_global_context uninitialized");
- p->pcre2_tables = pcre2_maketables(pcre2_global_context);
- p->pcre2_compile_context = pcre2_compile_context_create(NULL);
+ p->pcre2_tables = pcre2_maketables(p->pcre2_general_context);
+ p->pcre2_compile_context = pcre2_compile_context_create(p->pcre2_general_context);
pcre2_set_character_tables(p->pcre2_compile_context,
p->pcre2_tables);
}
@@ -393,28 +386,18 @@ static void compile_pcre2_pattern(struct grep_pat *p, const struct grep_opt *opt
!(!opt->ignore_case && (p->fixed || p->is_fixed)))
options |= (PCRE2_UTF | PCRE2_MATCH_INVALID_UTF);
+#ifdef GIT_PCRE2_VERSION_10_36_OR_HIGHER
/* Work around https://bugs.exim.org/show_bug.cgi?id=2642 fixed in 10.36 */
- if (PCRE2_MATCH_INVALID_UTF && options & (PCRE2_UTF | PCRE2_CASELESS)) {
- struct strbuf buf;
- int len;
- int err;
-
- if ((len = pcre2_config(PCRE2_CONFIG_VERSION, NULL)) < 0)
- BUG("pcre2_config(..., NULL) failed: %d", len);
- strbuf_init(&buf, len + 1);
- if ((err = pcre2_config(PCRE2_CONFIG_VERSION, buf.buf)) < 0)
- BUG("pcre2_config(..., buf.buf) failed: %d", err);
- if (versioncmp(buf.buf, "10.36") < 0)
- options |= PCRE2_NO_START_OPTIMIZE;
- strbuf_release(&buf);
- }
+ if (PCRE2_MATCH_INVALID_UTF && options & (PCRE2_UTF | PCRE2_CASELESS))
+ options |= PCRE2_NO_START_OPTIMIZE;
+#endif
p->pcre2_pattern = pcre2_compile((PCRE2_SPTR)p->pattern,
p->patternlen, options, &error, &erroffset,
p->pcre2_compile_context);
if (p->pcre2_pattern) {
- p->pcre2_match_data = pcre2_match_data_create_from_pattern(p->pcre2_pattern, NULL);
+ p->pcre2_match_data = pcre2_match_data_create_from_pattern(p->pcre2_pattern, p->pcre2_general_context);
if (!p->pcre2_match_data)
die("Couldn't allocate PCRE2 match data");
} else {
@@ -493,7 +476,12 @@ static void free_pcre2_pattern(struct grep_pat *p)
pcre2_compile_context_free(p->pcre2_compile_context);
pcre2_code_free(p->pcre2_pattern);
pcre2_match_data_free(p->pcre2_match_data);
+#ifdef GIT_PCRE2_VERSION_10_34_OR_HIGHER
+ pcre2_maketables_free(p->pcre2_general_context, p->pcre2_tables);
+#else
free((void *)p->pcre2_tables);
+#endif
+ pcre2_general_context_free(p->pcre2_general_context);
}
#else /* !USE_LIBPCRE2 */
static void compile_pcre2_pattern(struct grep_pat *p, const struct grep_opt *opt)
@@ -555,7 +543,6 @@ static void compile_regexp(struct grep_pat *p, struct grep_opt *opt)
#endif
if (p->fixed || p->is_fixed) {
#ifdef USE_LIBPCRE2
- opt->pcre2 = 1;
if (p->is_fixed) {
compile_pcre2_pattern(p, opt);
} else {
@@ -621,7 +608,7 @@ static struct grep_expr *compile_pattern_atom(struct grep_pat **list)
case GREP_PATTERN: /* atom */
case GREP_PATTERN_HEAD:
case GREP_PATTERN_BODY:
- x = xcalloc(1, sizeof (struct grep_expr));
+ CALLOC_ARRAY(x, 1);
x->node = GREP_NODE_ATOM;
x->u.atom = p;
*list = p->next;
@@ -651,7 +638,7 @@ static struct grep_expr *compile_pattern_not(struct grep_pat **list)
if (!p->next)
die("--not not followed by pattern expression");
*list = p->next;
- x = xcalloc(1, sizeof (struct grep_expr));
+ CALLOC_ARRAY(x, 1);
x->node = GREP_NODE_NOT;
x->u.unary = compile_pattern_not(list);
if (!x->u.unary)
@@ -676,7 +663,7 @@ static struct grep_expr *compile_pattern_and(struct grep_pat **list)
y = compile_pattern_and(list);
if (!y)
die("--and not followed by pattern expression");
- z = xcalloc(1, sizeof (struct grep_expr));
+ CALLOC_ARRAY(z, 1);
z->node = GREP_NODE_AND;
z->u.binary.left = x;
z->u.binary.right = y;
@@ -696,7 +683,7 @@ static struct grep_expr *compile_pattern_or(struct grep_pat **list)
y = compile_pattern_or(list);
if (!y)
die("not a pattern expression %s", p->pattern);
- z = xcalloc(1, sizeof (struct grep_expr));
+ CALLOC_ARRAY(z, 1);
z->node = GREP_NODE_OR;
z->u.binary.left = x;
z->u.binary.right = y;
diff --git a/grep.h b/grep.h
index ae89d62..72f82b1 100644
--- a/grep.h
+++ b/grep.h
@@ -4,10 +4,17 @@
#ifdef USE_LIBPCRE2
#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
+#if (PCRE2_MAJOR >= 10 && PCRE2_MINOR >= 36) || PCRE2_MAJOR >= 11
+#define GIT_PCRE2_VERSION_10_36_OR_HIGHER
+#endif
+#if (PCRE2_MAJOR >= 10 && PCRE2_MINOR >= 34) || PCRE2_MAJOR >= 11
+#define GIT_PCRE2_VERSION_10_34_OR_HIGHER
+#endif
#else
typedef int pcre2_code;
typedef int pcre2_match_data;
typedef int pcre2_compile_context;
+typedef int pcre2_general_context;
#endif
#ifndef PCRE2_MATCH_INVALID_UTF
/* PCRE2_MATCH_* dummy also with !USE_LIBPCRE2, for test-pcre2-config.c */
@@ -69,6 +76,7 @@ struct grep_pat {
pcre2_code *pcre2_pattern;
pcre2_match_data *pcre2_match_data;
pcre2_compile_context *pcre2_compile_context;
+ pcre2_general_context *pcre2_general_context;
const uint8_t *pcre2_tables;
uint32_t pcre2_jit_on;
unsigned fixed:1;
@@ -161,7 +169,6 @@ struct grep_opt {
int grep_config(const char *var, const char *value, void *);
void grep_init(struct grep_opt *, struct repository *repo, const char *prefix);
-void grep_destroy(void);
void grep_commit_pattern_type(enum grep_pattern_type, struct grep_opt *opt);
void append_grep_pat(struct grep_opt *opt, const char *pat, size_t patlen, const char *origin, int no, enum grep_pat_token t);
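
To condense the per-pattern PCRE2 lifecycle that compile_pcre2_pattern() and free_pcre2_pattern() now implement, here is a hypothetical sketch (pcre2_lifecycle_sketch() and the pattern string are invented; pcre2_malloc()/pcre2_free() are the allocators defined in the grep.c hunk above, and <pcre2.h> with PCRE2_CODE_UNIT_WIDTH 8 is assumed as in grep.h). The key ordering constraint is that the general context is created first, so every later PCRE2 allocation goes through Git's allocator:

    static void pcre2_lifecycle_sketch(void)
    {
            int err;
            PCRE2_SIZE erroff;
            pcre2_general_context *gc =
                    pcre2_general_context_create(pcre2_malloc, pcre2_free, NULL);
            const uint8_t *tables = pcre2_maketables(gc);
            pcre2_compile_context *cc = pcre2_compile_context_create(gc);
            pcre2_code *re;
            pcre2_match_data *md;

            pcre2_set_character_tables(cc, tables);
            re = pcre2_compile((PCRE2_SPTR)"pattern", PCRE2_ZERO_TERMINATED,
                               0, &err, &erroff, cc);
            md = re ? pcre2_match_data_create_from_pattern(re, gc) : NULL;

            /* teardown mirrors free_pcre2_pattern(); the *_free() calls accept NULL */
            pcre2_match_data_free(md);
            pcre2_code_free(re);
            pcre2_compile_context_free(cc);
            free((void *)tables); /* pcre2_maketables_free(gc, tables) on >= 10.34 */
            pcre2_general_context_free(gc);
    }
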
diff --git a/hashmap.c b/hashmap.c
index 5009471..134d2ee 100644
--- a/hashmap.c
+++ b/hashmap.c
@@ -76,7 +76,7 @@ unsigned int memihash_cont(unsigned int hash_seed, const void *buf, size_t len)
static void alloc_table(struct hashmap *map, unsigned int size)
{
map->tablesize = size;
- map->table = xcalloc(size, sizeof(struct hashmap_entry *));
+ CALLOC_ARRAY(map->table, size);
/* calculate resize thresholds for new size */
map->grow_at = (unsigned int) ((uint64_t) size * HASHMAP_LOAD_FACTOR / 100);
diff --git a/http-backend.c b/http-backend.c
index a03b4ba..b329bf6 100644
--- a/http-backend.c
+++ b/http-backend.c
@@ -39,7 +39,7 @@ static struct string_list *get_parameters(void)
if (!query_params) {
const char *query = getenv("QUERY_STRING");
- query_params = xcalloc(1, sizeof(*query_params));
+ CALLOC_ARRAY(query_params, 1);
while (query && *query) {
char *name = url_decode_parameter_name(&query);
char *value = url_decode_parameter_value(&query);
diff --git a/http-push.c b/http-push.c
index 6a4a43e..8131232 100644
--- a/http-push.c
+++ b/http-push.c
@@ -896,7 +896,7 @@ static struct remote_lock *lock_remote(const char *path, long timeout)
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers);
curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer);
- lock = xcalloc(1, sizeof(*lock));
+ CALLOC_ARRAY(lock, 1);
lock->timeout = -1;
if (start_active_slot(slot)) {
@@ -1436,7 +1436,7 @@ static void one_remote_ref(const char *refname)
* may be required for updating server info later.
*/
if (repo->can_update_info_refs && !has_object_file(&ref->old_oid)) {
- obj = lookup_unknown_object(&ref->old_oid);
+ obj = lookup_unknown_object(the_repository, &ref->old_oid);
fprintf(stderr, " fetch %s for %s\n",
oid_to_hex(&ref->old_oid), refname);
add_fetch_request(obj);
@@ -1713,7 +1713,7 @@ int cmd_main(int argc, const char **argv)
int new_refs;
struct ref *ref, *local_refs;
- repo = xcalloc(1, sizeof(*repo));
+ CALLOC_ARRAY(repo, 1);
argv++;
for (i = 1; i < argc; i++, argv++) {
diff --git a/http.c b/http.c
index f8ea28b..406410f 100644
--- a/http.c
+++ b/http.c
@@ -1635,23 +1635,33 @@ static int handle_curl_result(struct slot_results *results)
if (results->curl_result == CURLE_OK) {
credential_approve(&http_auth);
- if (proxy_auth.password)
- credential_approve(&proxy_auth);
+ credential_approve(&proxy_auth);
+ credential_approve(&cert_auth);
return HTTP_OK;
+ } else if (results->curl_result == CURLE_SSL_CERTPROBLEM) {
+ /*
+ * We can't tell from here whether it's a bad path, bad
+ * certificate, bad password, or something else wrong
+ * with the certificate. So we reject the credential to
+ * avoid caching or saving a bad password.
+ */
+ credential_reject(&cert_auth);
+ return HTTP_NOAUTH;
} else if (missing_target(results))
return HTTP_MISSING_TARGET;
else if (results->http_code == 401) {
+#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY
+ http_auth_methods &= ~CURLAUTH_GSSNEGOTIATE;
+ if (results->auth_avail) {
+ http_auth_methods &= results->auth_avail;
+ http_auth_methods_restricted = 1;
+ return HTTP_REAUTH;
+ }
+#endif
if (http_auth.username && http_auth.password) {
credential_reject(&http_auth);
return HTTP_NOAUTH;
} else {
-#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY
- http_auth_methods &= ~CURLAUTH_GSSNEGOTIATE;
- if (results->auth_avail) {
- http_auth_methods &= results->auth_avail;
- http_auth_methods_restricted = 1;
- }
-#endif
return HTTP_REAUTH;
}
} else {
@@ -2324,7 +2334,7 @@ struct http_pack_request *new_direct_http_pack_request(
off_t prev_posn = 0;
struct http_pack_request *preq;
- preq = xcalloc(1, sizeof(*preq));
+ CALLOC_ARRAY(preq, 1);
strbuf_init(&preq->tmpfile, 0);
preq->url = url;
@@ -2419,7 +2429,7 @@ struct http_object_request *new_http_object_request(const char *base_url,
off_t prev_posn = 0;
struct http_object_request *freq;
- freq = xcalloc(1, sizeof(*freq));
+ CALLOC_ARRAY(freq, 1);
strbuf_init(&freq->tmpfile, 0);
oidcpy(&freq->oid, oid);
freq->localfile = -1;
diff --git a/imap-send.c b/imap-send.c
index d0b94f9..bb085d6 100644
--- a/imap-send.c
+++ b/imap-send.c
@@ -963,9 +963,9 @@ static struct imap_store *imap_open_store(struct imap_server_conf *srvc, const c
char *arg, *rsp;
int s = -1, preauth;
- ctx = xcalloc(1, sizeof(*ctx));
+ CALLOC_ARRAY(ctx, 1);
- ctx->imap = imap = xcalloc(1, sizeof(*imap));
+ ctx->imap = CALLOC_ARRAY(imap, 1);
imap->buf.sock.fd[0] = imap->buf.sock.fd[1] = -1;
imap->in_progress_append = &imap->in_progress;
diff --git a/line-log.c b/line-log.c
index 75c8b1a..51d9331 100644
--- a/line-log.c
+++ b/line-log.c
@@ -296,7 +296,7 @@ static void line_log_data_insert(struct line_log_data **list,
return;
}
- p = xcalloc(1, sizeof(struct line_log_data));
+ CALLOC_ARRAY(p, 1);
p->path = path;
range_set_append(&p->ranges, begin, end);
if (ip) {
diff --git a/line-range.c b/line-range.c
index 9b50583..955a8a9 100644
--- a/line-range.c
+++ b/line-range.c
@@ -202,7 +202,7 @@ static const char *parse_range_funcname(
drv = userdiff_find_by_path(istate, path);
if (drv && drv->funcname.pattern) {
const struct userdiff_funcname *pe = &drv->funcname;
- xecfg = xcalloc(1, sizeof(*xecfg));
+ CALLOC_ARRAY(xecfg, 1);
xdiff_set_find_func(xecfg, pe->pattern, pe->cflags);
}
diff --git a/list-objects-filter.c b/list-objects-filter.c
index 4ec0041..39e2f15 100644
--- a/list-objects-filter.c
+++ b/list-objects-filter.c
@@ -186,7 +186,7 @@ static enum list_objects_filter_result filter_trees_depth(
seen_info = oidmap_get(
&filter_data->seen_at_depth, &obj->oid);
if (!seen_info) {
- seen_info = xcalloc(1, sizeof(*seen_info));
+ CALLOC_ARRAY(seen_info, 1);
oidcpy(&seen_info->base.oid, &obj->oid);
seen_info->depth = filter_data->current_depth;
oidmap_put(&filter_data->seen_at_depth, seen_info);
@@ -626,7 +626,7 @@ static void filter_combine__init(
size_t sub;
d->nr = filter_options->sub_nr;
- d->sub = xcalloc(d->nr, sizeof(*d->sub));
+ CALLOC_ARRAY(d->sub, d->nr);
for (sub = 0; sub < d->nr; sub++)
d->sub[sub].filter = list_objects_filter__init(
filter->omits ? &d->sub[sub].omits : NULL,
@@ -674,7 +674,7 @@ struct filter *list_objects_filter__init(
if (!init_fn)
return NULL;
- filter = xcalloc(1, sizeof(*filter));
+ CALLOC_ARRAY(filter, 1);
filter->omits = omitted;
init_fn(filter_options, filter);
return filter;
diff --git a/ll-merge.c b/ll-merge.c
index 1ec0b95..9a8a2c3 100644
--- a/ll-merge.c
+++ b/ll-merge.c
@@ -268,7 +268,7 @@ static int read_merge_config(const char *var, const char *value, void *cb)
if (!strncmp(fn->name, name, namelen) && !fn->name[namelen])
break;
if (!fn) {
- fn = xcalloc(1, sizeof(struct ll_merge_driver));
+ CALLOC_ARRAY(fn, 1);
fn->name = xmemdupz(name, namelen);
fn->fn = ll_ext_merge;
*ll_user_merge_tail = fn;
diff --git a/log-tree.c b/log-tree.c
index 4531ceb..f3178a6 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -369,8 +369,14 @@ void fmt_output_subject(struct strbuf *filename,
int start_len = filename->len;
int max_len = start_len + info->patch_name_max - (strlen(suffix) + 1);
- if (0 < info->reroll_count)
- strbuf_addf(filename, "v%d-", info->reroll_count);
+ if (info->reroll_count) {
+ struct strbuf temp = STRBUF_INIT;
+
+ strbuf_addf(&temp, "v%s", info->reroll_count);
+ format_sanitized_subject(filename, temp.buf, temp.len);
+ strbuf_addstr(filename, "-");
+ strbuf_release(&temp);
+ }
strbuf_addf(filename, "%04d-%s", nr, subject);
if (max_len < filename->len)
diff --git a/mailmap.c b/mailmap.c
index 9bb9cf8..d1f7c0d 100644
--- a/mailmap.c
+++ b/mailmap.c
@@ -83,7 +83,7 @@ static void add_mapping(struct string_list *map,
if (item->util) {
me = (struct mailmap_entry *)item->util;
} else {
- me = xcalloc(1, sizeof(struct mailmap_entry));
+ CALLOC_ARRAY(me, 1);
me->namemap.strdup_strings = 1;
me->namemap.cmp = namemap_cmp;
item->util = me;
@@ -157,20 +157,30 @@ static void read_mailmap_line(struct string_list *map, char *buffer)
add_mapping(map, name1, email1, name2, email2);
}
-static int read_mailmap_file(struct string_list *map, const char *filename)
+/* Flags for read_mailmap_file() */
+#define MAILMAP_NOFOLLOW (1<<0)
+
+static int read_mailmap_file(struct string_list *map, const char *filename,
+ unsigned flags)
{
char buffer[1024];
FILE *f;
+ int fd;
if (!filename)
return 0;
- f = fopen(filename, "r");
- if (!f) {
+ if (flags & MAILMAP_NOFOLLOW)
+ fd = open_nofollow(filename, O_RDONLY);
+ else
+ fd = open(filename, O_RDONLY);
+
+ if (fd < 0) {
if (errno == ENOENT)
return 0;
return error_errno("unable to open mailmap at %s", filename);
}
+ f = xfdopen(fd, "r");
while (fgets(buffer, sizeof(buffer), f) != NULL)
read_mailmap_line(map, buffer);
@@ -226,10 +236,12 @@ int read_mailmap(struct string_list *map)
git_mailmap_blob = "HEAD:.mailmap";
if (!startup_info->have_repository || !is_bare_repository())
- err |= read_mailmap_file(map, ".mailmap");
+ err |= read_mailmap_file(map, ".mailmap",
+ startup_info->have_repository ?
+ MAILMAP_NOFOLLOW : 0);
if (startup_info->have_repository)
err |= read_mailmap_blob(map, git_mailmap_blob);
- err |= read_mailmap_file(map, git_mailmap_file);
+ err |= read_mailmap_file(map, git_mailmap_file, 0);
return err;
}
diff --git a/mem-pool.c b/mem-pool.c
index 8401761..ccdcad2 100644
--- a/mem-pool.c
+++ b/mem-pool.c
@@ -5,7 +5,7 @@
#include "cache.h"
#include "mem-pool.h"
-#define BLOCK_GROWTH_SIZE 1024*1024 - sizeof(struct mp_block);
+#define BLOCK_GROWTH_SIZE (1024 * 1024 - sizeof(struct mp_block))
/*
* Allocate a new mp_block and insert it after the block specified in
diff --git a/merge-ort.c b/merge-ort.c
index 603d30c..6c2792b 100644
--- a/merge-ort.c
+++ b/merge-ort.c
@@ -18,6 +18,7 @@
#include "merge-ort.h"
#include "alloc.h"
+#include "attr.h"
#include "blob.h"
#include "cache-tree.h"
#include "commit.h"
@@ -25,6 +26,7 @@
#include "diff.h"
#include "diffcore.h"
#include "dir.h"
+#include "entry.h"
#include "ll-merge.h"
#include "object-store.h"
#include "revision.h"
@@ -51,6 +53,12 @@ enum merge_side {
MERGE_SIDE2 = 2
};
+struct traversal_callback_data {
+ unsigned long mask;
+ unsigned long dirmask;
+ struct name_entry names[3];
+};
+
struct rename_info {
/*
* All variables that are arrays of size 3 correspond to data tracked
@@ -67,8 +75,12 @@ struct rename_info {
/*
* dirs_removed: directories removed on a given side of history.
+ *
+ * The keys of dirs_removed[side] are the directories that were removed
+ * on the given side of history. The value of the strintmap for each
+ * directory is a value from enum dir_rename_relevance.
*/
- struct strset dirs_removed[3];
+ struct strintmap dirs_removed[3];
/*
* dir_rename_count: tracking where parts of a directory were renamed to
@@ -89,6 +101,46 @@ struct rename_info {
struct strmap dir_renames[3];
/*
+ * relevant_sources: deleted paths wanted in rename detection, and why
+ *
+ * relevant_sources is a set of deleted paths on each side of
+ * history for which we need rename detection. If a path is deleted
+ * on one side of history, we need to detect if it is part of a
+ * rename if either
+ * * the file is modified/deleted on the other side of history
+ * * we need to detect renames for an ancestor directory
+ * If neither of those are true, we can skip rename detection for
+ * that path. The reason is stored as a value from enum
+ * file_rename_relevance, as the reason can inform the algorithm in
+ * diffcore_rename_extended().
+ */
+ struct strintmap relevant_sources[3];
+
+ /*
+ * dir_rename_mask:
+ * 0: optimization removing unmodified potential rename source okay
+ * 2 or 4: optimization okay, but must check for files added to dir
+ * 7: optimization forbidden; need rename source in case of dir rename
+ */
+ unsigned dir_rename_mask:3;
+
+ /*
+ * callback_data_*: supporting data structures for alternate traversal
+ *
+ * We sometimes need to be able to traverse through all the files
+ * in a given tree before all immediate subdirectories within that
+ * tree. Since traverse_trees() doesn't do that naturally, we have
+ * a traverse_trees_wrapper() that stores any immediate
+ * subdirectories while traversing files, then traverses the
+ * immediate subdirectories later. These callback_data* variables
+ * store the information for the subdirectories so that we can do
+ * that traversal order.
+ */
+ struct traversal_callback_data *callback_data;
+ int callback_data_nr, callback_data_alloc;
+ char *callback_data_traverse_path;
+
+ /*
* needed_limit: value needed for inexact rename detection to run
*
* If the current rename limit wasn't high enough for inexact
@@ -171,6 +223,16 @@ struct merge_options_internal {
struct rename_info renames;
/*
+ * attr_index: hacky minimal index used for renormalization
+ *
+ * renormalization code _requires_ an index, though it only needs to
+ * find a .gitattributes file within the index. So, when
+ * renormalization is important, we create a special index with just
+ * that one file.
+ */
+ struct index_state attr_index;
+
+ /*
* current_dir_name, toplevel_dir: temporary vars
*
* These are used in collect_merge_info_callback(), and will set the
@@ -318,8 +380,8 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti,
int i;
void (*strmap_func)(struct strmap *, int) =
reinitialize ? strmap_partial_clear : strmap_clear;
- void (*strset_func)(struct strset *) =
- reinitialize ? strset_partial_clear : strset_clear;
+ void (*strintmap_func)(struct strintmap *) =
+ reinitialize ? strintmap_partial_clear : strintmap_clear;
/*
* We marked opti->paths with strdup_strings = 0, so that we
@@ -349,21 +411,20 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti,
string_list_clear(&opti->paths_to_free, 0);
opti->paths_to_free.strdup_strings = 0;
+ if (opti->attr_index.cache_nr) /* true iff opt->renormalize */
+ discard_index(&opti->attr_index);
+
/* Free memory used by various renames maps */
for (i = MERGE_SIDE1; i <= MERGE_SIDE2; ++i) {
- struct hashmap_iter iter;
- struct strmap_entry *entry;
-
- strset_func(&renames->dirs_removed[i]);
+ strintmap_func(&renames->dirs_removed[i]);
- strmap_for_each_entry(&renames->dir_rename_count[i],
- &iter, entry) {
- struct strintmap *counts = entry->value;
- strintmap_clear(counts);
- }
- strmap_func(&renames->dir_rename_count[i], 1);
+ partial_clear_dir_rename_count(&renames->dir_rename_count[i]);
+ if (!reinitialize)
+ strmap_clear(&renames->dir_rename_count[i], 1);
strmap_func(&renames->dir_renames[i], 0);
+
+ strintmap_func(&renames->relevant_sources[i]);
}
if (!reinitialize) {
@@ -386,6 +447,12 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti,
}
strmap_clear(&opti->output, 0);
}
+
+ renames->dir_rename_mask = 0;
+
+ /* Clean out callback_data as well. */
+ FREE_AND_NULL(renames->callback_data);
+ renames->callback_data_nr = renames->callback_data_alloc = 0;
}
static int err(struct merge_options *opt, const char *err, ...)
@@ -476,6 +543,82 @@ static char *unique_path(struct strmap *existing_paths,
/*** Function Grouping: functions related to collect_merge_info() ***/
+static int traverse_trees_wrapper_callback(int n,
+ unsigned long mask,
+ unsigned long dirmask,
+ struct name_entry *names,
+ struct traverse_info *info)
+{
+ struct merge_options *opt = info->data;
+ struct rename_info *renames = &opt->priv->renames;
+ unsigned filemask = mask & ~dirmask;
+
+ assert(n==3);
+
+ if (!renames->callback_data_traverse_path)
+ renames->callback_data_traverse_path = xstrdup(info->traverse_path);
+
+ if (filemask && filemask == renames->dir_rename_mask)
+ renames->dir_rename_mask = 0x07;
+
+ ALLOC_GROW(renames->callback_data, renames->callback_data_nr + 1,
+ renames->callback_data_alloc);
+ renames->callback_data[renames->callback_data_nr].mask = mask;
+ renames->callback_data[renames->callback_data_nr].dirmask = dirmask;
+ COPY_ARRAY(renames->callback_data[renames->callback_data_nr].names,
+ names, 3);
+ renames->callback_data_nr++;
+
+ return mask;
+}
+
+/*
+ * Much like traverse_trees(), BUT:
+ * - read all the tree entries FIRST, saving them
+ * - note that the above step provides an opportunity to compute necessary
+ * additional details before the "real" traversal
+ * - loop through the saved entries and call the original callback on them
+ */
+static int traverse_trees_wrapper(struct index_state *istate,
+ int n,
+ struct tree_desc *t,
+ struct traverse_info *info)
+{
+ int ret, i, old_offset;
+ traverse_callback_t old_fn;
+ char *old_callback_data_traverse_path;
+ struct merge_options *opt = info->data;
+ struct rename_info *renames = &opt->priv->renames;
+
+ assert(renames->dir_rename_mask == 2 || renames->dir_rename_mask == 4);
+
+ old_callback_data_traverse_path = renames->callback_data_traverse_path;
+ old_fn = info->fn;
+ old_offset = renames->callback_data_nr;
+
+ renames->callback_data_traverse_path = NULL;
+ info->fn = traverse_trees_wrapper_callback;
+ ret = traverse_trees(istate, n, t, info);
+ if (ret < 0)
+ return ret;
+
+ info->traverse_path = renames->callback_data_traverse_path;
+ info->fn = old_fn;
+ for (i = old_offset; i < renames->callback_data_nr; ++i) {
+ info->fn(n,
+ renames->callback_data[i].mask,
+ renames->callback_data[i].dirmask,
+ renames->callback_data[i].names,
+ info);
+ }
+
+ renames->callback_data_nr = old_offset;
+ free(renames->callback_data_traverse_path);
+ renames->callback_data_traverse_path = old_callback_data_traverse_path;
+ info->traverse_path = NULL;
+ return 0;
+}
+
static void setup_path_info(struct merge_options *opt,
struct string_list_item *result,
const char *current_dir_name,
@@ -539,12 +682,25 @@ static void add_pair(struct merge_options *opt,
struct name_entry *names,
const char *pathname,
unsigned side,
- unsigned is_add /* if false, is_delete */)
+ unsigned is_add /* if false, is_delete */,
+ unsigned match_mask,
+ unsigned dir_rename_mask)
{
struct diff_filespec *one, *two;
struct rename_info *renames = &opt->priv->renames;
int names_idx = is_add ? side : 0;
+ if (!is_add) {
+ unsigned content_relevant = (match_mask == 0);
+ unsigned location_relevant = (dir_rename_mask == 0x07);
+
+ if (content_relevant || location_relevant) {
+ /* content_relevant trumps location_relevant */
+ strintmap_set(&renames->relevant_sources[side], pathname,
+ content_relevant ? RELEVANT_CONTENT : RELEVANT_LOCATION);
+ }
+ }
+
one = alloc_filespec(pathname);
two = alloc_filespec(pathname);
fill_filespec(is_add ? two : one,
@@ -563,14 +719,75 @@ static void collect_rename_info(struct merge_options *opt,
struct rename_info *renames = &opt->priv->renames;
unsigned side;
+ /*
+ * Update dir_rename_mask (determines ignore-rename-source validity)
+ *
+ * dir_rename_mask helps us keep track of when directory rename
+ * detection may be relevant. Basically, whenever a directory is
+ * removed on one side of history, and a file is added to that
+ * directory on the other side of history, directory rename
+ * detection is relevant (meaning we have to detect renames for all
+ * files within that directory to deduce where the directory
+ * moved). Also, whenever a directory needs directory rename
+ * detection, due to the "majority rules" choice for where to move
+ * it (see t6423 testcase 1f), we also need to detect renames for
+ * all files within subdirectories of that directory as well.
+ *
+ * Here we haven't looked at files within the directory yet; we are
+ * just looking at the directory itself. So, if we aren't yet in
+ * a case where a parent directory needed directory rename detection
+ * (i.e. dir_rename_mask != 0x07), and if the directory was removed
+ * on one side of history, record the mask of the other side of
+ * history in dir_rename_mask.
+ */
+ if (renames->dir_rename_mask != 0x07 &&
+ (dirmask == 3 || dirmask == 5)) {
+ /* simple sanity check */
+ assert(renames->dir_rename_mask == 0 ||
+ renames->dir_rename_mask == (dirmask & ~1));
+ /* update dir_rename_mask; have it record mask of new side */
+ renames->dir_rename_mask = (dirmask & ~1);
+ }
+
/* Update dirs_removed, as needed */
if (dirmask == 1 || dirmask == 3 || dirmask == 5) {
/* absent_mask = 0x07 - dirmask; sides = absent_mask/2 */
unsigned sides = (0x07 - dirmask)/2;
+ unsigned relevance = (renames->dir_rename_mask == 0x07) ?
+ RELEVANT_FOR_ANCESTOR : NOT_RELEVANT;
+ /*
+ * Record relevance of this directory. However, note that
+ * when collect_merge_info_callback() recurses into this
+ * directory and calls collect_rename_info() on paths
+ * within that directory, if we find a path that was added
+ * to this directory on the other side of history, we will
+ * upgrade this value to RELEVANT_FOR_SELF; see below.
+ */
if (sides & 1)
- strset_add(&renames->dirs_removed[1], fullname);
+ strintmap_set(&renames->dirs_removed[1], fullname,
+ relevance);
if (sides & 2)
- strset_add(&renames->dirs_removed[2], fullname);
+ strintmap_set(&renames->dirs_removed[2], fullname,
+ relevance);
+ }
+
+ /*
+ * Here's the block that potentially upgrades to RELEVANT_FOR_SELF:
+ * when we run across a file added to a directory, we find the
+ * directory of that file and upgrade its relevance.
+ */
+ if (renames->dir_rename_mask == 0x07 &&
+ (filemask == 2 || filemask == 4)) {
+ /*
+ * Need directory rename for parent directory on other side
+ * of history from added file. Thus
+ * side = (~filemask & 0x06) >> 1
+ * or
+ * side = 3 - (filemask/2).
+ */
+ unsigned side = 3 - (filemask >> 1);
+ strintmap_set(&renames->dirs_removed[side], dirname,
+ RELEVANT_FOR_SELF);
}
if (filemask == 0 || filemask == 7)
@@ -581,11 +798,15 @@ static void collect_rename_info(struct merge_options *opt,
/* Check for deletion on side */
if ((filemask & 1) && !(filemask & side_mask))
- add_pair(opt, names, fullname, side, 0 /* delete */);
+ add_pair(opt, names, fullname, side, 0 /* delete */,
+ match_mask & filemask,
+ renames->dir_rename_mask);
/* Check for addition on side */
if (!(filemask & 1) && (filemask & side_mask))
- add_pair(opt, names, fullname, side, 1 /* add */);
+ add_pair(opt, names, fullname, side, 1 /* add */,
+ match_mask & filemask,
+ renames->dir_rename_mask);
}
}
@@ -603,12 +824,14 @@ static int collect_merge_info_callback(int n,
*/
struct merge_options *opt = info->data;
struct merge_options_internal *opti = opt->priv;
+ struct rename_info *renames = &opt->priv->renames;
struct string_list_item pi; /* Path Info */
struct conflict_info *ci; /* typed alias to pi.util (which is void*) */
struct name_entry *p;
size_t len;
char *fullpath;
const char *dirname = opti->current_dir_name;
+ unsigned prev_dir_rename_mask = renames->dir_rename_mask;
unsigned filemask = mask & ~dirmask;
unsigned match_mask = 0; /* will be updated below */
unsigned mbase_null = !(mask & 1);
@@ -749,8 +972,13 @@ static int collect_merge_info_callback(int n,
original_dir_name = opti->current_dir_name;
opti->current_dir_name = pi.string;
- ret = traverse_trees(NULL, 3, t, &newinfo);
+ if (renames->dir_rename_mask == 0 ||
+ renames->dir_rename_mask == 0x07)
+ ret = traverse_trees(NULL, 3, t, &newinfo);
+ else
+ ret = traverse_trees_wrapper(NULL, 3, t, &newinfo);
opti->current_dir_name = original_dir_name;
+ renames->dir_rename_mask = prev_dir_rename_mask;
for (i = MERGE_BASE; i <= MERGE_SIDE2; i++)
free(buf[i]);
@@ -974,6 +1202,63 @@ static int merge_submodule(struct merge_options *opt,
return 0;
}
+static void initialize_attr_index(struct merge_options *opt)
+{
+ /*
+ * The renormalize_buffer() functions require attributes, and
+ * annoyingly those can only be read from the working tree or from
+ * an index_state. merge-ort doesn't have an index_state, so we
+ * generate a fake one containing only attribute information.
+ */
+ struct merged_info *mi;
+ struct index_state *attr_index = &opt->priv->attr_index;
+ struct cache_entry *ce;
+
+ attr_index->initialized = 1;
+
+ if (!opt->renormalize)
+ return;
+
+ mi = strmap_get(&opt->priv->paths, GITATTRIBUTES_FILE);
+ if (!mi)
+ return;
+
+ if (mi->clean) {
+ int len = strlen(GITATTRIBUTES_FILE);
+ ce = make_empty_cache_entry(attr_index, len);
+ ce->ce_mode = create_ce_mode(mi->result.mode);
+ ce->ce_flags = create_ce_flags(0);
+ ce->ce_namelen = len;
+ oidcpy(&ce->oid, &mi->result.oid);
+ memcpy(ce->name, GITATTRIBUTES_FILE, len);
+ add_index_entry(attr_index, ce,
+ ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
+ get_stream_filter(attr_index, GITATTRIBUTES_FILE, &ce->oid);
+ } else {
+ int stage, len;
+ struct conflict_info *ci;
+
+ ASSIGN_AND_VERIFY_CI(ci, mi);
+ for (stage = 0; stage < 3; stage++) {
+ unsigned stage_mask = (1 << stage);
+
+ if (!(ci->filemask & stage_mask))
+ continue;
+ len = strlen(GITATTRIBUTES_FILE);
+ ce = make_empty_cache_entry(attr_index, len);
+ ce->ce_mode = create_ce_mode(ci->stages[stage].mode);
+ ce->ce_flags = create_ce_flags(stage);
+ ce->ce_namelen = len;
+ oidcpy(&ce->oid, &ci->stages[stage].oid);
+ memcpy(ce->name, GITATTRIBUTES_FILE, len);
+ add_index_entry(attr_index, ce,
+ ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
+ get_stream_filter(attr_index, GITATTRIBUTES_FILE,
+ &ce->oid);
+ }
+ }
+}
+
static int merge_3way(struct merge_options *opt,
const char *path,
const struct object_id *o,
@@ -988,6 +1273,9 @@ static int merge_3way(struct merge_options *opt,
char *base, *name1, *name2;
int merge_status;
+ if (!opt->priv->attr_index.initialized)
+ initialize_attr_index(opt);
+
ll_opts.renormalize = opt->renormalize;
ll_opts.extra_marker_size = extra_marker_size;
ll_opts.xdl_opts = opt->xdl_opts;
@@ -1026,7 +1314,7 @@ static int merge_3way(struct merge_options *opt,
merge_status = ll_merge(result_buf, path, &orig, base,
&src1, name1, &src2, name2,
- opt->repo->index, &ll_opts);
+ &opt->priv->attr_index, &ll_opts);
free(base);
free(name1);
@@ -1302,131 +1590,6 @@ static char *handle_path_level_conflicts(struct merge_options *opt,
return new_path;
}
-static void dirname_munge(char *filename)
-{
- char *slash = strrchr(filename, '/');
- if (!slash)
- slash = filename;
- *slash = '\0';
-}
-
-static void increment_count(struct strmap *dir_rename_count,
- char *old_dir,
- char *new_dir)
-{
- struct strintmap *counts;
- struct strmap_entry *e;
-
- /* Get the {new_dirs -> counts} mapping using old_dir */
- e = strmap_get_entry(dir_rename_count, old_dir);
- if (e) {
- counts = e->value;
- } else {
- counts = xmalloc(sizeof(*counts));
- strintmap_init_with_options(counts, 0, NULL, 1);
- strmap_put(dir_rename_count, old_dir, counts);
- }
-
- /* Increment the count for new_dir */
- strintmap_incr(counts, new_dir, 1);
-}
-
-static void update_dir_rename_counts(struct strmap *dir_rename_count,
- struct strset *dirs_removed,
- const char *oldname,
- const char *newname)
-{
- char *old_dir = xstrdup(oldname);
- char *new_dir = xstrdup(newname);
- char new_dir_first_char = new_dir[0];
- int first_time_in_loop = 1;
-
- while (1) {
- dirname_munge(old_dir);
- dirname_munge(new_dir);
-
- /*
- * When renaming
- * "a/b/c/d/e/foo.c" -> "a/b/some/thing/else/e/foo.c"
- * then this suggests that both
- * a/b/c/d/e/ => a/b/some/thing/else/e/
- * a/b/c/d/ => a/b/some/thing/else/
- * so we want to increment counters for both. We do NOT,
- * however, also want to suggest that there was the following
- * rename:
- * a/b/c/ => a/b/some/thing/
- * so we need to quit at that point.
- *
- * Note the when first_time_in_loop, we only strip off the
- * basename, and we don't care if that's different.
- */
- if (!first_time_in_loop) {
- char *old_sub_dir = strchr(old_dir, '\0')+1;
- char *new_sub_dir = strchr(new_dir, '\0')+1;
- if (!*new_dir) {
- /*
- * Special case when renaming to root directory,
- * i.e. when new_dir == "". In this case, we had
- * something like
- * a/b/subdir => subdir
- * and so dirname_munge() sets things up so that
- * old_dir = "a/b\0subdir\0"
- * new_dir = "\0ubdir\0"
- * We didn't have a '/' to overwrite a '\0' onto
- * in new_dir, so we have to compare differently.
- */
- if (new_dir_first_char != old_sub_dir[0] ||
- strcmp(old_sub_dir+1, new_sub_dir))
- break;
- } else {
- if (strcmp(old_sub_dir, new_sub_dir))
- break;
- }
- }
-
- if (strset_contains(dirs_removed, old_dir))
- increment_count(dir_rename_count, old_dir, new_dir);
- else
- break;
-
- /* If we hit toplevel directory ("") for old or new dir, quit */
- if (!*old_dir || !*new_dir)
- break;
-
- first_time_in_loop = 0;
- }
-
- /* Free resources we don't need anymore */
- free(old_dir);
- free(new_dir);
-}
-
-static void compute_rename_counts(struct diff_queue_struct *pairs,
- struct strmap *dir_rename_count,
- struct strset *dirs_removed)
-{
- int i;
-
- for (i = 0; i < pairs->nr; ++i) {
- struct diff_filepair *pair = pairs->queue[i];
-
- /* File not part of directory rename if it wasn't renamed */
- if (pair->status != 'R')
- continue;
-
- /*
- * Make dir_rename_count contain a map of a map:
- * old_directory -> {new_directory -> count}
- * In other words, for every pair look at the directories for
- * the old filename and the new filename and count how many
- * times that pairing occurs.
- */
- update_dir_rename_counts(dir_rename_count, dirs_removed,
- pair->one->path,
- pair->two->path);
- }
-}
-
static void get_provisional_directory_renames(struct merge_options *opt,
unsigned side,
int *clean)
@@ -1435,9 +1598,6 @@ static void get_provisional_directory_renames(struct merge_options *opt,
struct strmap_entry *entry;
struct rename_info *renames = &opt->priv->renames;
- compute_rename_counts(&renames->pairs[side],
- &renames->dir_rename_count[side],
- &renames->dirs_removed[side]);
/*
* Collapse
* dir_rename_count: old_directory -> {new_directory -> count}
@@ -1466,6 +1626,9 @@ static void get_provisional_directory_renames(struct merge_options *opt,
}
}
+ if (max == 0)
+ continue;
+
if (bad_max == max) {
path_msg(opt, source_dir, 0,
_("CONFLICT (directory rename split): "
@@ -1474,18 +1637,7 @@ static void get_provisional_directory_renames(struct merge_options *opt,
"no destination getting a majority of the "
"files."),
source_dir);
- /*
- * We should mark this as unclean IF something attempts
- * to use this rename. We do not yet have the logic
- * in place to detect if this directory rename is being
- * used, and optimizations that reduce the number of
- * renames cause this to falsely trigger. For now,
- * just disable it, causing t6423 testcase 2a to break.
- * We'll later fix the detection, and when we do we
- * will re-enable setting *clean to 0 (and thereby fix
- * t6423 testcase 2a).
- */
- /* *clean = 0; */
+ *clean = 0;
} else {
strmap_put(&renames->dir_renames[side],
source_dir, (void*)best);
@@ -1576,8 +1728,7 @@ static void compute_collisions(struct strmap *collisions,
if (collision_info) {
free(new_path);
} else {
- collision_info = xcalloc(1,
- sizeof(struct collision_info));
+ CALLOC_ARRAY(collision_info, 1);
string_list_init(&collision_info->source_files, 0);
strmap_put(collisions, new_path, collision_info);
}
@@ -1718,7 +1869,7 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
struct conflict_info *dir_ci;
char *cur_dir = dirs_to_insert.items[i].string;
- dir_ci = xcalloc(1, sizeof(*dir_ci));
+ CALLOC_ARRAY(dir_ci, 1);
dir_ci->merged.directory_name = parent_name;
len = strlen(parent_name);
@@ -2112,6 +2263,19 @@ static int process_renames(struct merge_options *opt,
return clean_merge;
}
+static inline int possible_side_renames(struct rename_info *renames,
+ unsigned side_index)
+{
+ return renames->pairs[side_index].nr > 0 &&
+ !strintmap_empty(&renames->relevant_sources[side_index]);
+}
+
+static inline int possible_renames(struct rename_info *renames)
+{
+ return possible_side_renames(renames, 1) ||
+ possible_side_renames(renames, 2);
+}
+
static void resolve_diffpair_statuses(struct diff_queue_struct *q)
{
/*
@@ -2148,6 +2312,16 @@ static void detect_regular_renames(struct merge_options *opt,
struct diff_options diff_opts;
struct rename_info *renames = &opt->priv->renames;
+ if (!possible_side_renames(renames, side_index)) {
+ /*
+ * No rename detection needed for this side, but we still need
+ * to make sure 'adds' are marked correctly in case the other
+ * side had directory renames.
+ */
+ resolve_diffpair_statuses(&renames->pairs[side_index]);
+ return;
+ }
+
repo_diff_setup(opt->repo, &diff_opts);
diff_opts.flags.recursive = 1;
diff_opts.flags.rename_empty = 0;
@@ -2162,7 +2336,10 @@ static void detect_regular_renames(struct merge_options *opt,
diff_queued_diff = renames->pairs[side_index];
trace2_region_enter("diff", "diffcore_rename", opt->repo);
- diffcore_rename(&diff_opts);
+ diffcore_rename_extended(&diff_opts,
+ &renames->relevant_sources[side_index],
+ &renames->dirs_removed[side_index],
+ &renames->dir_rename_count[side_index]);
trace2_region_leave("diff", "diffcore_rename", opt->repo);
resolve_diffpair_statuses(&diff_queued_diff);
@@ -2262,6 +2439,8 @@ static int detect_and_process_renames(struct merge_options *opt,
int need_dir_renames, s, clean = 1;
memset(&combined, 0, sizeof(combined));
+ if (!possible_renames(renames))
+ goto cleanup;
trace2_region_enter("merge", "regular renames", opt->repo);
detect_regular_renames(opt, MERGE_SIDE1);
@@ -2289,13 +2468,32 @@ static int detect_and_process_renames(struct merge_options *opt,
clean &= collect_renames(opt, &combined, MERGE_SIDE2,
&renames->dir_renames[1],
&renames->dir_renames[2]);
- QSORT(combined.queue, combined.nr, compare_pairs);
+ STABLE_QSORT(combined.queue, combined.nr, compare_pairs);
trace2_region_leave("merge", "directory renames", opt->repo);
trace2_region_enter("merge", "process renames", opt->repo);
clean &= process_renames(opt, &combined);
trace2_region_leave("merge", "process renames", opt->repo);
+	goto simple_cleanup; /* collect_renames() handles some of the cleanup */
+
+cleanup:
+ /*
+ * Free now unneeded filepairs, which would have been handled
+ * in collect_renames() normally but we skipped that code.
+ */
+ for (s = MERGE_SIDE1; s <= MERGE_SIDE2; s++) {
+ struct diff_queue_struct *side_pairs;
+ int i;
+
+ side_pairs = &renames->pairs[s];
+ for (i = 0; i < side_pairs->nr; ++i) {
+ struct diff_filepair *p = side_pairs->queue[i];
+ diff_free_filepair(p);
+ }
+ }
+
+simple_cleanup:
/* Free memory for renames->pairs[] and combined */
for (s = MERGE_SIDE1; s <= MERGE_SIDE2; s++) {
free(renames->pairs[s].queue);
@@ -2340,6 +2538,61 @@ static int string_list_df_name_compare(const char *one, const char *two)
return onelen - twolen;
}
+static int read_oid_strbuf(struct merge_options *opt,
+ const struct object_id *oid,
+ struct strbuf *dst)
+{
+ void *buf;
+ enum object_type type;
+ unsigned long size;
+ buf = read_object_file(oid, &type, &size);
+ if (!buf)
+ return err(opt, _("cannot read object %s"), oid_to_hex(oid));
+ if (type != OBJ_BLOB) {
+ free(buf);
+ return err(opt, _("object %s is not a blob"), oid_to_hex(oid));
+ }
+ strbuf_attach(dst, buf, size, size + 1);
+ return 0;
+}
+
+static int blob_unchanged(struct merge_options *opt,
+ const struct version_info *base,
+ const struct version_info *side,
+ const char *path)
+{
+ struct strbuf basebuf = STRBUF_INIT;
+ struct strbuf sidebuf = STRBUF_INIT;
+ int ret = 0; /* assume changed for safety */
+ struct index_state *idx = &opt->priv->attr_index;
+
+ if (!idx->initialized)
+ initialize_attr_index(opt);
+
+ if (base->mode != side->mode)
+ return 0;
+ if (oideq(&base->oid, &side->oid))
+ return 1;
+
+ if (read_oid_strbuf(opt, &base->oid, &basebuf) ||
+ read_oid_strbuf(opt, &side->oid, &sidebuf))
+ goto error_return;
+ /*
+ * Note: binary | is used so that both renormalizations are
+ * performed. Comparison can be skipped if both files are
+ * unchanged since their sha1s have already been compared.
+ */
+ if (renormalize_buffer(idx, path, basebuf.buf, basebuf.len, &basebuf) |
+ renormalize_buffer(idx, path, sidebuf.buf, sidebuf.len, &sidebuf))
+ ret = (basebuf.len == sidebuf.len &&
+ !memcmp(basebuf.buf, sidebuf.buf, basebuf.len));
+
+error_return:
+ strbuf_release(&basebuf);
+ strbuf_release(&sidebuf);
+ return ret;
+}
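
To make the comment about the bitwise OR concrete, the same logic written out long-hand looks like the sketch below (illustrative only, reusing the idx/path/basebuf/sidebuf names from blob_unchanged() above); with a short-circuiting || the second renormalization would be skipped whenever the first buffer changed, and a renormalized buffer would end up compared against a raw one:

	int base_changed = renormalize_buffer(idx, path, basebuf.buf,
					      basebuf.len, &basebuf);
	int side_changed = renormalize_buffer(idx, path, sidebuf.buf,
					      sidebuf.len, &sidebuf);
	if (base_changed | side_changed)	/* both calls always run */
		ret = (basebuf.len == sidebuf.len &&
		       !memcmp(basebuf.buf, sidebuf.buf, basebuf.len));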
+
struct directory_versions {
/*
* versions: list of (basename -> version_info)
@@ -2400,22 +2653,15 @@ static void write_tree(struct object_id *result_oid,
size_t hash_size)
{
size_t maxlen = 0, extra;
- unsigned int nr = versions->nr - offset;
+ unsigned int nr;
struct strbuf buf = STRBUF_INIT;
- struct string_list relevant_entries = STRING_LIST_INIT_NODUP;
int i;
- /*
- * We want to sort the last (versions->nr-offset) entries in versions.
- * Do so by abusing the string_list API a bit: make another string_list
- * that contains just those entries and then sort them.
- *
- * We won't use relevant_entries again and will let it just pop off the
- * stack, so there won't be allocation worries or anything.
- */
- relevant_entries.items = versions->items + offset;
- relevant_entries.nr = versions->nr - offset;
- QSORT(relevant_entries.items, relevant_entries.nr, tree_entry_order);
+ assert(offset <= versions->nr);
+ nr = versions->nr - offset;
+ if (versions->nr)
+ /* No need for STABLE_QSORT -- filenames must be unique */
+ QSORT(versions->items + offset, nr, tree_entry_order);
/* Pre-allocate some space in buf */
extra = hash_size + 8; /* 8: 6 for mode, 1 for space, 1 for NUL char */
@@ -2703,7 +2949,7 @@ static void process_entry(struct merge_options *opt,
* the directory to remain here, so we need to move this
* path to some new location.
*/
- new_ci = xcalloc(1, sizeof(*new_ci));
+ CALLOC_ARRAY(new_ci, 1);
/* We don't really want new_ci->merged.result copied, but it'll
* be overwritten below so it doesn't matter. We also don't
* want any directory mode/oid values copied, but we'll zero
@@ -2926,8 +3172,13 @@ static void process_entry(struct merge_options *opt,
modify_branch = (side == 1) ? opt->branch1 : opt->branch2;
delete_branch = (side == 1) ? opt->branch2 : opt->branch1;
- if (ci->path_conflict &&
- oideq(&ci->stages[0].oid, &ci->stages[side].oid)) {
+ if (opt->renormalize &&
+ blob_unchanged(opt, &ci->stages[0], &ci->stages[side],
+ path)) {
+ ci->merged.is_null = 1;
+ ci->merged.clean = 1;
+ } else if (ci->path_conflict &&
+ oideq(&ci->stages[0].oid, &ci->stages[side].oid)) {
/*
* This came from a rename/delete; no action to take,
* but avoid printing "modify/delete" conflict notice
@@ -3083,7 +3334,7 @@ static int checkout(struct merge_options *opt,
unpack_opts.verbose_update = (opt->verbosity > 2);
unpack_opts.fn = twoway_merge;
if (1/* FIXME: opts->overwrite_ignore*/) {
- unpack_opts.dir = xcalloc(1, sizeof(*unpack_opts.dir));
+ CALLOC_ARRAY(unpack_opts.dir, 1);
unpack_opts.dir->flags |= DIR_SHOW_IGNORED;
setup_standard_excludes(unpack_opts.dir);
}
@@ -3099,23 +3350,27 @@ static int checkout(struct merge_options *opt,
return ret;
}
-static int record_conflicted_index_entries(struct merge_options *opt,
- struct index_state *index,
- struct strmap *paths,
- struct strmap *conflicted)
+static int record_conflicted_index_entries(struct merge_options *opt)
{
struct hashmap_iter iter;
struct strmap_entry *e;
+ struct index_state *index = opt->repo->index;
+ struct checkout state = CHECKOUT_INIT;
int errs = 0;
int original_cache_nr;
- if (strmap_empty(conflicted))
+ if (strmap_empty(&opt->priv->conflicted))
return 0;
+ /* If any entries have skip_worktree set, we'll have to check 'em out */
+ state.force = 1;
+ state.quiet = 1;
+ state.refresh_cache = 1;
+ state.istate = index;
original_cache_nr = index->cache_nr;
/* Put every entry from paths into plist, then sort */
- strmap_for_each_entry(conflicted, &iter, e) {
+ strmap_for_each_entry(&opt->priv->conflicted, &iter, e) {
const char *path = e->key;
struct conflict_info *ci = e->value;
int pos;
@@ -3156,9 +3411,23 @@ static int record_conflicted_index_entries(struct merge_options *opt,
	 * the higher order stages.  Thus, we need to override
* the CE_SKIP_WORKTREE bit and manually write those
* files to the working disk here.
- *
- * TODO: Implement this CE_SKIP_WORKTREE fixup.
*/
+ if (ce_skip_worktree(ce)) {
+ struct stat st;
+
+ if (!lstat(path, &st)) {
+ char *new_name = unique_path(&opt->priv->paths,
+ path,
+ "cruft");
+
+ path_msg(opt, path, 1,
+ _("Note: %s not up to date and in way of checking out conflicted version; old copy renamed to %s"),
+ path, new_name);
+ errs |= rename(path, new_name);
+ free(new_name);
+ }
+ errs |= checkout_entry(ce, &state, NULL, NULL);
+ }
/*
* Mark this cache entry for removal and instead add
@@ -3190,6 +3459,11 @@ static int record_conflicted_index_entries(struct merge_options *opt,
* entries we added to the end into their right locations.
*/
remove_marked_cache_entries(index, 1);
+ /*
+ * No need for STABLE_QSORT -- cmp_cache_name_compare sorts primarily
+ * on filename and secondarily on stage, and (name, stage #) are a
+ * unique tuple.
+ */
QSORT(index->cache, index->cache_nr, cmp_cache_name_compare);
return errs;
@@ -3203,7 +3477,8 @@ void merge_switch_to_result(struct merge_options *opt,
{
assert(opt->priv == NULL);
if (result->clean >= 0 && update_worktree_and_index) {
- struct merge_options_internal *opti = result->priv;
+ const char *filename;
+ FILE *fp;
trace2_region_enter("merge", "checkout", opt->repo);
if (checkout(opt, head, result->tree)) {
@@ -3214,14 +3489,22 @@ void merge_switch_to_result(struct merge_options *opt,
trace2_region_leave("merge", "checkout", opt->repo);
trace2_region_enter("merge", "record_conflicted", opt->repo);
- if (record_conflicted_index_entries(opt, opt->repo->index,
- &opti->paths,
- &opti->conflicted)) {
+ opt->priv = result->priv;
+ if (record_conflicted_index_entries(opt)) {
/* failure to function */
+ opt->priv = NULL;
result->clean = -1;
return;
}
+ opt->priv = NULL;
trace2_region_leave("merge", "record_conflicted", opt->repo);
+
+ trace2_region_enter("merge", "write_auto_merge", opt->repo);
+ filename = git_path_auto_merge(opt->repo);
+ fp = xfopen(filename, "w");
+ fprintf(fp, "%s\n", oid_to_hex(&result->tree->object.oid));
+ fclose(fp);
+ trace2_region_leave("merge", "write_auto_merge", opt->repo);
}
if (display_update_msgs) {
@@ -3266,6 +3549,8 @@ void merge_finalize(struct merge_options *opt,
{
struct merge_options_internal *opti = result->priv;
+ if (opt->renormalize)
+ git_attr_set_direction(GIT_ATTR_CHECKIN);
assert(opt->priv == NULL);
clear_or_reinit_internal_opts(opti, 0);
@@ -3274,6 +3559,23 @@ void merge_finalize(struct merge_options *opt,
/*** Function Grouping: helper functions for merge_incore_*() ***/
+static struct tree *shift_tree_object(struct repository *repo,
+ struct tree *one, struct tree *two,
+ const char *subtree_shift)
+{
+ struct object_id shifted;
+
+ if (!*subtree_shift) {
+ shift_tree(repo, &one->object.oid, &two->object.oid, &shifted, 0);
+ } else {
+ shift_tree_by(repo, &one->object.oid, &two->object.oid, &shifted,
+ subtree_shift);
+ }
+ if (oideq(&two->object.oid, &shifted))
+ return two;
+ return lookup_tree(repo, &shifted);
+}
+
static inline void set_commit_tree(struct commit *c, struct tree *t)
{
c->maybe_tree = t;
@@ -3341,6 +3643,10 @@ static void merge_start(struct merge_options *opt, struct merge_result *result)
/* Default to histogram diff. Actually, just hardcode it...for now. */
opt->xdl_opts = DIFF_WITH_ALG(opt, HISTOGRAM_DIFF);
+ /* Handle attr direction stuff for renormalization */
+ if (opt->renormalize)
+ git_attr_set_direction(GIT_ATTR_CHECKOUT);
+
/* Initialization of opt->priv, our internal merge data */
trace2_region_enter("merge", "allocate/init", opt->repo);
if (opt->priv) {
@@ -3353,12 +3659,14 @@ static void merge_start(struct merge_options *opt, struct merge_result *result)
/* Initialization of various renames fields */
renames = &opt->priv->renames;
for (i = MERGE_SIDE1; i <= MERGE_SIDE2; i++) {
- strset_init_with_options(&renames->dirs_removed[i],
- NULL, 0);
+ strintmap_init_with_options(&renames->dirs_removed[i],
+ NOT_RELEVANT, NULL, 0);
strmap_init_with_options(&renames->dir_rename_count[i],
NULL, 1);
strmap_init_with_options(&renames->dir_renames[i],
NULL, 0);
+ strintmap_init_with_options(&renames->relevant_sources[i],
+ 0, NULL, 0);
}
/*
@@ -3397,6 +3705,13 @@ static void merge_ort_nonrecursive_internal(struct merge_options *opt,
{
struct object_id working_tree_oid;
+ if (opt->subtree_shift) {
+ side2 = shift_tree_object(opt->repo, side1, side2,
+ opt->subtree_shift);
+ merge_base = shift_tree_object(opt->repo, side1, merge_base,
+ opt->subtree_shift);
+ }
+
trace2_region_enter("merge", "collect_merge_info", opt->repo);
if (collect_merge_info(opt, merge_base, side1, side2) != 0) {
/*
diff --git a/merge-recursive.c b/merge-recursive.c
index 91d8597..27b222a 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -303,7 +303,7 @@ static inline void setup_rename_conflict_info(enum rename_type rename_type,
return;
}
- ci = xcalloc(1, sizeof(struct rename_conflict_info));
+ CALLOC_ARRAY(ci, 1);
ci->rename_type = rename_type;
ci->ren1 = ren1;
ci->ren2 = ren2;
@@ -1077,6 +1077,11 @@ static int merge_3way(struct merge_options *opt,
read_mmblob(&src1, &a->oid);
read_mmblob(&src2, &b->oid);
+ /*
+ * FIXME: Using a->path for normalization rules in ll_merge could be
+ * wrong if we renamed from a->path to b->path. We should use the
+ * target path for where the file will be written.
+ */
merge_status = ll_merge(result_buf, a->path, &orig, base,
&src1, name1, &src2, name2,
opt->repo->index, &ll_opts);
@@ -1156,6 +1161,8 @@ static void print_commit(struct commit *commit)
struct strbuf sb = STRBUF_INIT;
struct pretty_print_context ctx = {0};
ctx.date_mode.type = DATE_NORMAL;
+ /* FIXME: Merge this with output_commit_title() */
+ assert(!merge_remote_util(commit));
format_commit_message(commit, " %h: %m %s", &sb, &ctx);
fprintf(stderr, "%s\n", sb.buf);
strbuf_release(&sb);
@@ -1179,6 +1186,11 @@ static int merge_submodule(struct merge_options *opt,
int search = !opt->priv->call_depth;
/* store a in result in case we fail */
+ /* FIXME: This is the WRONG resolution for the recursive case when
+ * we need to be careful to avoid accidentally matching either side.
+ * Should probably use o instead there, much like we do for merging
+ * binaries.
+ */
oidcpy(result, a);
/* we can not handle deletion conflicts */
@@ -1303,6 +1315,13 @@ static int merge_mode_and_contents(struct merge_options *opt,
if ((S_IFMT & a->mode) != (S_IFMT & b->mode)) {
result->clean = 0;
+ /*
+ * FIXME: This is a bad resolution for recursive case; for
+ * the recursive case we want something that is unlikely to
+ * accidentally match either side. Also, while it makes
+ * sense to prefer regular files over symlinks, it doesn't
+ * make sense to prefer regular files over submodules.
+ */
if (S_ISREG(a->mode)) {
result->blob.mode = a->mode;
oidcpy(&result->blob.oid, &a->oid);
@@ -1351,6 +1370,7 @@ static int merge_mode_and_contents(struct merge_options *opt,
free(result_buf.ptr);
if (ret)
return ret;
+ /* FIXME: bug, what if modes didn't match? */
result->clean = (merge_status == 0);
} else if (S_ISGITLINK(a->mode)) {
result->clean = merge_submodule(opt, &result->blob.oid,
@@ -2391,8 +2411,7 @@ static void compute_collisions(struct hashmap *collisions,
continue;
collision_ent = collision_find_entry(collisions, new_path);
if (!collision_ent) {
- collision_ent = xcalloc(1,
- sizeof(struct collision_entry));
+ CALLOC_ARRAY(collision_ent, 1);
hashmap_entry_init(&collision_ent->ent,
strhash(new_path));
hashmap_put(collisions, &collision_ent->ent);
@@ -2596,7 +2615,7 @@ static struct string_list *get_renames(struct merge_options *opt,
struct string_list *renames;
compute_collisions(&collisions, dir_renames, pairs);
- renames = xcalloc(1, sizeof(struct string_list));
+ CALLOC_ARRAY(renames, 1);
for (i = 0; i < pairs->nr; ++i) {
struct string_list_item *item;
@@ -2666,6 +2685,14 @@ static int process_renames(struct merge_options *opt,
struct string_list b_by_dst = STRING_LIST_INIT_NODUP;
const struct rename *sre;
+ /*
+ * FIXME: As string-list.h notes, it's O(n^2) to build a sorted
+ * string_list one-by-one, but O(n log n) to build it unsorted and
+ * then sort it. Note that as we build the list, we do not need to
+ * check if the existing destination path is already in the list,
+ * because the structure of diffcore_rename guarantees we won't
+ * have duplicates.
+ */
for (i = 0; i < a_renames->nr; i++) {
sre = a_renames->items[i].util;
string_list_insert(&a_by_dst, sre->pair->two->path)->util
@@ -3604,6 +3631,15 @@ static int merge_recursive_internal(struct merge_options *opt,
return err(opt, _("merge returned no commit"));
}
+ /*
+ * FIXME: Since merge_recursive_internal() is only ever called by
+ * places that ensure the index is loaded first
+ * (e.g. builtin/merge.c, rebase/sequencer, etc.), in the common
+ * case where the merge base was unique that means when we get here
+ * we immediately discard the index and re-read it, which is a
+ * complete waste of time. We should only be discarding and
+ * re-reading if we were forced to recurse.
+ */
discard_index(opt->repo->index);
if (!opt->priv->call_depth)
repo_read_index(opt->repo);
@@ -3666,7 +3702,7 @@ static int merge_start(struct merge_options *opt, struct tree *head)
return -1;
}
- opt->priv = xcalloc(1, sizeof(*opt->priv));
+ CALLOC_ARRAY(opt->priv, 1);
string_list_init(&opt->priv->df_conflict_file_set, 1);
return 0;
}
diff --git a/midx.c b/midx.c
index 971faa8..9e86583 100644
--- a/midx.c
+++ b/midx.c
@@ -12,6 +12,7 @@
#include "run-command.h"
#include "repository.h"
#include "chunk-format.h"
+#include "pack.h"
#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
#define MIDX_VERSION 1
@@ -47,11 +48,22 @@ static uint8_t oid_version(void)
}
}
+static const unsigned char *get_midx_checksum(struct multi_pack_index *m)
+{
+ return m->data + m->data_len - the_hash_algo->rawsz;
+}
+
static char *get_midx_filename(const char *object_dir)
{
return xstrfmt("%s/pack/multi-pack-index", object_dir);
}
+char *get_midx_rev_filename(struct multi_pack_index *m)
+{
+ return xstrfmt("%s/pack/multi-pack-index-%s.rev",
+ m->object_dir, hash_to_hex(get_midx_checksum(m)));
+}
+
static int midx_read_oid_fanout(const unsigned char *chunk_start,
size_t chunk_size, void *data)
{
@@ -145,8 +157,8 @@ struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local
m->num_objects = ntohl(m->chunk_oid_fanout[255]);
- m->pack_names = xcalloc(m->num_packs, sizeof(*m->pack_names));
- m->packs = xcalloc(m->num_packs, sizeof(*m->packs));
+ CALLOC_ARRAY(m->pack_names, m->num_packs);
+ CALLOC_ARRAY(m->packs, m->num_packs);
cur_pack_name = (const char *)m->chunk_pack_names;
for (i = 0; i < m->num_packs; i++) {
@@ -239,7 +251,7 @@ struct object_id *nth_midxed_object_oid(struct object_id *oid,
return oid;
}
-static off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
+off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
{
const unsigned char *offset_data;
uint32_t offset32;
@@ -258,7 +270,7 @@ static off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
return offset32;
}
-static uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
+uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
{
return get_be32(m->chunk_object_offsets +
(off_t)pos * MIDX_CHUNK_OFFSET_WIDTH);
@@ -431,6 +443,14 @@ static int pack_info_compare(const void *_a, const void *_b)
return strcmp(a->pack_name, b->pack_name);
}
+static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
+{
+ const char *pack_name = _va;
+ const struct pack_info *compar = _vb;
+
+ return cmp_idx_or_pack_name(pack_name, compar->pack_name);
+}
+
struct write_midx_context {
struct pack_info *info;
uint32_t nr;
@@ -443,8 +463,11 @@ struct write_midx_context {
uint32_t entries_nr;
uint32_t *pack_perm;
+ uint32_t *pack_order;
unsigned large_offsets_needed:1;
uint32_t num_large_offsets;
+
+ int preferred_pack_idx;
};
static void add_pack_to_midx(const char *full_path, size_t full_path_len,
@@ -489,6 +512,7 @@ struct pack_midx_entry {
uint32_t pack_int_id;
time_t pack_mtime;
uint64_t offset;
+ unsigned preferred : 1;
};
static int midx_oid_compare(const void *_a, const void *_b)
@@ -500,6 +524,12 @@ static int midx_oid_compare(const void *_a, const void *_b)
if (cmp)
return cmp;
+ /* Sort objects in a preferred pack first when multiple copies exist. */
+ if (a->preferred > b->preferred)
+ return -1;
+ if (a->preferred < b->preferred)
+ return 1;
+
if (a->pack_mtime > b->pack_mtime)
return -1;
else if (a->pack_mtime < b->pack_mtime)
@@ -527,7 +557,8 @@ static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
static void fill_pack_entry(uint32_t pack_int_id,
struct packed_git *p,
uint32_t cur_object,
- struct pack_midx_entry *entry)
+ struct pack_midx_entry *entry,
+ int preferred)
{
if (nth_packed_object_id(&entry->oid, p, cur_object) < 0)
die(_("failed to locate object %d in packfile"), cur_object);
@@ -536,6 +567,7 @@ static void fill_pack_entry(uint32_t pack_int_id,
entry->pack_mtime = p->mtime;
entry->offset = nth_packed_object_offset(p, cur_object);
+ entry->preferred = !!preferred;
}
/*
@@ -552,7 +584,8 @@ static void fill_pack_entry(uint32_t pack_int_id,
static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
struct pack_info *info,
uint32_t nr_packs,
- uint32_t *nr_objects)
+ uint32_t *nr_objects,
+ int preferred_pack)
{
uint32_t cur_fanout, cur_pack, cur_object;
uint32_t alloc_fanout, alloc_objects, total_objects = 0;
@@ -589,12 +622,17 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
nth_midxed_pack_midx_entry(m,
&entries_by_fanout[nr_fanout],
cur_object);
+ if (nth_midxed_pack_int_id(m, cur_object) == preferred_pack)
+ entries_by_fanout[nr_fanout].preferred = 1;
+ else
+ entries_by_fanout[nr_fanout].preferred = 0;
nr_fanout++;
}
}
for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
uint32_t start = 0, end;
+ int preferred = cur_pack == preferred_pack;
if (cur_fanout)
start = get_pack_fanout(info[cur_pack].p, cur_fanout - 1);
@@ -602,7 +640,11 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
for (cur_object = start; cur_object < end; cur_object++) {
ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
- fill_pack_entry(cur_pack, info[cur_pack].p, cur_object, &entries_by_fanout[nr_fanout]);
+ fill_pack_entry(cur_pack,
+ info[cur_pack].p,
+ cur_object,
+ &entries_by_fanout[nr_fanout],
+ preferred);
nr_fanout++;
}
}
@@ -776,10 +818,80 @@ static int write_midx_large_offsets(struct hashfile *f,
return 0;
}
+struct midx_pack_order_data {
+ uint32_t nr;
+ uint32_t pack;
+ off_t offset;
+};
+
+static int midx_pack_order_cmp(const void *va, const void *vb)
+{
+ const struct midx_pack_order_data *a = va, *b = vb;
+ if (a->pack < b->pack)
+ return -1;
+ else if (a->pack > b->pack)
+ return 1;
+ else if (a->offset < b->offset)
+ return -1;
+ else if (a->offset > b->offset)
+ return 1;
+ else
+ return 0;
+}
+
+static uint32_t *midx_pack_order(struct write_midx_context *ctx)
+{
+ struct midx_pack_order_data *data;
+ uint32_t *pack_order;
+ uint32_t i;
+
+ ALLOC_ARRAY(data, ctx->entries_nr);
+ for (i = 0; i < ctx->entries_nr; i++) {
+ struct pack_midx_entry *e = &ctx->entries[i];
+ data[i].nr = i;
+ data[i].pack = ctx->pack_perm[e->pack_int_id];
+ if (!e->preferred)
+ data[i].pack |= (1U << 31);
+ data[i].offset = e->offset;
+ }
+
+ QSORT(data, ctx->entries_nr, midx_pack_order_cmp);
+
+ ALLOC_ARRAY(pack_order, ctx->entries_nr);
+ for (i = 0; i < ctx->entries_nr; i++)
+ pack_order[i] = data[i].nr;
+ free(data);
+
+ return pack_order;
+}
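
A standalone demonstration of the bit-31 trick midx_pack_order() uses to float the preferred pack to the front of the pseudo-pack order. This is a sketch under the assumption that pack ids stay well below 2^31, which holds for the uint32_t ids the MIDX assigns:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t preferred = 2;
	uint32_t packs[] = { 0, 2, 5 };
	int i;

	for (i = 0; i < 3; i++) {
		uint32_t key = packs[i];
		if (packs[i] != preferred)
			key |= (1U << 31);	/* non-preferred packs sort after */
		printf("pack %"PRIu32" -> sort key 0x%08"PRIx32"\n",
		       packs[i], key);
	}
	/* prints 0x80000000, 0x00000002, 0x80000005: pack 2 sorts first */
	return 0;
}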
+
+static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
+ struct write_midx_context *ctx)
+{
+ struct strbuf buf = STRBUF_INIT;
+ const char *tmp_file;
+
+ strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex(midx_hash));
+
+ tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
+ midx_hash, WRITE_REV);
+
+ if (finalize_object_file(tmp_file, buf.buf))
+ die(_("cannot store reverse index file"));
+
+ strbuf_release(&buf);
+}
+
+static void clear_midx_files_ext(struct repository *r, const char *ext,
+ unsigned char *keep_hash);
+
static int write_midx_internal(const char *object_dir, struct multi_pack_index *m,
- struct string_list *packs_to_drop, unsigned flags)
+ struct string_list *packs_to_drop,
+ const char *preferred_pack_name,
+ unsigned flags)
{
char *midx_name;
+ unsigned char midx_hash[GIT_MAX_RAWSZ];
uint32_t i;
struct hashfile *f = NULL;
struct lock_file lk;
@@ -828,7 +940,19 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
if (ctx.m && ctx.nr == ctx.m->num_packs && !packs_to_drop)
goto cleanup;
- ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr);
+ ctx.preferred_pack_idx = -1;
+ if (preferred_pack_name) {
+ for (i = 0; i < ctx.nr; i++) {
+ if (!cmp_idx_or_pack_name(preferred_pack_name,
+ ctx.info[i].pack_name)) {
+ ctx.preferred_pack_idx = i;
+ break;
+ }
+ }
+ }
+
+ ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr,
+ ctx.preferred_pack_idx);
ctx.large_offsets_needed = 0;
for (i = 0; i < ctx.entries_nr; i++) {
@@ -889,13 +1013,30 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
}
+ /* Check that the preferred pack wasn't expired (if given). */
+ if (preferred_pack_name) {
+ struct pack_info *preferred = bsearch(preferred_pack_name,
+ ctx.info, ctx.nr,
+ sizeof(*ctx.info),
+ idx_or_pack_name_cmp);
+
+ if (!preferred)
+ warning(_("unknown preferred pack: '%s'"),
+ preferred_pack_name);
+ else {
+ uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
+ if (perm == PACK_EXPIRED)
+ warning(_("preferred pack '%s' is expired"),
+ preferred_pack_name);
+ }
+ }
+
if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
(pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR);
f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
- FREE_AND_NULL(midx_name);
if (ctx.m)
close_midx(ctx.m);
@@ -927,8 +1068,16 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
write_chunkfile(cf, &ctx);
- finalize_hashfile(f, NULL, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
+ finalize_hashfile(f, midx_hash, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
free_chunkfile(cf);
+
+ if (flags & MIDX_WRITE_REV_INDEX)
+ ctx.pack_order = midx_pack_order(&ctx);
+
+ if (flags & MIDX_WRITE_REV_INDEX)
+ write_midx_reverse_index(midx_name, midx_hash, &ctx);
+ clear_midx_files_ext(the_repository, ".rev", midx_hash);
+
commit_lock_file(&lk);
cleanup:
@@ -943,13 +1092,55 @@ cleanup:
free(ctx.info);
free(ctx.entries);
free(ctx.pack_perm);
+ free(ctx.pack_order);
free(midx_name);
return result;
}
-int write_midx_file(const char *object_dir, unsigned flags)
+int write_midx_file(const char *object_dir,
+ const char *preferred_pack_name,
+ unsigned flags)
{
- return write_midx_internal(object_dir, NULL, NULL, flags);
+ return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name,
+ flags);
+}
+
+struct clear_midx_data {
+ char *keep;
+ const char *ext;
+};
+
+static void clear_midx_file_ext(const char *full_path, size_t full_path_len,
+ const char *file_name, void *_data)
+{
+ struct clear_midx_data *data = _data;
+
+ if (!(starts_with(file_name, "multi-pack-index-") &&
+ ends_with(file_name, data->ext)))
+ return;
+ if (data->keep && !strcmp(data->keep, file_name))
+ return;
+
+ if (unlink(full_path))
+ die_errno(_("failed to remove %s"), full_path);
+}
+
+static void clear_midx_files_ext(struct repository *r, const char *ext,
+ unsigned char *keep_hash)
+{
+ struct clear_midx_data data;
+ memset(&data, 0, sizeof(struct clear_midx_data));
+
+ if (keep_hash)
+ data.keep = xstrfmt("multi-pack-index-%s%s",
+ hash_to_hex(keep_hash), ext);
+ data.ext = ext;
+
+ for_each_file_in_pack_dir(r->objects->odb->path,
+ clear_midx_file_ext,
+ &data);
+
+ free(data.keep);
}
void clear_midx_file(struct repository *r)
@@ -964,6 +1155,8 @@ void clear_midx_file(struct repository *r)
if (remove_path(midx))
die(_("failed to clear multi-pack-index at %s"), midx);
+ clear_midx_files_ext(r, ".rev", NULL);
+
free(midx);
}
@@ -1144,7 +1337,7 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla
if (!m)
return 0;
- count = xcalloc(m->num_packs, sizeof(uint32_t));
+ CALLOC_ARRAY(count, m->num_packs);
if (flags & MIDX_PROGRESS)
progress = start_delayed_progress(_("Counting referenced objects"),
@@ -1184,7 +1377,7 @@ int expire_midx_packs(struct repository *r, const char *object_dir, unsigned fla
free(count);
if (packs_to_drop.nr)
- result = write_midx_internal(object_dir, m, &packs_to_drop, flags);
+ result = write_midx_internal(object_dir, m, &packs_to_drop, NULL, flags);
string_list_clear(&packs_to_drop, 0);
return result;
@@ -1315,7 +1508,7 @@ int midx_repack(struct repository *r, const char *object_dir, size_t batch_size,
if (!m)
return 0;
- include_pack = xcalloc(m->num_packs, sizeof(unsigned char));
+ CALLOC_ARRAY(include_pack, m->num_packs);
if (batch_size) {
if (fill_included_packs_batch(r, m, include_pack, batch_size))
@@ -1373,7 +1566,7 @@ int midx_repack(struct repository *r, const char *object_dir, size_t batch_size,
goto cleanup;
}
- result = write_midx_internal(object_dir, m, NULL, flags);
+ result = write_midx_internal(object_dir, m, NULL, NULL, flags);
m = NULL;
cleanup:
diff --git a/midx.h b/midx.h
index b18cf53..8684cf0 100644
--- a/midx.h
+++ b/midx.h
@@ -15,6 +15,10 @@ struct multi_pack_index {
const unsigned char *data;
size_t data_len;
+ const uint32_t *revindex_data;
+ const uint32_t *revindex_map;
+ size_t revindex_len;
+
uint32_t signature;
unsigned char version;
unsigned char hash_len;
@@ -36,10 +40,15 @@ struct multi_pack_index {
};
#define MIDX_PROGRESS (1 << 0)
+#define MIDX_WRITE_REV_INDEX (1 << 1)
+
+char *get_midx_rev_filename(struct multi_pack_index *m);
struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local);
int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id);
int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result);
+off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos);
+uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos);
struct object_id *nth_midxed_object_oid(struct object_id *oid,
struct multi_pack_index *m,
uint32_t n);
@@ -47,7 +56,7 @@ int fill_midx_entry(struct repository *r, const struct object_id *oid, struct pa
int midx_contains_pack(struct multi_pack_index *m, const char *idx_or_pack_name);
int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local);
-int write_midx_file(const char *object_dir, unsigned flags);
+int write_midx_file(const char *object_dir, const char *preferred_pack_name, unsigned flags);
void clear_midx_file(struct repository *r);
int verify_midx_file(struct repository *r, const char *object_dir, unsigned flags);
int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags);
diff --git a/name-hash.c b/name-hash.c
index 383cf58..7487d33 100644
--- a/name-hash.c
+++ b/name-hash.c
@@ -229,7 +229,7 @@ static void init_dir_mutex(void)
{
int j;
- lazy_dir_mutex_array = xcalloc(LAZY_MAX_MUTEX, sizeof(pthread_mutex_t));
+ CALLOC_ARRAY(lazy_dir_mutex_array, LAZY_MAX_MUTEX);
for (j = 0; j < LAZY_MAX_MUTEX; j++)
init_recursive_mutex(&lazy_dir_mutex_array[j]);
@@ -518,9 +518,9 @@ static void threaded_lazy_init_name_hash(
k_start = 0;
nr_each = DIV_ROUND_UP(istate->cache_nr, lazy_nr_dir_threads);
- lazy_entries = xcalloc(istate->cache_nr, sizeof(struct lazy_entry));
- td_dir = xcalloc(lazy_nr_dir_threads, sizeof(struct lazy_dir_thread_data));
- td_name = xcalloc(1, sizeof(struct lazy_name_thread_data));
+ CALLOC_ARRAY(lazy_entries, istate->cache_nr);
+ CALLOC_ARRAY(td_dir, lazy_nr_dir_threads);
+ CALLOC_ARRAY(td_name, 1);
init_dir_mutex();
diff --git a/negotiator/default.c b/negotiator/default.c
index 4b78f6b..434189a 100644
--- a/negotiator/default.c
+++ b/negotiator/default.c
@@ -167,7 +167,7 @@ void default_negotiator_init(struct fetch_negotiator *negotiator)
negotiator->next = next;
negotiator->ack = ack;
negotiator->release = release;
- negotiator->data = ns = xcalloc(1, sizeof(*ns));
+ negotiator->data = CALLOC_ARRAY(ns, 1);
ns->rev_list.compare = compare_commits_by_commit_date;
if (marked)
diff --git a/negotiator/skipping.c b/negotiator/skipping.c
index dffbc76..1236e79 100644
--- a/negotiator/skipping.c
+++ b/negotiator/skipping.c
@@ -62,7 +62,7 @@ static struct entry *rev_list_push(struct data *data, struct commit *commit, int
struct entry *entry;
commit->object.flags |= mark | SEEN;
- entry = xcalloc(1, sizeof(*entry));
+ CALLOC_ARRAY(entry, 1);
entry->commit = commit;
prio_queue_put(&data->rev_list, entry);
@@ -241,7 +241,7 @@ void skipping_negotiator_init(struct fetch_negotiator *negotiator)
negotiator->next = next;
negotiator->ack = ack;
negotiator->release = release;
- negotiator->data = data = xcalloc(1, sizeof(*data));
+ negotiator->data = CALLOC_ARRAY(data, 1);
data->rev_list.compare = compare;
if (marked)
diff --git a/notes-merge.c b/notes-merge.c
index 2fe724f..d2771fa 100644
--- a/notes-merge.c
+++ b/notes-merge.c
@@ -136,7 +136,7 @@ static struct notes_merge_pair *diff_tree_remote(struct notes_merge_options *o,
diff_tree_oid(base, remote, "", &opt);
diffcore_std(&opt);
- changes = xcalloc(diff_queued_diff.nr, sizeof(struct notes_merge_pair));
+ CALLOC_ARRAY(changes, diff_queued_diff.nr);
for (i = 0; i < diff_queued_diff.nr; i++) {
struct diff_filepair *p = diff_queued_diff.queue[i];
diff --git a/notes-utils.c b/notes-utils.c
index 4bf4888..d7d18e3 100644
--- a/notes-utils.c
+++ b/notes-utils.c
@@ -129,7 +129,7 @@ struct notes_rewrite_cfg *init_copy_notes_for_rewrite(const char *cmd)
c->cmd = cmd;
c->enabled = 1;
c->combine = combine_notes_concatenate;
- c->refs = xcalloc(1, sizeof(struct string_list));
+ CALLOC_ARRAY(c->refs, 1);
c->refs->strdup_strings = 1;
c->refs_from_env = 0;
c->mode_from_env = 0;
diff --git a/notes.c b/notes.c
index d5ac081..a19e4ad 100644
--- a/notes.c
+++ b/notes.c
@@ -452,7 +452,7 @@ static void load_subtree(struct notes_tree *t, struct leaf_node *subtree,
goto handle_non_note;
}
- l = xcalloc(1, sizeof(*l));
+ CALLOC_ARRAY(l, 1);
oidcpy(&l->key_oid, &object_oid);
oidcpy(&l->val_oid, &entry.oid);
if (note_tree_insert(t, node, n, l, type,
diff --git a/object-file.c b/object-file.c
index 5bcfde8..624af40 100644
--- a/object-file.c
+++ b/object-file.c
@@ -546,7 +546,7 @@ static int link_alt_odb_entry(struct repository *r, const char *entry,
return -1;
}
- ent = xcalloc(1, sizeof(*ent));
+ CALLOC_ARRAY(ent, 1);
ent->path = xstrdup(pathbuf.buf);
/* add the alternate entry */
diff --git a/object-store.h b/object-store.h
index 541dab0..ec32c23 100644
--- a/object-store.h
+++ b/object-store.h
@@ -153,6 +153,11 @@ struct raw_object_store {
/* A most-recently-used ordered version of the packed_git list. */
struct list_head packed_git_mru;
+ struct {
+ struct packed_git **packs;
+ unsigned flags;
+ } kept_pack_cache;
+
/*
* A map of packfiles to packed_git structs for tracking which
* packs have been loaded already.
diff --git a/object.c b/object.c
index 98017be..1418845 100644
--- a/object.c
+++ b/object.c
@@ -127,7 +127,7 @@ static void grow_object_hash(struct repository *r)
int new_hash_size = r->parsed_objects->obj_hash_size < 32 ? 32 : 2 * r->parsed_objects->obj_hash_size;
struct object **new_hash;
- new_hash = xcalloc(new_hash_size, sizeof(struct object *));
+ CALLOC_ARRAY(new_hash, new_hash_size);
for (i = 0; i < r->parsed_objects->obj_hash_size; i++) {
struct object *obj = r->parsed_objects->obj_hash[i];
@@ -177,12 +177,11 @@ void *object_as_type(struct object *obj, enum object_type type, int quiet)
}
}
-struct object *lookup_unknown_object(const struct object_id *oid)
+struct object *lookup_unknown_object(struct repository *r, const struct object_id *oid)
{
- struct object *obj = lookup_object(the_repository, oid);
+ struct object *obj = lookup_object(r, oid);
if (!obj)
- obj = create_object(the_repository, oid,
- alloc_object_node(the_repository));
+ obj = create_object(r, oid, alloc_object_node(r));
return obj;
}
@@ -478,7 +477,7 @@ struct parsed_object_pool *parsed_object_pool_new(void)
o->object_state = allocate_alloc_state();
o->is_shallow = -1;
- o->shallow_stat = xcalloc(1, sizeof(*o->shallow_stat));
+ CALLOC_ARRAY(o->shallow_stat, 1);
o->buffer_slab = allocate_commit_buffer_slab();
diff --git a/object.h b/object.h
index 59daadc..87a6da4 100644
--- a/object.h
+++ b/object.h
@@ -145,7 +145,7 @@ struct object *parse_object_or_die(const struct object_id *oid, const char *name
struct object *parse_object_buffer(struct repository *r, const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p);
/** Returns the object, with potentially excess memory allocated. **/
-struct object *lookup_unknown_object(const struct object_id *oid);
+struct object *lookup_unknown_object(struct repository *r, const struct object_id *oid);
struct object_list *object_list_insert(struct object *item,
struct object_list **list_p);
diff --git a/pack-bitmap.c b/pack-bitmap.c
index 1f69b5f..3ed1543 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -13,6 +13,7 @@
#include "repository.h"
#include "object-store.h"
#include "list-objects-filter-options.h"
+#include "config.h"
/*
* An entry on the bitmap index, representing the bitmap for a given
@@ -978,7 +979,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
/* try to open a bitmapped pack, but don't parse it yet
* because we may not need to use it */
- bitmap_git = xcalloc(1, sizeof(*bitmap_git));
+ CALLOC_ARRAY(bitmap_git, 1);
if (open_pack_bitmap(revs->repo, bitmap_git) < 0)
goto cleanup;
@@ -997,6 +998,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
object_list_insert(object, &wants);
object = parse_object_or_die(get_tagged_oid(tag), NULL);
+ object->flags |= (tag->object.flags & UNINTERESTING);
}
if (object->flags & UNINTERESTING)
@@ -1350,6 +1352,24 @@ void test_bitmap_walk(struct rev_info *revs)
free_bitmap_index(bitmap_git);
}
+int test_bitmap_commits(struct repository *r)
+{
+ struct bitmap_index *bitmap_git = prepare_bitmap_git(r);
+ struct object_id oid;
+ MAYBE_UNUSED void *value;
+
+ if (!bitmap_git)
+ die("failed to load bitmap indexes");
+
+ kh_foreach(bitmap_git->bitmaps, oid, value, {
+ printf("%s\n", oid_to_hex(&oid));
+ });
+
+ free_bitmap_index(bitmap_git);
+
+ return 0;
+}
+
int rebuild_bitmap(const uint32_t *reposition,
struct ewah_bitmap *source,
struct bitmap *dest)
@@ -1388,7 +1408,7 @@ uint32_t *create_bitmap_mapping(struct bitmap_index *bitmap_git,
uint32_t *reposition;
num_objects = bitmap_git->pack->num_objects;
- reposition = xcalloc(num_objects, sizeof(uint32_t));
+ CALLOC_ARRAY(reposition, num_objects);
for (i = 0; i < num_objects; ++i) {
struct object_id oid;
@@ -1511,3 +1531,8 @@ off_t get_disk_usage_from_bitmap(struct bitmap_index *bitmap_git,
return total;
}
+
+const struct string_list *bitmap_preferred_tips(struct repository *r)
+{
+ return repo_config_get_value_multi(r, "pack.preferbitmaptips");
+}
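
A usage sketch (not part of the patch; the function name is made up) showing how a caller would walk the multi-valued pack.preferBitmapTips entries; repo_config_get_value_multi() returns NULL when the key is unset:

static void example_list_preferred_tips(struct repository *r)
{
	const struct string_list *tips = bitmap_preferred_tips(r);
	struct string_list_item *item;

	if (!tips)
		return;	/* pack.preferBitmapTips not configured */
	for_each_string_list_item(item, tips)
		fprintf(stderr, "preferring refs under %s\n", item->string);
}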
diff --git a/pack-bitmap.h b/pack-bitmap.h
index 36d9993..78f2b3f 100644
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -5,6 +5,7 @@
#include "khash.h"
#include "pack.h"
#include "pack-objects.h"
+#include "string-list.h"
struct commit;
struct repository;
@@ -49,6 +50,7 @@ void traverse_bitmap_commit_list(struct bitmap_index *,
struct rev_info *revs,
show_reachable_fn show_reachable);
void test_bitmap_walk(struct rev_info *revs);
+int test_bitmap_commits(struct repository *r);
struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
struct list_objects_filter_options *filter);
int reuse_partial_packfile_from_bitmap(struct bitmap_index *,
@@ -90,4 +92,6 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
const char *filename,
uint16_t options);
+const struct string_list *bitmap_preferred_tips(struct repository *r);
+
#endif
diff --git a/pack-objects.c b/pack-objects.c
index f2a4338..fe2a4ea 100644
--- a/pack-objects.c
+++ b/pack-objects.c
@@ -49,7 +49,7 @@ static void rehash_objects(struct packing_data *pdata)
pdata->index_size = 1024;
free(pdata->index);
- pdata->index = xcalloc(pdata->index_size, sizeof(*pdata->index));
+ CALLOC_ARRAY(pdata->index, pdata->index_size);
entry = pdata->objects;
diff --git a/pack-revindex.c b/pack-revindex.c
index 83fe4de..0e4a31d 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -3,6 +3,7 @@
#include "object-store.h"
#include "packfile.h"
#include "config.h"
+#include "midx.h"
struct revindex_entry {
off_t offset;
@@ -253,7 +254,8 @@ cleanup:
*data_p = (const uint32_t *)data;
}
- close(fd);
+ if (fd >= 0)
+ close(fd);
return ret;
}
@@ -292,6 +294,43 @@ int load_pack_revindex(struct packed_git *p)
return -1;
}
+int load_midx_revindex(struct multi_pack_index *m)
+{
+ char *revindex_name;
+ int ret;
+ if (m->revindex_data)
+ return 0;
+
+ revindex_name = get_midx_rev_filename(m);
+
+ ret = load_revindex_from_disk(revindex_name,
+ m->num_objects,
+ &m->revindex_map,
+ &m->revindex_len);
+ if (ret)
+ goto cleanup;
+
+ m->revindex_data = (const uint32_t *)((const char *)m->revindex_map + RIDX_HEADER_SIZE);
+
+cleanup:
+ free(revindex_name);
+ return ret;
+}
+
+int close_midx_revindex(struct multi_pack_index *m)
+{
+ if (!m || !m->revindex_map)
+ return 0;
+
+ munmap((void*)m->revindex_map, m->revindex_len);
+
+ m->revindex_map = NULL;
+ m->revindex_data = NULL;
+ m->revindex_len = 0;
+
+ return 0;
+}
+
int offset_to_pack_pos(struct packed_git *p, off_t ofs, uint32_t *pos)
{
unsigned lo, hi;
@@ -346,3 +385,91 @@ off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos)
else
return nth_packed_object_offset(p, pack_pos_to_index(p, pos));
}
+
+uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos)
+{
+ if (!m->revindex_data)
+ BUG("pack_pos_to_midx: reverse index not yet loaded");
+ if (m->num_objects <= pos)
+ BUG("pack_pos_to_midx: out-of-bounds object at %"PRIu32, pos);
+ return get_be32(m->revindex_data + pos);
+}
+
+struct midx_pack_key {
+ uint32_t pack;
+ off_t offset;
+
+ uint32_t preferred_pack;
+ struct multi_pack_index *midx;
+};
+
+static int midx_pack_order_cmp(const void *va, const void *vb)
+{
+ const struct midx_pack_key *key = va;
+ struct multi_pack_index *midx = key->midx;
+
+ uint32_t versus = pack_pos_to_midx(midx, (uint32_t*)vb - (const uint32_t *)midx->revindex_data);
+ uint32_t versus_pack = nth_midxed_pack_int_id(midx, versus);
+ off_t versus_offset;
+
+ uint32_t key_preferred = key->pack == key->preferred_pack;
+ uint32_t versus_preferred = versus_pack == key->preferred_pack;
+
+ /*
+ * First, compare the preferred-ness, noting that the preferred pack
+ * comes first.
+ */
+ if (key_preferred && !versus_preferred)
+ return -1;
+ else if (!key_preferred && versus_preferred)
+ return 1;
+
+ /* Then, break ties first by comparing the pack IDs. */
+ if (key->pack < versus_pack)
+ return -1;
+ else if (key->pack > versus_pack)
+ return 1;
+
+ /* Finally, break ties by comparing offsets within a pack. */
+ versus_offset = nth_midxed_offset(midx, versus);
+ if (key->offset < versus_offset)
+ return -1;
+ else if (key->offset > versus_offset)
+ return 1;
+
+ return 0;
+}
+
+int midx_to_pack_pos(struct multi_pack_index *m, uint32_t at, uint32_t *pos)
+{
+ struct midx_pack_key key;
+ uint32_t *found;
+
+ if (!m->revindex_data)
+ BUG("midx_to_pack_pos: reverse index not yet loaded");
+ if (m->num_objects <= at)
+ BUG("midx_to_pack_pos: out-of-bounds object at %"PRIu32, at);
+
+ key.pack = nth_midxed_pack_int_id(m, at);
+ key.offset = nth_midxed_offset(m, at);
+ key.midx = m;
+ /*
+ * The preferred pack sorts first, so determine its identifier by
+ * looking at the first object in pseudo-pack order.
+ *
+ * Note that if no --preferred-pack is explicitly given when writing a
+ * multi-pack index, then whichever pack has the lowest identifier
+ * implicitly is preferred (and includes all its objects, since ties are
+ * broken first by pack identifier).
+ */
+ key.preferred_pack = nth_midxed_pack_int_id(m, pack_pos_to_midx(m, 0));
+
+ found = bsearch(&key, m->revindex_data, m->num_objects,
+ sizeof(*m->revindex_data), midx_pack_order_cmp);
+
+ if (!found)
+ return error("bad offset for revindex");
+
+ *pos = found - m->revindex_data;
+ return 0;
+}
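
A sanity-check sketch (illustrative only, not added by the patch) making the relationship between the two new conversions explicit; once load_midx_revindex() has succeeded, they are inverses of each other:

static void example_check_roundtrip(struct multi_pack_index *m,
				    uint32_t pack_pos)
{
	uint32_t midx_pos = pack_pos_to_midx(m, pack_pos);
	uint32_t back;

	if (midx_to_pack_pos(m, midx_pos, &back) < 0 || back != pack_pos)
		BUG("pseudo-pack position %"PRIu32" did not round-trip",
		    pack_pos);
}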
diff --git a/pack-revindex.h b/pack-revindex.h
index ba7c82c..479b8f2 100644
--- a/pack-revindex.h
+++ b/pack-revindex.h
@@ -14,6 +14,20 @@
*
* - offset: the byte offset within the .pack file at which the object contents
* can be found
+ *
+ * The revindex can also be used with a multi-pack index (MIDX). In this
+ * setting:
+ *
+ * - index position refers to an object's numeric position within the MIDX
+ *
+ * - pack position refers to an object's position within a non-existent pack
+ * described by the MIDX. The pack structure is described in
+ * Documentation/technical/pack-format.txt.
+ *
+ * It is effectively a concatenation of all packs in the MIDX (ordered by
+ * their numeric ID within the MIDX, with objects kept in their original
+ * order within each pack), removing duplicates, and placing the preferred
+ * pack (if any) first.
*/
@@ -24,6 +38,7 @@
#define GIT_TEST_REV_INDEX_DIE_IN_MEMORY "GIT_TEST_REV_INDEX_DIE_IN_MEMORY"
struct packed_git;
+struct multi_pack_index;
/*
* load_pack_revindex populates the revindex's internal data-structures for the
@@ -35,6 +50,22 @@ struct packed_git;
int load_pack_revindex(struct packed_git *p);
/*
+ * load_midx_revindex loads the '.rev' file corresponding to the given
+ * multi-pack index by mmap-ing it and assigning pointers in the
+ * multi_pack_index to point at it.
+ *
+ * A negative number is returned on error.
+ */
+int load_midx_revindex(struct multi_pack_index *m);
+
+/*
+ * Frees resources associated with a multi-pack reverse index.
+ *
+ * A negative number is returned on error.
+ */
+int close_midx_revindex(struct multi_pack_index *m);
+
+/*
* offset_to_pack_pos converts an object offset to a pack position. This
* function returns zero on success, and a negative number otherwise. The
* parameter 'pos' is usable only on success.
@@ -71,4 +102,26 @@ uint32_t pack_pos_to_index(struct packed_git *p, uint32_t pos);
*/
off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos);
+/*
+ * pack_pos_to_midx converts the object at position "pos" within the MIDX
+ * pseudo-pack into a MIDX position.
+ *
+ * If the reverse index has not yet been loaded, or the position is out of
+ * bounds, this function aborts.
+ *
+ * This function runs in constant time.
+ */
+uint32_t pack_pos_to_midx(struct multi_pack_index *m, uint32_t pos);
+
+/*
+ * midx_to_pack_pos converts from the MIDX-relative position at "at" to the
+ * corresponding pack position.
+ *
+ * If the reverse index has not yet been loaded, or the position is out of
+ * bounds, this function aborts.
+ *
+ * This function runs in time O(log N) with the number of objects in the MIDX.
+ */
+int midx_to_pack_pos(struct multi_pack_index *midx, uint32_t at, uint32_t *pos);
+
#endif
diff --git a/pack-write.c b/pack-write.c
index 2ca85a9..f1fc3ec 100644
--- a/pack-write.c
+++ b/pack-write.c
@@ -201,21 +201,12 @@ static void write_rev_header(struct hashfile *f)
}
static void write_rev_index_positions(struct hashfile *f,
- struct pack_idx_entry **objects,
+ uint32_t *pack_order,
uint32_t nr_objects)
{
- uint32_t *pack_order;
uint32_t i;
-
- ALLOC_ARRAY(pack_order, nr_objects);
- for (i = 0; i < nr_objects; i++)
- pack_order[i] = i;
- QSORT_S(pack_order, nr_objects, pack_order_cmp, objects);
-
for (i = 0; i < nr_objects; i++)
hashwrite_be32(f, pack_order[i]);
-
- free(pack_order);
}
static void write_rev_trailer(struct hashfile *f, const unsigned char *hash)
@@ -229,6 +220,29 @@ const char *write_rev_file(const char *rev_name,
const unsigned char *hash,
unsigned flags)
{
+ uint32_t *pack_order;
+ uint32_t i;
+ const char *ret;
+
+ ALLOC_ARRAY(pack_order, nr_objects);
+ for (i = 0; i < nr_objects; i++)
+ pack_order[i] = i;
+ QSORT_S(pack_order, nr_objects, pack_order_cmp, objects);
+
+ ret = write_rev_file_order(rev_name, pack_order, nr_objects, hash,
+ flags);
+
+ free(pack_order);
+
+ return ret;
+}
+
+const char *write_rev_file_order(const char *rev_name,
+ uint32_t *pack_order,
+ uint32_t nr_objects,
+ const unsigned char *hash,
+ unsigned flags)
+{
struct hashfile *f;
int fd;
@@ -262,7 +276,7 @@ const char *write_rev_file(const char *rev_name,
write_rev_header(f);
- write_rev_index_positions(f, objects, nr_objects);
+ write_rev_index_positions(f, pack_order, nr_objects);
write_rev_trailer(f, hash);
if (rev_name && adjust_shared_perm(rev_name) < 0)
diff --git a/pack.h b/pack.h
index 857cbd5..fa13954 100644
--- a/pack.h
+++ b/pack.h
@@ -94,6 +94,7 @@ struct ref;
void write_promisor_file(const char *promisor_name, struct ref **sought, int nr_sought);
const char *write_rev_file(const char *rev_name, struct pack_idx_entry **objects, uint32_t nr_objects, const unsigned char *hash, unsigned flags);
+const char *write_rev_file_order(const char *rev_name, uint32_t *pack_order, uint32_t nr_objects, const unsigned char *hash, unsigned flags);
/*
* The "hdr" output buffer should be at least this big, which will handle sizes
diff --git a/packfile.c b/packfile.c
index 1fec12a..b79cbc8 100644
--- a/packfile.c
+++ b/packfile.c
@@ -638,7 +638,7 @@ unsigned char *use_pack(struct packed_git *p,
if (p->pack_fd == -1 && open_packed_git(p))
die("packfile %s cannot be accessed", p->pack_name);
- win = xcalloc(1, sizeof(*win));
+ CALLOC_ARRAY(win, 1);
win->offset = (offset / window_align) * window_align;
len = p->pack_size - win->offset;
if (len > packed_git_window_size)
@@ -862,6 +862,9 @@ static void prepare_pack(const char *full_name, size_t full_name_len,
if (!strcmp(file_name, "multi-pack-index"))
return;
+ if (starts_with(file_name, "multi-pack-index") &&
+ ends_with(file_name, ".rev"))
+ return;
if (ends_with(file_name, ".idx") ||
ends_with(file_name, ".rev") ||
ends_with(file_name, ".pack") ||
@@ -2066,12 +2069,79 @@ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pa
return 0;
}
+static void maybe_invalidate_kept_pack_cache(struct repository *r,
+ unsigned flags)
+{
+ if (!r->objects->kept_pack_cache.packs)
+ return;
+ if (r->objects->kept_pack_cache.flags == flags)
+ return;
+ FREE_AND_NULL(r->objects->kept_pack_cache.packs);
+ r->objects->kept_pack_cache.flags = 0;
+}
+
+static struct packed_git **kept_pack_cache(struct repository *r, unsigned flags)
+{
+ maybe_invalidate_kept_pack_cache(r, flags);
+
+ if (!r->objects->kept_pack_cache.packs) {
+ struct packed_git **packs = NULL;
+ size_t nr = 0, alloc = 0;
+ struct packed_git *p;
+
+ /*
+ * We want "all" packs here, because we need to cover ones that
+ * are used by a midx, as well. We need to look in every one of
+ * them (instead of the midx itself) to cover duplicates. It's
+ * possible that an object is found in two packs that the midx
+ * covers, one kept and one not kept, but the midx returns only
+ * the non-kept version.
+ */
+ for (p = get_all_packs(r); p; p = p->next) {
+ if ((p->pack_keep && (flags & ON_DISK_KEEP_PACKS)) ||
+ (p->pack_keep_in_core && (flags & IN_CORE_KEEP_PACKS))) {
+ ALLOC_GROW(packs, nr + 1, alloc);
+ packs[nr++] = p;
+ }
+ }
+ ALLOC_GROW(packs, nr + 1, alloc);
+ packs[nr] = NULL;
+
+ r->objects->kept_pack_cache.packs = packs;
+ r->objects->kept_pack_cache.flags = flags;
+ }
+
+ return r->objects->kept_pack_cache.packs;
+}
+
+int find_kept_pack_entry(struct repository *r,
+ const struct object_id *oid,
+ unsigned flags,
+ struct pack_entry *e)
+{
+ struct packed_git **cache;
+
+ for (cache = kept_pack_cache(r, flags); *cache; cache++) {
+ struct packed_git *p = *cache;
+ if (fill_pack_entry(oid, e, p))
+ return 1;
+ }
+
+ return 0;
+}
+
int has_object_pack(const struct object_id *oid)
{
struct pack_entry e;
return find_pack_entry(the_repository, oid, &e);
}
+int has_object_kept_pack(const struct object_id *oid, unsigned flags)
+{
+ struct pack_entry e;
+ return find_kept_pack_entry(the_repository, oid, flags, &e);
+}
+
int has_pack_index(const unsigned char *sha1)
{
struct stat st;
@@ -2177,6 +2247,7 @@ static int add_promisor_object(const struct object_id *oid,
return 0;
while (tree_entry_gently(&desc, &entry))
oidset_insert(set, &entry.oid);
+ free_tree_buffer(tree);
} else if (obj->type == OBJ_COMMIT) {
struct commit *commit = (struct commit *) obj;
struct commit_list *parents = commit->parents;
diff --git a/packfile.h b/packfile.h
index 4cfec9e..3ae117a 100644
--- a/packfile.h
+++ b/packfile.h
@@ -162,13 +162,18 @@ int packed_object_info(struct repository *r,
void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1);
+#define ON_DISK_KEEP_PACKS 1
+#define IN_CORE_KEEP_PACKS 2
+
/*
* Iff a pack file in the given repository contains the object named by sha1,
* return true and store its location to e.
*/
int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e);
+int find_kept_pack_entry(struct repository *r, const struct object_id *oid, unsigned flags, struct pack_entry *e);
int has_object_pack(const struct object_id *oid);
+int has_object_kept_pack(const struct object_id *oid, unsigned flags);
int has_pack_index(const unsigned char *sha1);
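find_kept_pack_entry() above consults a lazily built, NULL-terminated list of packs that match the requested keep flags, and the list is discarded whenever a query arrives with different flags. Below is a standalone sketch of that flag-keyed lazy cache pattern; struct pack, the KEEP_* flags and kept_packs() are hypothetical simplifications, not the git API.

/* Standalone sketch of the flag-keyed cache used by kept_pack_cache(). */
#include <stdlib.h>

struct pack { struct pack *next; int keep_on_disk; int keep_in_core; };

#define KEEP_ON_DISK 1
#define KEEP_IN_CORE 2

static struct pack **cached;            /* NULL-terminated array of matches */
static unsigned cached_flags;

static struct pack **kept_packs(struct pack *all, unsigned flags)
{
        if (cached && cached_flags != flags) {  /* query changed: rebuild */
                free(cached);
                cached = NULL;
        }
        if (!cached) {
                size_t nr = 0, alloc = 8;
                struct pack **out = malloc(alloc * sizeof(*out));
                for (struct pack *p = all; p; p = p->next) {
                        if ((p->keep_on_disk && (flags & KEEP_ON_DISK)) ||
                            (p->keep_in_core && (flags & KEEP_IN_CORE))) {
                                if (nr + 1 >= alloc)
                                        out = realloc(out, (alloc *= 2) * sizeof(*out));
                                out[nr++] = p;
                        }
                }
                out[nr] = NULL;                 /* terminator, as in the diff */
                cached = out;
                cached_flags = flags;
        }
        return cached;
}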
diff --git a/parse-options.c b/parse-options.c
index fbea16e..e6f5676 100644
--- a/parse-options.c
+++ b/parse-options.c
@@ -625,6 +625,8 @@ static int show_gitcomp(const struct option *opts, int show_all)
*
* Right now this is only used to preprocess and substitute
* OPTION_ALIAS.
+ *
+ * The returned options should be freed using free_preprocessed_options.
*/
static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
const struct option *options)
@@ -678,6 +680,7 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
newopt[i].short_name = short_name;
newopt[i].long_name = long_name;
newopt[i].help = strbuf_detach(&help, NULL);
+ newopt[i].flags |= PARSE_OPT_FROM_ALIAS;
break;
}
@@ -693,6 +696,20 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
return newopt;
}
+static void free_preprocessed_options(struct option *options)
+{
+ int i;
+
+ if (!options)
+ return;
+
+ for (i = 0; options[i].type != OPTION_END; i++) {
+ if (options[i].flags & PARSE_OPT_FROM_ALIAS)
+ free((void *)options[i].help);
+ }
+ free(options);
+}
+
static int usage_with_options_internal(struct parse_opt_ctx_t *,
const char * const *,
const struct option *, int, int);
@@ -870,7 +887,7 @@ int parse_options(int argc, const char **argv, const char *prefix,
}
precompose_argv_prefix(argc, argv, NULL);
- free(real_options);
+ free_preprocessed_options(real_options);
free(ctx.alias_groups);
return parse_options_end(&ctx);
}
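preprocess_options() now tags alias-generated entries with PARSE_OPT_FROM_ALIAS so that free_preprocessed_options() releases only the help strings it detached, leaving entries copied from the caller's static table untouched. A standalone sketch of that ownership-flag pattern follows; struct opt and the OPT_* names are hypothetical, not the parse-options API.

/* Standalone sketch: only entries we allocated get their strings freed. */
#include <stdlib.h>
#include <string.h>

#define OPT_END        0
#define OPT_FROM_ALIAS (1 << 0)         /* hypothetical flag: help string is ours */

struct opt { int type; unsigned flags; const char *help; };

static void free_opts(struct opt *opts)
{
        if (!opts)
                return;
        for (int i = 0; opts[i].type != OPT_END; i++)
                if (opts[i].flags & OPT_FROM_ALIAS)
                        free((void *)opts[i].help);     /* strdup'd below */
        free(opts);
}

int main(void)
{
        struct opt *opts = calloc(3, sizeof(*opts));
        opts[0].type = 1; opts[0].help = "static help";         /* borrowed */
        opts[1].type = 1; opts[1].help = strdup("alias of --foo");
        opts[1].flags = OPT_FROM_ALIAS;                         /* owned */
        opts[2].type = OPT_END;
        free_opts(opts);
        return 0;
}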
diff --git a/parse-options.h b/parse-options.h
index ff6506a..a845a9d 100644
--- a/parse-options.h
+++ b/parse-options.h
@@ -28,26 +28,27 @@ enum parse_opt_type {
};
enum parse_opt_flags {
- PARSE_OPT_KEEP_DASHDASH = 1,
- PARSE_OPT_STOP_AT_NON_OPTION = 2,
- PARSE_OPT_KEEP_ARGV0 = 4,
- PARSE_OPT_KEEP_UNKNOWN = 8,
- PARSE_OPT_NO_INTERNAL_HELP = 16,
- PARSE_OPT_ONE_SHOT = 32
+ PARSE_OPT_KEEP_DASHDASH = 1 << 0,
+ PARSE_OPT_STOP_AT_NON_OPTION = 1 << 1,
+ PARSE_OPT_KEEP_ARGV0 = 1 << 2,
+ PARSE_OPT_KEEP_UNKNOWN = 1 << 3,
+ PARSE_OPT_NO_INTERNAL_HELP = 1 << 4,
+ PARSE_OPT_ONE_SHOT = 1 << 5,
};
enum parse_opt_option_flags {
- PARSE_OPT_OPTARG = 1,
- PARSE_OPT_NOARG = 2,
- PARSE_OPT_NONEG = 4,
- PARSE_OPT_HIDDEN = 8,
- PARSE_OPT_LASTARG_DEFAULT = 16,
- PARSE_OPT_NODASH = 32,
- PARSE_OPT_LITERAL_ARGHELP = 64,
- PARSE_OPT_SHELL_EVAL = 256,
- PARSE_OPT_NOCOMPLETE = 512,
- PARSE_OPT_COMP_ARG = 1024,
- PARSE_OPT_CMDMODE = 2048
+ PARSE_OPT_OPTARG = 1 << 0,
+ PARSE_OPT_NOARG = 1 << 1,
+ PARSE_OPT_NONEG = 1 << 2,
+ PARSE_OPT_HIDDEN = 1 << 3,
+ PARSE_OPT_LASTARG_DEFAULT = 1 << 4,
+ PARSE_OPT_NODASH = 1 << 5,
+ PARSE_OPT_LITERAL_ARGHELP = 1 << 6,
+ PARSE_OPT_FROM_ALIAS = 1 << 7,
+ PARSE_OPT_SHELL_EVAL = 1 << 8,
+ PARSE_OPT_NOCOMPLETE = 1 << 9,
+ PARSE_OPT_COMP_ARG = 1 << 10,
+ PARSE_OPT_CMDMODE = 1 << 11,
};
enum parse_opt_result {
diff --git a/patch-ids.c b/patch-ids.c
index 3f404e4..8bf4255 100644
--- a/patch-ids.c
+++ b/patch-ids.c
@@ -124,7 +124,7 @@ struct patch_id *add_commit_patch_id(struct commit *commit,
if (!patch_id_defined(commit))
return NULL;
- key = xcalloc(1, sizeof(*key));
+ CALLOC_ARRAY(key, 1);
if (init_patch_id_entry(key, commit, ids)) {
free(key);
return NULL;
diff --git a/path.c b/path.c
index 7b385e5..9e883eb 100644
--- a/path.c
+++ b/path.c
@@ -1534,5 +1534,6 @@ REPO_GIT_PATH_FUNC(merge_rr, "MERGE_RR")
REPO_GIT_PATH_FUNC(merge_mode, "MERGE_MODE")
REPO_GIT_PATH_FUNC(merge_head, "MERGE_HEAD")
REPO_GIT_PATH_FUNC(merge_autostash, "MERGE_AUTOSTASH")
+REPO_GIT_PATH_FUNC(auto_merge, "AUTO_MERGE")
REPO_GIT_PATH_FUNC(fetch_head, "FETCH_HEAD")
REPO_GIT_PATH_FUNC(shallow, "shallow")
diff --git a/path.h b/path.h
index e7e77da..251c78d 100644
--- a/path.h
+++ b/path.h
@@ -176,6 +176,7 @@ struct path_cache {
const char *merge_mode;
const char *merge_head;
const char *merge_autostash;
+ const char *auto_merge;
const char *fetch_head;
const char *shallow;
};
@@ -191,6 +192,7 @@ const char *git_path_merge_rr(struct repository *r);
const char *git_path_merge_mode(struct repository *r);
const char *git_path_merge_head(struct repository *r);
const char *git_path_merge_autostash(struct repository *r);
+const char *git_path_auto_merge(struct repository *r);
const char *git_path_fetch_head(struct repository *r);
const char *git_path_shallow(struct repository *r);
diff --git a/pathspec.c b/pathspec.c
index d67688b..14c7e9f 100644
--- a/pathspec.c
+++ b/pathspec.c
@@ -156,7 +156,7 @@ static void parse_pathspec_attr_match(struct pathspec_item *item, const char *va
string_list_remove_empty_items(&list, 0);
item->attr_check = attr_check_alloc();
- item->attr_match = xcalloc(list.nr, sizeof(struct attr_match));
+ CALLOC_ARRAY(item->attr_match, list.nr);
for_each_string_list_item(si, &list) {
size_t attr_len;
@@ -563,7 +563,7 @@ void parse_pathspec(struct pathspec *pathspec,
if (!(flags & PATHSPEC_PREFER_CWD))
BUG("PATHSPEC_PREFER_CWD requires arguments");
- pathspec->items = item = xcalloc(1, sizeof(*item));
+ pathspec->items = CALLOC_ARRAY(item, 1);
item->match = xstrdup(prefix);
item->original = xstrdup(prefix);
item->nowildcard_len = item->len = strlen(prefix);
diff --git a/perl/Git.pm b/perl/Git.pm
index 02eacef..73ebbf8 100644
--- a/perl/Git.pm
+++ b/perl/Git.pm
@@ -619,6 +619,19 @@ Return path to the git repository. Must be called on a repository instance.
sub repo_path { $_[0]->{opts}->{Repository} }
+=item hooks_path ()
+
+Return path to the hooks directory. Must be called on a repository instance.
+
+=cut
+
+sub hooks_path {
+ my ($self) = @_;
+
+ my $dir = $self->command_oneline('rev-parse', '--git-path', 'hooks');
+ my $abs = abs_path($dir);
+ return $abs;
+}
=item wc_path ()
diff --git a/pkt-line.c b/pkt-line.c
index d633005..98304ce 100644
--- a/pkt-line.c
+++ b/pkt-line.c
@@ -194,27 +194,53 @@ int packet_write_fmt_gently(int fd, const char *fmt, ...)
return status;
}
-static int packet_write_gently(const int fd_out, const char *buf, size_t size)
+static int do_packet_write(const int fd_out, const char *buf, size_t size,
+ struct strbuf *err)
{
- static char packet_write_buffer[LARGE_PACKET_MAX];
+ char header[4];
size_t packet_size;
- if (size > sizeof(packet_write_buffer) - 4)
- return error(_("packet write failed - data exceeds max packet size"));
+ if (size > LARGE_PACKET_DATA_MAX) {
+ strbuf_addstr(err, _("packet write failed - data exceeds max packet size"));
+ return -1;
+ }
packet_trace(buf, size, 1);
packet_size = size + 4;
- set_packet_header(packet_write_buffer, packet_size);
- memcpy(packet_write_buffer + 4, buf, size);
- if (write_in_full(fd_out, packet_write_buffer, packet_size) < 0)
- return error(_("packet write failed"));
+
+ set_packet_header(header, packet_size);
+
+ /*
+ * Write the header and the buffer in 2 parts so that we do
+ * not need to allocate a buffer or rely on a static buffer.
+ * This also avoids putting a large buffer on the stack which
+ * might have multi-threading issues.
+ */
+
+ if (write_in_full(fd_out, header, 4) < 0 ||
+ write_in_full(fd_out, buf, size) < 0) {
+ strbuf_addf(err, _("packet write failed: %s"), strerror(errno));
+ return -1;
+ }
+ return 0;
+}
+
+static int packet_write_gently(const int fd_out, const char *buf, size_t size)
+{
+ struct strbuf err = STRBUF_INIT;
+ if (do_packet_write(fd_out, buf, size, &err)) {
+ error("%s", err.buf);
+ strbuf_release(&err);
+ return -1;
+ }
return 0;
}
void packet_write(int fd_out, const char *buf, size_t size)
{
- if (packet_write_gently(fd_out, buf, size))
- die_errno(_("packet write failed"));
+ struct strbuf err = STRBUF_INIT;
+ if (do_packet_write(fd_out, buf, size, &err))
+ die("%s", err.buf);
}
void packet_buf_write(struct strbuf *buf, const char *fmt, ...)
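do_packet_write() above frames each payload as a pkt-line: a four-hex-digit length that counts the header itself plus the payload, written separately from the data so no large static or stack buffer is needed. Below is a standalone sketch of that framing; set_header is a simplified stand-in for set_packet_header. As a worked example, the six-byte payload "hello\n" becomes "000ahello\n" (6 + 4 = 10 = 0x000a).

/* Standalone sketch of pkt-line framing as done by do_packet_write(). */
#include <stdio.h>
#include <string.h>

static void set_header(char *dst, size_t total_len)
{
        static const char hex[] = "0123456789abcdef";
        dst[0] = hex[(total_len >> 12) & 0xf];
        dst[1] = hex[(total_len >> 8) & 0xf];
        dst[2] = hex[(total_len >> 4) & 0xf];
        dst[3] = hex[total_len & 0xf];
}

int main(void)
{
        const char *payload = "hello\n";
        char header[5] = "";                            /* 4 hex digits + NUL */
        set_header(header, strlen(payload) + 4);        /* 6 + 4 = 0x000a */
        printf("%s%s", header, payload);                /* prints "000ahello\n" */
        return 0;
}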
@@ -242,26 +268,27 @@ void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len)
packet_trace(data, len, 1);
}
-int write_packetized_from_fd(int fd_in, int fd_out)
+int write_packetized_from_fd_no_flush(int fd_in, int fd_out)
{
- static char buf[LARGE_PACKET_DATA_MAX];
+ char *buf = xmalloc(LARGE_PACKET_DATA_MAX);
int err = 0;
ssize_t bytes_to_write;
while (!err) {
- bytes_to_write = xread(fd_in, buf, sizeof(buf));
- if (bytes_to_write < 0)
+ bytes_to_write = xread(fd_in, buf, LARGE_PACKET_DATA_MAX);
+ if (bytes_to_write < 0) {
+ free(buf);
return COPY_READ_ERROR;
+ }
if (bytes_to_write == 0)
break;
err = packet_write_gently(fd_out, buf, bytes_to_write);
}
- if (!err)
- err = packet_flush_gently(fd_out);
+ free(buf);
return err;
}
-int write_packetized_from_buf(const char *src_in, size_t len, int fd_out)
+int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out)
{
int err = 0;
size_t bytes_written = 0;
@@ -277,8 +304,6 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out)
err = packet_write_gently(fd_out, src_in + bytes_written, bytes_to_write);
bytes_written += bytes_to_write;
}
- if (!err)
- err = packet_flush_gently(fd_out);
return err;
}
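The *_no_flush variants above no longer append the terminating flush packet, so the caller decides when the stream ends. A hedged sketch of the new calling convention, assuming it is compiled inside the git tree against pkt-line.h; send_two_payloads is a hypothetical helper, not part of the API.

/* Hedged sketch: with the *_no_flush variants the caller sends the flush
 * packet itself, e.g. once after several payloads. */
#include "pkt-line.h"

static int send_two_payloads(int fd, const char *a, size_t alen,
                             const char *b, size_t blen)
{
        if (write_packetized_from_buf_no_flush(a, alen, fd))
                return -1;
        if (write_packetized_from_buf_no_flush(b, blen, fd))
                return -1;
        return packet_flush_gently(fd); /* single flush terminates the stream */
}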
@@ -298,8 +323,11 @@ static int get_packet_data(int fd, char **src_buf, size_t *src_size,
*src_size -= ret;
} else {
ret = read_in_full(fd, dst, size);
- if (ret < 0)
+ if (ret < 0) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error_errno(_("read error"));
die_errno(_("read error"));
+ }
}
/* And complain if we didn't get enough bytes to satisfy the read. */
@@ -307,6 +335,8 @@ static int get_packet_data(int fd, char **src_buf, size_t *src_size,
if (options & PACKET_READ_GENTLE_ON_EOF)
return -1;
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("the remote end hung up unexpectedly"));
die(_("the remote end hung up unexpectedly"));
}
@@ -335,6 +365,9 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
len = packet_length(linelen);
if (len < 0) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("protocol error: bad line length "
+ "character: %.4s"), linelen);
die(_("protocol error: bad line length character: %.4s"), linelen);
} else if (!len) {
packet_trace("0000", 4, 0);
@@ -349,12 +382,19 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
*pktlen = 0;
return PACKET_READ_RESPONSE_END;
} else if (len < 4) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("protocol error: bad line length %d"),
+ len);
die(_("protocol error: bad line length %d"), len);
}
len -= 4;
- if ((unsigned)len >= size)
+ if ((unsigned)len >= size) {
+ if (options & PACKET_READ_GENTLE_ON_READ_ERROR)
+ return error(_("protocol error: bad line length %d"),
+ len);
die(_("protocol error: bad line length %d"), len);
+ }
if (get_packet_data(fd, src_buffer, src_len, buffer, len, options) < 0) {
*pktlen = -1;
@@ -421,7 +461,7 @@ char *packet_read_line_buf(char **src, size_t *src_len, int *dst_len)
return packet_read_line_generic(-1, src, src_len, dst_len);
}
-ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out)
+ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out, int options)
{
int packet_len;
@@ -437,7 +477,7 @@ ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out)
* that there is already room for the extra byte.
*/
sb_out->buf + sb_out->len, LARGE_PACKET_DATA_MAX+1,
- PACKET_READ_GENTLE_ON_EOF);
+ options);
if (packet_len <= 0)
break;
sb_out->len += packet_len;
diff --git a/pkt-line.h b/pkt-line.h
index 8c90daa..5af5f45 100644
--- a/pkt-line.h
+++ b/pkt-line.h
@@ -32,8 +32,8 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((f
void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len);
int packet_flush_gently(int fd);
int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
-int write_packetized_from_fd(int fd_in, int fd_out);
-int write_packetized_from_buf(const char *src_in, size_t len, int fd_out);
+int write_packetized_from_fd_no_flush(int fd_in, int fd_out);
+int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out);
/*
* Read a packetized line into the buffer, which must be at least size bytes
@@ -68,10 +68,15 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out);
*
* If options contains PACKET_READ_DIE_ON_ERR_PACKET, it dies when it sees an
* ERR packet.
+ *
+ * If options contains PACKET_READ_GENTLE_ON_READ_ERROR, we will not die
+ * on read errors, but instead return -1. However, we may still die on an
+ * ERR packet (if requested).
*/
-#define PACKET_READ_GENTLE_ON_EOF (1u<<0)
-#define PACKET_READ_CHOMP_NEWLINE (1u<<1)
-#define PACKET_READ_DIE_ON_ERR_PACKET (1u<<2)
+#define PACKET_READ_GENTLE_ON_EOF (1u<<0)
+#define PACKET_READ_CHOMP_NEWLINE (1u<<1)
+#define PACKET_READ_DIE_ON_ERR_PACKET (1u<<2)
+#define PACKET_READ_GENTLE_ON_READ_ERROR (1u<<3)
int packet_read(int fd, char **src_buffer, size_t *src_len, char
*buffer, unsigned size, int options);
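With PACKET_READ_GENTLE_ON_READ_ERROR the read path reports malformed lengths and short reads via error() and returns instead of dying, so a caller can recover. A hedged sketch of such a caller, assuming it is compiled inside the git tree against pkt-line.h; read_one_packet_gently is a hypothetical helper.

/* Hedged sketch: read one packet without dying on a malformed length
 * or a short read; a negative return means EOF or a reported error. */
#include "pkt-line.h"

static int read_one_packet_gently(int fd, char *buf, unsigned size)
{
        int len = packet_read(fd, NULL, NULL, buf, size,
                              PACKET_READ_GENTLE_ON_EOF |
                              PACKET_READ_GENTLE_ON_READ_ERROR |
                              PACKET_READ_CHOMP_NEWLINE);
        if (len < 0)
                return -1;      /* already reported via error() */
        return len;
}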
@@ -131,7 +136,7 @@ char *packet_read_line_buf(char **src_buf, size_t *src_len, int *size);
/*
* Reads a stream of variable sized packets until a flush packet is detected.
*/
-ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out);
+ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out, int options);
/*
* Receive multiplexed output stream over git native protocol.
diff --git a/po/TEAMS b/po/TEAMS
index 677cece..a32beb6 100644
--- a/po/TEAMS
+++ b/po/TEAMS
@@ -29,6 +29,10 @@ Repository: https://github.com/jnavila/git
Leader: Jean-Noël Avila <jn.avila@free.fr>
Members: Sébastien Helleu <flashcode@flashtux.org>
+Language: id (Indonesian)
+Repository: https://github.com/bagasme/git-po
+Leader: Bagas Sanjaya <bagasdotme@gmail.com>
+
Language: is (Icelandic)
Leader: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
diff --git a/po/bg.po b/po/bg.po
index d73e84c..529ea97 100644
--- a/po/bg.po
+++ b/po/bg.po
@@ -1,7 +1,7 @@
# Bulgarian translation of git po-file.
-# Copyright (C) 2014, 2015, 2016, 2017, 2018, 2019, 2020 Alexander Shopov <ash@kambanaria.org>.
+# Copyright (C) 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Alexander Shopov <ash@kambanaria.org>.
# This file is distributed under the same license as the git package.
-# Alexander Shopov <ash@kambanaria.org>, 2014, 2015, 2016, 2017, 2018, 2019, 2020.
+# Alexander Shopov <ash@kambanaria.org>, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021.
#
# ========================
# DICTIONARY TO MERGE IN GIT GUI
@@ -38,6 +38,7 @@
# graft присадка
# grafted repository хранилище с присаждане
# replace refs заместващи указатели
+# replace objects заместващи обекти
# embedded repository вградено/вътрешно хранилище (добавянето му е грешка)
# thin pack съкратен пакет
# pack file пакетен файл
@@ -149,6 +150,8 @@
# identity самоличност, информация за
# boundary commit гранично подаване
# integrate (changes) внасяне (на промени)
+# overflow data данни за отместването
+# reverse index обратен индекс (а не обърнат, за да не се бърка с reverse key index)
# ------------------------
# „$var“ - може да не сработва за shell има gettext и eval_gettext - проверка - намират се лесно по „$
# ------------------------
@@ -165,10 +168,10 @@
# for i in `sort -u FILES`; do cnt=`grep $i FILES | wc -l`; echo $cnt $i ;done | sort -n
msgid ""
msgstr ""
-"Project-Id-Version: git 2.30\n"
+"Project-Id-Version: git 2.31\n"
"Report-Msgid-Bugs-To: Git Mailing List <git@vger.kernel.org>\n"
-"POT-Creation-Date: 2020-12-21 07:10+0800\n"
-"PO-Revision-Date: 2020-12-22 17:48+0100\n"
+"POT-Creation-Date: 2021-03-04 22:41+0800\n"
+"PO-Revision-Date: 2021-03-05 12:11+0100\n"
"Last-Translator: Alexander Shopov <ash@kambanaria.org>\n"
"Language-Team: Bulgarian <dict@fsa-bg.org>\n"
"Language: bg\n"
@@ -182,9 +185,9 @@ msgstr ""
msgid "Huh (%s)?"
msgstr "Неуспешен анализ — „%s“."
-#: add-interactive.c:529 add-interactive.c:830 reset.c:65 sequencer.c:3284
-#: sequencer.c:3735 sequencer.c:3890 builtin/rebase.c:1532
-#: builtin/rebase.c:1955
+#: add-interactive.c:529 add-interactive.c:830 reset.c:65 sequencer.c:3292
+#: sequencer.c:3743 sequencer.c:3898 builtin/rebase.c:1538
+#: builtin/rebase.c:1963
msgid "could not read index"
msgstr "индексът не може да бъде прочетен"
@@ -212,7 +215,7 @@ msgstr "Обновяване"
msgid "could not stage '%s'"
msgstr "неуспешно добавяне в индекса на „%s“"
-#: add-interactive.c:703 add-interactive.c:892 reset.c:89 sequencer.c:3478
+#: add-interactive.c:703 add-interactive.c:892 reset.c:89 sequencer.c:3486
msgid "could not write index"
msgstr "индексът не може да бъде записан"
@@ -370,7 +373,7 @@ msgstr "извън индекса"
#: add-interactive.c:1144 apply.c:4987 apply.c:4990 builtin/am.c:2257
#: builtin/am.c:2260 builtin/bugreport.c:134 builtin/clone.c:124
-#: builtin/fetch.c:147 builtin/merge.c:284 builtin/pull.c:190
+#: builtin/fetch.c:150 builtin/merge.c:285 builtin/pull.c:190
#: builtin/submodule--helper.c:409 builtin/submodule--helper.c:1818
#: builtin/submodule--helper.c:1821 builtin/submodule--helper.c:2326
#: builtin/submodule--helper.c:2329 builtin/submodule--helper.c:2572
@@ -969,7 +972,7 @@ msgstr ""
msgid "Exiting because of an unresolved conflict."
msgstr "Изход от програмата заради некоригиран конфликт."
-#: advice.c:281 builtin/merge.c:1369
+#: advice.c:281 builtin/merge.c:1370
msgid "You have not concluded your merge (MERGE_HEAD exists)."
msgstr "Не сте завършили сливане. (Указателят „MERGE_HEAD“ съществува)."
@@ -1285,7 +1288,8 @@ msgstr "неуспешно прилагане на кръпка: „%s:%ld“"
msgid "cannot checkout %s"
msgstr "„%s“ не може да се изтегли"
-#: apply.c:3405 apply.c:3416 apply.c:3462 midx.c:73 setup.c:308
+#: apply.c:3405 apply.c:3416 apply.c:3462 midx.c:86 pack-revindex.c:213
+#: setup.c:308
#, c-format
msgid "failed to read %s"
msgstr "файлът „%s“ не може да бъде прочетен"
@@ -1450,7 +1454,7 @@ msgstr ""
msgid "unable to add cache entry for %s"
msgstr "не може да се добави запис в кеша за „%s“"
-#: apply.c:4374 builtin/bisect--helper.c:524
+#: apply.c:4374 builtin/bisect--helper.c:523
#, c-format
msgid "failed to write to '%s'"
msgstr "в „%s“ не може да се пише"
@@ -1486,7 +1490,7 @@ msgstr[1] "Прилагане на кръпката „%%s“ с %d отхвър
msgid "truncating .rej filename to %.*s.rej"
msgstr "съкращаване на името на файла с отхвърлените парчета на „ %.*s.rej“"
-#: apply.c:4576 builtin/fetch.c:927 builtin/fetch.c:1228
+#: apply.c:4576 builtin/fetch.c:933 builtin/fetch.c:1334
#, c-format
msgid "cannot open %s"
msgstr "„%s“ не може да бъде отворен"
@@ -1542,7 +1546,7 @@ msgstr[0] ""
msgstr[1] ""
"Добавени са %d реда след корекцията на грешките в знаците за интервали."
-#: apply.c:4960 builtin/add.c:618 builtin/mv.c:304 builtin/rm.c:406
+#: apply.c:4960 builtin/add.c:626 builtin/mv.c:304 builtin/rm.c:406
msgid "Unable to write new index file"
msgstr "Новият индекс не може да бъде записан"
@@ -1613,7 +1617,7 @@ msgid "build a temporary index based on embedded index information"
msgstr ""
"създаване на временен индекс на база на включената информация за индекса"
-#: apply.c:5025 builtin/checkout-index.c:182 builtin/ls-files.c:525
+#: apply.c:5025 builtin/checkout-index.c:195 builtin/ls-files.c:540
msgid "paths are separated with NUL character"
msgstr "разделяне на пътищата с нулевия знак „NUL“"
@@ -1623,7 +1627,7 @@ msgstr "да се осигури контекст от поне такъв БР
#: apply.c:5028 builtin/am.c:2245 builtin/interpret-trailers.c:98
#: builtin/interpret-trailers.c:100 builtin/interpret-trailers.c:102
-#: builtin/pack-objects.c:3562 builtin/rebase.c:1346
+#: builtin/pack-objects.c:3577 builtin/rebase.c:1352
msgid "action"
msgstr "действие"
@@ -1652,9 +1656,9 @@ msgstr "оставяне на отхвърлените парчета във ф
msgid "allow overlapping hunks"
msgstr "позволяване на застъпващи се парчета"
-#: apply.c:5045 builtin/add.c:329 builtin/check-ignore.c:22
-#: builtin/commit.c:1364 builtin/count-objects.c:98 builtin/fsck.c:775
-#: builtin/log.c:2287 builtin/mv.c:123 builtin/read-tree.c:128
+#: apply.c:5045 builtin/add.c:337 builtin/check-ignore.c:22
+#: builtin/commit.c:1364 builtin/count-objects.c:98 builtin/fsck.c:757
+#: builtin/log.c:2286 builtin/mv.c:123 builtin/read-tree.c:128
msgid "be verbose"
msgstr "повече подробности"
@@ -1742,14 +1746,14 @@ msgstr "git archive --remote ХРАНИЛИЩЕ [--exec КОМАНДА] --list"
msgid "cannot read %s"
msgstr "обектът „%s“ не може да бъде прочетен"
-#: archive.c:345 sequencer.c:459 sequencer.c:1736 sequencer.c:2886
-#: sequencer.c:3327 sequencer.c:3436 builtin/am.c:249 builtin/commit.c:786
-#: builtin/merge.c:1138
+#: archive.c:345 sequencer.c:459 sequencer.c:1744 sequencer.c:2894
+#: sequencer.c:3335 sequencer.c:3444 builtin/am.c:249 builtin/commit.c:786
+#: builtin/merge.c:1139
#, c-format
msgid "could not read '%s'"
msgstr "файлът „%s“ не може да бъде прочетен"
-#: archive.c:430 builtin/add.c:181 builtin/add.c:594 builtin/rm.c:315
+#: archive.c:430 builtin/add.c:189 builtin/add.c:602 builtin/rm.c:315
#, c-format
msgid "pathspec '%s' did not match any files"
msgstr "пътят „%s“ не съвпада с никой файл"
@@ -1791,7 +1795,7 @@ msgstr "ФОРМАТ"
msgid "archive format"
msgstr "ФОРМАТ на архива"
-#: archive.c:556 builtin/log.c:1765
+#: archive.c:556 builtin/log.c:1764
msgid "prefix"
msgstr "ПРЕФИКС"
@@ -1799,11 +1803,11 @@ msgstr "ПРЕФИКС"
msgid "prepend prefix to each pathname in the archive"
msgstr "добавяне на този ПРЕФИКС към всеки път в архива"
-#: archive.c:558 archive.c:561 builtin/blame.c:886 builtin/blame.c:890
-#: builtin/blame.c:891 builtin/commit-tree.c:117 builtin/config.c:135
+#: archive.c:558 archive.c:561 builtin/blame.c:884 builtin/blame.c:888
+#: builtin/blame.c:889 builtin/commit-tree.c:117 builtin/config.c:135
#: builtin/fast-export.c:1207 builtin/fast-export.c:1209
-#: builtin/fast-export.c:1213 builtin/grep.c:919 builtin/hash-object.c:105
-#: builtin/ls-files.c:561 builtin/ls-files.c:564 builtin/notes.c:412
+#: builtin/fast-export.c:1213 builtin/grep.c:920 builtin/hash-object.c:105
+#: builtin/ls-files.c:576 builtin/ls-files.c:579 builtin/notes.c:412
#: builtin/notes.c:578 builtin/read-tree.c:123 parse-options.h:190
msgid "file"
msgstr "ФАЙЛ"
@@ -1968,12 +1972,12 @@ msgstr "Двоично търсене: трябва да се провери б
msgid "a %s revision is needed"
msgstr "необходима е версия „%s“"
-#: bisect.c:941 builtin/notes.c:177 builtin/tag.c:255
+#: bisect.c:941 builtin/notes.c:177 builtin/tag.c:287
#, c-format
msgid "could not create file '%s'"
msgstr "файлът „%s“ не може да бъде създаден"
-#: bisect.c:987 builtin/merge.c:152
+#: bisect.c:987 builtin/merge.c:153
#, c-format
msgid "could not read file '%s'"
msgstr "файлът „%s“ не може да бъде прочетен"
@@ -1991,7 +1995,7 @@ msgstr "„%s“ e както „%s“, така и „%s“\n"
#, c-format
msgid ""
"No testable commit found.\n"
-"Maybe you started with bad path parameters?\n"
+"Maybe you started with bad path arguments?\n"
msgstr ""
"Липсва подходящо за тестване подаване.\n"
"Проверете параметрите за пътищата.\n"
@@ -2027,11 +2031,11 @@ msgstr ""
"Едновременното задаване на опциите „--reverse“ и „--first-parent“ изисква "
"указването на крайно подаване"
-#: blame.c:2821 bundle.c:213 ref-filter.c:2272 remote.c:2031 sequencer.c:2138
-#: sequencer.c:4633 submodule.c:855 builtin/commit.c:1045 builtin/log.c:409
-#: builtin/log.c:1023 builtin/log.c:1625 builtin/log.c:2046 builtin/log.c:2336
-#: builtin/merge.c:423 builtin/pack-objects.c:3380 builtin/pack-objects.c:3395
-#: builtin/shortlog.c:267
+#: blame.c:2821 bundle.c:213 ref-filter.c:2206 remote.c:2041 sequencer.c:2146
+#: sequencer.c:4641 submodule.c:856 builtin/commit.c:1045 builtin/log.c:411
+#: builtin/log.c:1016 builtin/log.c:1624 builtin/log.c:2045 builtin/log.c:2335
+#: builtin/merge.c:424 builtin/pack-objects.c:3395 builtin/pack-objects.c:3410
+#: builtin/shortlog.c:255
msgid "revision walk setup failed"
msgstr "неуспешно настройване на обхождането на версиите"
@@ -2210,7 +2214,7 @@ msgstr "Файлът „%s“ не изглежда да е пратка на gi
msgid "unrecognized header: %s%s (%d)"
msgstr "непозната заглавна част: %s%s (%d)"
-#: bundle.c:136 rerere.c:480 rerere.c:690 sequencer.c:2390 sequencer.c:3176
+#: bundle.c:136 rerere.c:464 rerere.c:674 sequencer.c:2398 sequencer.c:3184
#: builtin/commit.c:814
#, c-format
msgid "could not open '%s'"
@@ -2254,286 +2258,301 @@ msgstr "Командата „git pack-objects“ не може да бъде с
msgid "pack-objects died"
msgstr "Командата „git pack-objects“ не завърши успешно"
-#: bundle.c:379
-msgid "rev-list died"
-msgstr "Командата „git rev-list“ не завърши успешно"
-
-#: bundle.c:428
+#: bundle.c:386
#, c-format
msgid "ref '%s' is excluded by the rev-list options"
msgstr ""
"указателят „%s“ не е бил включен поради опциите зададени на „git rev-list“"
-#: bundle.c:498
+#: bundle.c:490
#, c-format
msgid "unsupported bundle version %d"
msgstr "неподдържана версия на индекса %d"
-#: bundle.c:500
+#: bundle.c:492
#, c-format
msgid "cannot write bundle version %d with algorithm %s"
msgstr "пратка %d не може да се запише с алгоритъм %s"
-#: bundle.c:522 builtin/log.c:209 builtin/log.c:1927 builtin/shortlog.c:408
+#: bundle.c:510 builtin/log.c:210 builtin/log.c:1926 builtin/shortlog.c:396
#, c-format
msgid "unrecognized argument: %s"
msgstr "непознат аргумент: %s"
-#: bundle.c:530
+#: bundle.c:539
msgid "Refusing to create empty bundle."
msgstr "Създаването на празна пратка е невъзможно."
-#: bundle.c:540
+#: bundle.c:549
#, c-format
msgid "cannot create '%s'"
-msgstr "Файлът „%s“ не може да бъде създаден"
+msgstr "файлът „%s“ не може да бъде създаден"
-#: bundle.c:565
+#: bundle.c:574
msgid "index-pack died"
-msgstr "Командата „git index-pack“ не завърши успешно"
+msgstr "командата „git index-pack“ не завърши успешно"
+
+#: chunk-format.c:113
+msgid "terminating chunk id appears earlier than expected"
+msgstr "идентификаторът за краен откъс се явява по-рано от очакваното"
+
+#: chunk-format.c:122
+#, c-format
+msgid "improper chunk offset(s) %<PRIx64> and %<PRIx64>"
+msgstr "неправилно отместване на откъс/и %<PRIx64> и %<PRIx64>"
+
+#: chunk-format.c:129
+#, c-format
+msgid "duplicate chunk ID %<PRIx32> found"
+msgstr "повтарящ се идентификатор на откъс %<PRIx32>"
+
+#: chunk-format.c:143
+#, c-format
+msgid "final chunk has non-zero id %<PRIx32>"
+msgstr "ненулев идентификатор за краен откъс %<PRIx32>"
#: color.c:329
#, c-format
msgid "invalid color value: %.*s"
msgstr "неправилна стойност за цвят: %.*s"
-#: commit-graph.c:188 midx.c:47
+#: commit-graph.c:197 midx.c:46
msgid "invalid hash version"
msgstr "неправилна версия на контролна сума"
-#: commit-graph.c:246
+#: commit-graph.c:255
msgid "commit-graph file is too small"
-msgstr "файлът с гра̀фа на подаванията е твърде малък"
+msgstr "файлът за гра̀фа с подаванията е твърде малък"
-#: commit-graph.c:311
+#: commit-graph.c:348
#, c-format
msgid "commit-graph signature %X does not match signature %X"
msgstr "отпечатъкът на гра̀фа с подаванията %X не съвпада с %X"
-#: commit-graph.c:318
+#: commit-graph.c:355
#, c-format
msgid "commit-graph version %X does not match version %X"
msgstr "версията на гра̀фа с подаванията %X не съвпада с %X"
-#: commit-graph.c:325
+#: commit-graph.c:362
#, c-format
msgid "commit-graph hash version %X does not match version %X"
msgstr "версията на контролната сума на гра̀фа с подаванията %X не съвпада с %X"
-#: commit-graph.c:342
+#: commit-graph.c:379
#, c-format
msgid "commit-graph file is too small to hold %u chunks"
msgstr "файлът с гра̀фа на подаванията е твърде малък, за да съдържа %u откъси"
-#: commit-graph.c:361
-#, c-format
-msgid "commit-graph improper chunk offset %08x%08x"
-msgstr "неправилно отместване на откъс: %08x%08x"
-
-#: commit-graph.c:433
-#, c-format
-msgid "commit-graph chunk id %08x appears multiple times"
-msgstr "откъсът %08x се явява многократно"
-
-#: commit-graph.c:499
+#: commit-graph.c:472
msgid "commit-graph has no base graphs chunk"
msgstr "базовият откъс липсва в гра̀фа с подаванията"
-#: commit-graph.c:509
+#: commit-graph.c:482
msgid "commit-graph chain does not match"
msgstr "веригата на гра̀фа с подаванията не съвпада"
-#: commit-graph.c:557
+#: commit-graph.c:530
#, c-format
msgid "invalid commit-graph chain: line '%s' not a hash"
msgstr ""
"грешка във веригата на гра̀фа с подаванията: ред „%s“ не е контролна сума"
-#: commit-graph.c:581
+#: commit-graph.c:554
msgid "unable to find all commit-graph files"
msgstr "някои файлове на гра̀фа с подаванията не могат да бъдат открити"
-#: commit-graph.c:721 commit-graph.c:785
+#: commit-graph.c:735 commit-graph.c:772
msgid "invalid commit position. commit-graph is likely corrupt"
msgstr ""
"неправилна позиция на подаването. Вероятно графът с подаванията е повреден"
-#: commit-graph.c:742
+#: commit-graph.c:756
#, c-format
msgid "could not find commit %s"
msgstr "подаването „%s“ не може да бъде открито"
-#: commit-graph.c:1036 builtin/am.c:1292
+#: commit-graph.c:789
+msgid "commit-graph requires overflow generation data but has none"
+msgstr ""
+"графът с подаванията изисква генериране на данни за отместването, но такива "
+"липсват"
+
+#: commit-graph.c:1065 builtin/am.c:1292
#, c-format
msgid "unable to parse commit %s"
msgstr "подаването не може да бъде анализирано: %s"
-#: commit-graph.c:1252 builtin/pack-objects.c:2864
+#: commit-graph.c:1327 builtin/pack-objects.c:2872
#, c-format
msgid "unable to get type of object %s"
msgstr "видът на обекта „%s“ не може да бъде определен"
-#: commit-graph.c:1283
+#: commit-graph.c:1358
msgid "Loading known commits in commit graph"
msgstr "Зареждане на познатите подавания в гра̀фа с подаванията"
-#: commit-graph.c:1300
+#: commit-graph.c:1375
msgid "Expanding reachable commits in commit graph"
msgstr "Разширяване на достижимите подавания в гра̀фа"
-#: commit-graph.c:1320
+#: commit-graph.c:1395
msgid "Clearing commit marks in commit graph"
msgstr "Изчистване на отбелязванията на подаванията в гра̀фа с подаванията"
-#: commit-graph.c:1339
+#: commit-graph.c:1414
+msgid "Computing commit graph topological levels"
+msgstr "Изчисляване на топологичните нива в гра̀фа с подаванията"
+
+#: commit-graph.c:1467
msgid "Computing commit graph generation numbers"
msgstr "Изчисляване на номерата на поколенията в гра̀фа с подаванията"
-#: commit-graph.c:1406
+#: commit-graph.c:1548
msgid "Computing commit changed paths Bloom filters"
msgstr "Изчисляване на филтрите на Блум на пътищата с промяна при подаването"
-#: commit-graph.c:1483
+#: commit-graph.c:1625
msgid "Collecting referenced commits"
msgstr "Събиране на свързаните подавания"
-#: commit-graph.c:1508
+#: commit-graph.c:1650
#, c-format
msgid "Finding commits for commit graph in %d pack"
msgid_plural "Finding commits for commit graph in %d packs"
msgstr[0] "Откриване на подаванията в гра̀фа в %d пакетен файл"
msgstr[1] "Откриване на подаванията в гра̀фа в %d пакетни файла"
-#: commit-graph.c:1521
+#: commit-graph.c:1663
#, c-format
msgid "error adding pack %s"
msgstr "грешка при добавяне на пакетен файл „%s“"
-#: commit-graph.c:1525
+#: commit-graph.c:1667
#, c-format
msgid "error opening index for %s"
msgstr "грешка при отваряне на индекса на „%s“"
-#: commit-graph.c:1562
+#: commit-graph.c:1704
msgid "Finding commits for commit graph among packed objects"
msgstr "Откриване на подаванията в гра̀фа измежду пакетираните обекти"
-#: commit-graph.c:1580
+#: commit-graph.c:1722
msgid "Finding extra edges in commit graph"
msgstr "Откриване на още върхове в гра̀фа с подаванията"
-#: commit-graph.c:1628
+#: commit-graph.c:1771
msgid "failed to write correct number of base graph ids"
msgstr "правилният брой на базовите идентификатори не може да се запише"
-#: commit-graph.c:1670 midx.c:819
+#: commit-graph.c:1802 midx.c:794
#, c-format
msgid "unable to create leading directories of %s"
msgstr "родителските директории на „%s“ не могат да бъдат създадени"
-#: commit-graph.c:1683
+#: commit-graph.c:1815
msgid "unable to create temporary graph layer"
msgstr "не може да бъде създаден временен слой за гра̀фа с подаванията"
-#: commit-graph.c:1688
+#: commit-graph.c:1820
#, c-format
msgid "unable to adjust shared permissions for '%s'"
msgstr "правата за споделен достъп до „%s“ не могат да бъдат зададени"
-#: commit-graph.c:1758
+#: commit-graph.c:1879
#, c-format
msgid "Writing out commit graph in %d pass"
msgid_plural "Writing out commit graph in %d passes"
msgstr[0] "Запазване на гра̀фа с подаванията в %d пас"
msgstr[1] "Запазване на гра̀фа с подаванията в %d паса"
-#: commit-graph.c:1803
+#: commit-graph.c:1915
msgid "unable to open commit-graph chain file"
msgstr "файлът с веригата на гра̀фа с подаванията не може да се отвори"
-#: commit-graph.c:1819
+#: commit-graph.c:1931
msgid "failed to rename base commit-graph file"
msgstr "основният файл на гра̀фа с подаванията не може да бъде преименуван"
-#: commit-graph.c:1839
+#: commit-graph.c:1951
msgid "failed to rename temporary commit-graph file"
msgstr "временният файл на гра̀фа с подаванията не може да бъде преименуван"
-#: commit-graph.c:1965
+#: commit-graph.c:2084
msgid "Scanning merged commits"
msgstr "Търсене на подаванията със сливания"
-#: commit-graph.c:2009
+#: commit-graph.c:2128
msgid "Merging commit-graph"
msgstr "Сливане на гра̀фа с подаванията"
-#: commit-graph.c:2115
+#: commit-graph.c:2235
msgid "attempting to write a commit-graph, but 'core.commitGraph' is disabled"
msgstr ""
"опит за запис на гра̀фа с подаванията, но настройката „core.commitGraph“ е "
"изключена"
-#: commit-graph.c:2214
+#: commit-graph.c:2342
msgid "too many commits to write graph"
msgstr "прекалено много подавания за записване на гра̀фа"
-#: commit-graph.c:2307
+#: commit-graph.c:2440
msgid "the commit-graph file has incorrect checksum and is likely corrupt"
msgstr "графът с подаванията е с грешна сума за проверка — вероятно е повреден"
-#: commit-graph.c:2317
+#: commit-graph.c:2450
#, c-format
msgid "commit-graph has incorrect OID order: %s then %s"
msgstr ""
"неправилна подредба на обектите по идентификатор в гра̀фа с подаванията: „%s“ "
"е преди „%s“, а не трябва"
-#: commit-graph.c:2327 commit-graph.c:2342
+#: commit-graph.c:2460 commit-graph.c:2475
#, c-format
msgid "commit-graph has incorrect fanout value: fanout[%d] = %u != %u"
msgstr ""
"неправилна стойност за откъс в гра̀фа с подаванията: fanout[%d] = %u, а "
"трябва да е %u"
-#: commit-graph.c:2334
+#: commit-graph.c:2467
#, c-format
msgid "failed to parse commit %s from commit-graph"
msgstr "подаване „%s“ в гра̀фа с подаванията не може да се анализира"
-#: commit-graph.c:2352
+#: commit-graph.c:2485
msgid "Verifying commits in commit graph"
msgstr "Проверка на подаванията в гра̀фа"
-#: commit-graph.c:2367
+#: commit-graph.c:2500
#, c-format
msgid "failed to parse commit %s from object database for commit-graph"
msgstr ""
"подаване „%s“ в базата от данни към гра̀фа с подаванията не може да се "
"анализира"
-#: commit-graph.c:2374
+#: commit-graph.c:2507
#, c-format
msgid "root tree OID for commit %s in commit-graph is %s != %s"
msgstr ""
"идентификаторът на обект за кореновото дърво за подаване „%s“ в гра̀фа с "
"подаванията е „%s“, а трябва да е „%s“"
-#: commit-graph.c:2384
+#: commit-graph.c:2517
#, c-format
msgid "commit-graph parent list for commit %s is too long"
msgstr "списъкът с родители на „%s“ в гра̀фа с подаванията е прекалено дълъг"
-#: commit-graph.c:2393
+#: commit-graph.c:2526
#, c-format
msgid "commit-graph parent for %s is %s != %s"
msgstr "родителят на „%s“ в гра̀фа с подаванията е „%s“, а трябва да е „%s“"
-#: commit-graph.c:2407
+#: commit-graph.c:2540
#, c-format
msgid "commit-graph parent list for commit %s terminates early"
msgstr "списъкът с родители на „%s“ в гра̀фа с подаванията е прекалено къс"
-#: commit-graph.c:2412
+#: commit-graph.c:2545
#, c-format
msgid ""
"commit-graph has generation number zero for commit %s, but non-zero elsewhere"
@@ -2541,7 +2560,7 @@ msgstr ""
"номерът на поколението на подаване „%s“ в гра̀фа с подаванията е 0, а другаде "
"не е"
-#: commit-graph.c:2416
+#: commit-graph.c:2549
#, c-format
msgid ""
"commit-graph has non-zero generation number for commit %s, but zero elsewhere"
@@ -2549,21 +2568,21 @@ msgstr ""
"номерът на поколението на подаване „%s“ в гра̀фа с подаванията не е 0, а "
"другаде е"
-#: commit-graph.c:2432
+#: commit-graph.c:2566
#, c-format
-msgid "commit-graph generation for commit %s is %u != %u"
+msgid "commit-graph generation for commit %s is %<PRIuMAX> < %<PRIuMAX>"
msgstr ""
-"номерът на поколението на подаване „%s“ в гра̀фа с подаванията е %u, а "
-"другаде е %u"
+"номерът на поколението на подаване „%s“ в гра̀фа с подаванията е %<PRIuMAX> < "
+"%<PRIuMAX>"
-#: commit-graph.c:2438
+#: commit-graph.c:2572
#, c-format
msgid "commit date for commit %s in commit-graph is %<PRIuMAX> != %<PRIuMAX>"
msgstr ""
"датата на подаване на „%s“ в гра̀фа с подаванията е %<PRIuMAX>, а трябва да е "
"%<PRIuMAX>"
-#: commit.c:52 sequencer.c:2879 builtin/am.c:359 builtin/am.c:403
+#: commit.c:52 sequencer.c:2887 builtin/am.c:359 builtin/am.c:403
#: builtin/am.c:1371 builtin/am.c:2018 builtin/replace.c:457
#, c-format
msgid "could not parse %s"
@@ -2597,29 +2616,29 @@ msgstr ""
"\n"
" git config advice.graftFileDeprecated false"
-#: commit.c:1172
+#: commit.c:1223
#, c-format
msgid "Commit %s has an untrusted GPG signature, allegedly by %s."
msgstr ""
"Подаването „%s“ е с недоверен подпис от GPG, който твърди, че е на „%s“."
-#: commit.c:1176
+#: commit.c:1227
#, c-format
msgid "Commit %s has a bad GPG signature allegedly by %s."
msgstr ""
"Подаването „%s“ е с неправилен подпис от GPG, който твърди, че е на „%s“."
-#: commit.c:1179
+#: commit.c:1230
#, c-format
msgid "Commit %s does not have a GPG signature."
msgstr "Подаването „%s“ е без подпис от GPG."
-#: commit.c:1182
+#: commit.c:1233
#, c-format
msgid "Commit %s has a good GPG signature by %s\n"
msgstr "Подаването „%s“ е с коректен подпис от GPG на „%s“.\n"
-#: commit.c:1436
+#: commit.c:1487
msgid ""
"Warning: commit message did not conform to UTF-8.\n"
"You may want to amend it after fixing the message, or set the config\n"
@@ -2633,7 +2652,7 @@ msgstr ""
msgid "memory exhausted"
msgstr "паметта свърши"
-#: config.c:125
+#: config.c:126
#, c-format
msgid ""
"exceeded maximum include depth (%d) while including\n"
@@ -2648,162 +2667,206 @@ msgstr ""
" %s\n"
"Това може да се дължи на зацикляне при вмъкването."
-#: config.c:141
+#: config.c:142
#, c-format
msgid "could not expand include path '%s'"
msgstr "пътят за вмъкване „%s“не може да бъде разширен"
-#: config.c:152
+#: config.c:153
msgid "relative config includes must come from files"
msgstr "относителните вмъквания на конфигурации трябва да идват от файлове"
-#: config.c:198
+#: config.c:199
msgid "relative config include conditionals must come from files"
msgstr "относителните условни изрази за вмъкване трябва да идват от файлове"
-#: config.c:378
+#: config.c:396
+#, c-format
+msgid "invalid config format: %s"
+msgstr "неправилен формат на настройка: %s"
+
+#: config.c:400
+#, c-format
+msgid "missing environment variable name for configuration '%.*s'"
+msgstr "липсва име на променлива на средата за настройката „%.*s“"
+
+#: config.c:405
+#, c-format
+msgid "missing environment variable '%s' for configuration '%.*s'"
+msgstr "липсва променлива на средата „%s“ за настройката „%.*s“"
+
+#: config.c:442
#, c-format
msgid "key does not contain a section: %s"
msgstr "ключът не съдържа раздел: „%s“"
-#: config.c:384
+#: config.c:448
#, c-format
msgid "key does not contain variable name: %s"
msgstr "ключът не съдържа име на променлива: „%s“"
-#: config.c:408 sequencer.c:2580
+#: config.c:472 sequencer.c:2588
#, c-format
msgid "invalid key: %s"
msgstr "неправилен ключ: „%s“"
-#: config.c:414
+#: config.c:478
#, c-format
msgid "invalid key (newline): %s"
msgstr "неправилен ключ (нов ред): „%s“"
-#: config.c:450 config.c:462
+#: config.c:511
+msgid "empty config key"